diff --git a/.circleci/config.yml b/.circleci/config.yml index 8f9fa8c9fed0..3014cb5c5074 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ _defaults: &defaults docker: # CircleCI maintains a library of pre-built images # documented at https://circleci.com/developer/images/image/cimg/python - - image: cimg/python:3.11.4 + - image: cimg/python:3.11.8 working_directory: ~/repo @@ -56,7 +56,7 @@ jobs: . venv/bin/activate pip install --progress-bar=off -r requirements/test_requirements.txt # get newer, pre-release versions of critical packages - pip install --progress-bar=off --pre --extra-index-url https://pypi.anaconda.org/scientific-python-nightly-wheels/simple -r requirements/doc_requirements.txt + pip install --progress-bar=off --pre -r requirements/doc_requirements.txt # then install numpy HEAD, which will override the version installed above pip install . --config-settings=setup-args="-Dallow-noblas=true" @@ -74,7 +74,7 @@ jobs: . venv/bin/activate cd doc # Don't use -q, show warning summary" - SPHINXOPTS="-j2 -n" make -e html || echo "ignoring errors for now, see gh-13114" + SPHINXOPTS="-W -n" make -e html if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then echo "doc build failed: build/html is empty" exit -1 @@ -85,7 +85,7 @@ jobs: command: | . venv/bin/activate cd doc/neps - SPHINXOPTS="-j2 -q" make -e html + SPHINXOPTS="-n" make -e html || echo "ignoring errors for now" - store_artifacts: path: doc/build/html/ @@ -95,14 +95,10 @@ jobs: # destination: neps - run: - name: run doctests on documentation + name: run refguide-check command: | . 
venv/bin/activate - # Note: keep these two checks separate, because they seem to - # influence each other through changing global state (e.g., via - # `np.polynomial.set_default_printstyle`) - python tools/refguide_check.py --rst - python tools/refguide_check.py --doctests + python tools/refguide_check.py -v - persist_to_workspace: root: ~/repo diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml index 0367f937a74f..b237d52424ac 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yml +++ b/.github/ISSUE_TEMPLATE/bug-report.yml @@ -56,7 +56,7 @@ body: label: "Runtime Environment:" description: | 1. Install `threadpoolctl` (e.g. with `pip` or `conda`) - 2. Paste the output of `import numpy; print(numpy.show_runtime())`. + 2. Paste the output of `import numpy; numpy.show_runtime()`. Note: Only valid for NumPy 1.24 or newer. validations: diff --git a/.github/ISSUE_TEMPLATE/typing.yml b/.github/ISSUE_TEMPLATE/typing.yml new file mode 100644 index 000000000000..a35b339e4883 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/typing.yml @@ -0,0 +1,68 @@ +name: Static Typing +description: Report an issue with the NumPy typing hints. +title: "TYP: " +labels: [Static typing] + +body: +- type: markdown + attributes: + value: > + Thank you for taking the time to report this issue. + Please make sure that this issue hasn't already been reported before. + +- type: textarea + attributes: + label: "Describe the issue:" + validations: + required: true + +- type: textarea + attributes: + label: "Reproduce the code example:" + description: > + A short code example that reproduces the error in your type-checker. It + should be self-contained, i.e., can be run as-is via e.g. + `mypy myproblem.py` or `pyright myproblem.py`. 
+ placeholder: | + import numpy as np + import numpy.typing as npt + << your code here >> + render: python + validations: + required: true + +- type: textarea + attributes: + label: "Error message:" + description: > + Please include all relevant error messages from your type-checker or IDE. + render: shell + +- type: textarea + attributes: + label: "Python and NumPy Versions:" + description: > + Output from `import sys, numpy; print(numpy.__version__); print(sys.version)`. + validations: + required: true + +- type: textarea + attributes: + label: "Type-checker version and settings:" + description: > + Please include the exact version of the type-checker you are using. + Popular (static) type checkers include Mypy, Pyright / Pylance, Pytype, + Pyre, PyCharm, etc. + Also include the full CLI command used to run the type-checker, and + all of the relevant configuration options. + validations: + required: true + +- type: textarea + attributes: + label: "Additional typing packages." + description: | + If you are using `typing-extensions` or typing-stub packages, please + list their versions here. 
+ validations: + required: false diff --git a/.github/pr-prefix-labeler.yml b/.github/pr-prefix-labeler.yml index 2b95f1f314f6..4905b502045d 100644 --- a/.github/pr-prefix-labeler.yml +++ b/.github/pr-prefix-labeler.yml @@ -1,15 +1,16 @@ "API": "30 - API" "BENCH": "28 - Benchmark" -"BUG": "00 - Bug" "BLD": "36 - Build" +"BUG": "00 - Bug" "DEP": "07 - Deprecation" "DEV": "16 - Development" "DOC": "04 - Documentation" "ENH": "01 - Enhancement" "MAINT": "03 - Maintenance" +"MNT": "03 - Maintenance" +"REL": "14 - Release" "REV": "34 - Reversion" "STY": "03 - Maintenance" "TST": "05 - Testing" -"REL": "14 - Release" -"WIP": "25 - WIP" "TYP": "static typing" +"WIP": "25 - WIP" diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 81eef46e30a4..467400d99336 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -45,7 +45,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 + uses: github/codeql-action/init@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -55,7 +55,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 + uses: github/codeql-action/autobuild@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 # ℹ️ Command-line programs to run using the OS shell. 
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -68,6 +68,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.13.4 + uses: github/codeql-action/analyze@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v3.26.0 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/cygwin.yml b/.github/workflows/cygwin.yml index 5bc30262db01..adf2c4442a9e 100644 --- a/.github/workflows/cygwin.yml +++ b/.github/workflows/cygwin.yml @@ -62,7 +62,7 @@ jobs: cd tools /usr/bin/python3.9 -m pytest --pyargs numpy -n2 -m "not slow" - name: Upload wheel if tests fail - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 if: failure() with: name: numpy-cygwin-wheel diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index d18b1a0b18ef..461ef2b4253b 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -17,4 +17,4 @@ jobs: - name: 'Checkout Repository' uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: 'Dependency Review' - uses: actions/dependency-review-action@9129d7d40b8c12c1ed0f60400d00c92d437adcce # v4.1.3 + uses: actions/dependency-review-action@5a2ce3f5b92ee19cbb1541a4984c76d921601d7c # v4.3.4 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index 728a91f691b3..276592e1840f 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -5,6 +5,28 @@ on: branches: - main - maintenance/** + # Note: this workflow gets triggered on the same schedule as the + # wheels.yml workflow, with the exception that this workflow runs + # the test suite for the Pyodide wheel too, prior to uploading it. 
+ # + # Run on schedule to upload to Anaconda.org + schedule: + # ┌───────────── minute (0 - 59) + # │ ┌───────────── hour (0 - 23) + # │ │ ┌───────────── day of the month (1 - 31) + # │ │ │ ┌───────────── month (1 - 12 or JAN-DEC) + # │ │ │ │ ┌───────────── day of the week (0 - 6 or SUN-SAT) + # │ │ │ │ │ + - cron: "42 2 * * SUN,WED" + workflow_dispatch: + inputs: + push_wheels: + # Can be 'true' or 'false'. Default is 'false'. + # Warning: this will overwrite existing wheels. + description: > + Push wheels to Anaconda.org if the build succeeds + required: false + default: 'false' env: FORCE_COLOR: 3 @@ -23,13 +45,13 @@ jobs: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' env: - PYODIDE_VERSION: 0.25.0 + PYODIDE_VERSION: 0.26.0 # PYTHON_VERSION and EMSCRIPTEN_VERSION are determined by PYODIDE_VERSION. # The appropriate versions can be found in the Pyodide repodata.json # "info" field, or in Makefile.envs: # https://github.com/pyodide/pyodide/blob/main/Makefile.envs#L2 - PYTHON_VERSION: 3.11.3 - EMSCRIPTEN_VERSION: 3.1.46 + PYTHON_VERSION: 3.12.1 + EMSCRIPTEN_VERSION: 3.1.58 NODE_VERSION: 18 steps: - name: Checkout NumPy @@ -42,7 +64,7 @@ jobs: - name: Set up Python ${{ env.PYTHON_VERSION }} id: setup-python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: ${{ env.PYTHON_VERSION }} @@ -53,7 +75,7 @@ jobs: actions-cache-folder: emsdk-cache - name: Install pyodide-build - run: pip install "pydantic<2" pyodide-build==${{ env.PYODIDE_VERSION }} + run: pip install pyodide-build==${{ env.PYODIDE_VERSION }} - name: Find installation for pyodide-build shell: python @@ -69,17 +91,16 @@ jobs: with open(env_file, "a") as myfile: myfile.write(f"PYODIDE_BUILD_PATH={pyodide_build_path}\n") - - name: Apply patch(es) for pyodide-build installation - run: | - ls -a ${{ env.PYODIDE_BUILD_PATH }} - patch -d "${{ 
env.PYODIDE_BUILD_PATH }}" -p1 < tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch - - name: Build NumPy for Pyodide run: | - pyodide build -Cbuild-dir=build -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" -Csetup-args="-Dblas=none" -Csetup-args="-Dlapack=none" + pyodide build \ + -Cbuild-dir=build \ + -Csetup-args="--cross-file=$PWD/tools/ci/emscripten/emscripten.meson.cross" \ + -Csetup-args="-Dblas=none" \ + -Csetup-args="-Dlapack=none" - name: Set up Node.js - uses: actions/setup-node@60edb5dd545a775178f52524783378180af0d1f8 # v4.0.2 + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: ${{ env.NODE_VERSION }} @@ -95,3 +116,15 @@ jobs: source .venv-pyodide/bin/activate cd .. pytest --pyargs numpy -m "not slow" + + # Push to https://anaconda.org/scientific-python-nightly-wheels/numpy + # WARNING: this job will overwrite any existing WASM wheels. + - name: Push to Anaconda PyPI index + if: >- + (github.repository == 'numpy/numpy') && + (github.event_name == 'workflow_dispatch' && github.event.inputs.push_wheels == 'true') || + (github.event_name == 'schedule') + uses: scientific-python/upload-nightly-action@b67d7fcc0396e1128a474d1ab2b48aa94680f9fc # v0.5.0 + with: + artifacts_path: dist/ + anaconda_nightly_upload_token: ${{ secrets.NUMPY_NIGHTLY_UPLOAD_TOKEN }} diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index d0df9834ad70..2e63c7494c54 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -37,9 +37,9 @@ jobs: with: submodules: recursive fetch-depth: 0 - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: - python-version: '3.9' + python-version: '3.10' - name: Install linter requirements run: python -m pip install -r requirements/linter_requirements.txt @@ -53,14 +53,17 @@ jobs: runs-on: 
ubuntu-latest env: MESON_ARGS: "-Dallow-noblas=true -Dcpu-baseline=none -Dcpu-dispatch=none" + strategy: + matrix: + version: ["3.10", "3.11", "3.12", "3.13-dev"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: - python-version: '3.9' + python-version: ${{ matrix.version }} - uses: ./.github/meson_actions pypy: @@ -72,9 +75,9 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: - python-version: 'pypy3.9-v7.3.12' + python-version: 'pypy3.10-v7.3.15' - name: Setup using scipy-openblas run: | python -m pip install -r requirements/ci_requirements.txt @@ -111,7 +114,7 @@ jobs: pytest --pyargs numpy -m "not slow" full: - # Build a wheel, install it, then run the full test suite with code coverage + # Install as editable, then run the full test suite with code coverage needs: [smoke_test] runs-on: ubuntu-22.04 steps: @@ -119,9 +122,9 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: - python-version: '3.9' + python-version: '3.10' - name: Install build and test dependencies from PyPI run: | pip install -r requirements/build_requirements.txt @@ -135,16 +138,14 @@ jobs: mkdir -p ./.openblas python -c"import scipy_openblas32 as ob32; print(ob32.get_pkg_config())" > ./.openblas/scipy-openblas.pc - - name: Build a wheel + - name: Install as editable env: PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas run: | - python -m build --wheel --no-isolation --skip-dependency-check - pip install 
dist/numpy*.whl + pip install -e . --no-build-isolation - name: Run full test suite run: | - cd tools - pytest --pyargs numpy --cov-report=html:build/coverage + pytest numpy --cov-report=html:build/coverage # TODO: gcov benchmark: @@ -156,14 +157,14 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: - python-version: '3.9' + python-version: '3.10' - name: Install build and benchmarking dependencies run: | sudo apt-get update sudo apt-get install libopenblas-dev ninja-build - pip install spin cython asv virtualenv packaging + pip install asv virtualenv packaging -r requirements/build_requirements.txt - name: Install NumPy run: | spin build -- -Dcpu-dispatch=none @@ -177,6 +178,12 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' run: | spin bench --quick + - name: Check docstests + shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' + run: | + pip install scipy-doctest hypothesis matplotlib scipy pytz pandas + spin check-docs -v + spin check-tutorials -v sdist: needs: [smoke_test] @@ -187,7 +194,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' - name: Install gfortran and setup OpenBLAS (sdist build) @@ -225,11 +232,11 @@ jobs: uses: actions/checkout@v4 with: repository: data-apis/array-api-tests - ref: '9afe8c709d81f005c98d383c82ad5e1c2cd8166c' # Latest commit as of 2023-11-24 + ref: '827edd804bcace9d64176b8115138d29ae3e8dec' # Latest commit as of 2024-07-30 submodules: 'true' path: 'array-api-tests' - name: Set up Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: 
actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -246,9 +253,7 @@ jobs: PYTHONWARNINGS: 'ignore::UserWarning::,ignore::DeprecationWarning::,ignore::RuntimeWarning::' run: | cd ${GITHUB_WORKSPACE}/array-api-tests - # remove once https://github.com/data-apis/array-api-tests/pull/217 is merged - touch pytest.ini - pytest array_api_tests -v -c pytest.ini --ci --max-examples=2 --derandomize --disable-deadline --skips-file ${GITHUB_WORKSPACE}/tools/ci/array-api-skips.txt + pytest array_api_tests -v -c pytest.ini --ci --max-examples=100 --derandomize --disable-deadline --xfails-file ${GITHUB_WORKSPACE}/tools/ci/array-api-xfails.txt custom_checks: needs: [smoke_test] @@ -259,7 +264,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' - name: Install build and test dependencies from PyPI @@ -281,3 +286,28 @@ jobs: run: | # Need the explicit `bash -c` here because `grep` returns exit code 1 for no matches bash -c "! vulture . 
--min-confidence 100 --exclude doc/,numpy/distutils/,vendored-meson/ | grep 'unreachable'" + - name: Check usage of install_tag + run: | + rm -rf build-install + ./vendored-meson/meson/meson.py install -C build --destdir ../build-install --tags=runtime,python-runtime,devel + python tools/check_installed_files.py $(find ./build-install -path '*/site-packages/numpy') --no-tests + + free-threaded: + needs: [smoke_test] + runs-on: ubuntu-latest + if: github.event_name != 'push' + steps: + - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + submodules: recursive + fetch-tags: true + # TODO: replace with setup-python when there is support + - uses: deadsnakes/action@6c8b9b82fe0b4344f4b98f2775fcc395df45e494 # v3.1.0 + with: + python-version: '3.13-dev' + nogil: true + # TODO: remove cython nightly install when cython does a release + - name: Install nightly Cython + run: | + pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + - uses: ./.github/meson_actions diff --git a/.github/workflows/linux_blas.yml b/.github/workflows/linux_blas.yml index 598a1c784b62..e3d032ee25d4 100644 --- a/.github/workflows/linux_blas.yml +++ b/.github/workflows/linux_blas.yml @@ -69,7 +69,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -111,7 +111,9 @@ jobs: shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: TERM: xterm-256color - + # TODO: remove when scipy-openblas nightly tests aren't failing anymore. 
+ # xref gh-26824 + continue-on-error: true run: | pip install pytest pytest-xdist hypothesis typing_extensions spin test -j auto @@ -196,7 +198,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -224,7 +226,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -284,7 +286,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -347,7 +349,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -383,7 +385,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' diff --git a/.github/workflows/linux_compiler_sanitizers.yml b/.github/workflows/linux_compiler_sanitizers.yml index 78e90122c348..efa8eb980730 100644 --- a/.github/workflows/linux_compiler_sanitizers.yml +++ b/.github/workflows/linux_compiler_sanitizers.yml @@ -21,7 +21,7 @@ permissions: contents: read # to fetch code (actions/checkout) jobs: - gcc_sanitizers: + clang_sanitizers: # To enable this workflow on a fork, comment out: if: github.repository == 'numpy/numpy' runs-on: ubuntu-latest @@ -30,11 +30,13 @@ jobs: with: submodules: recursive 
fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: ${{ env.PYTHON_VERSION }} - name: Install dependencies run: | + sudo apt update + sudo apt install -y llvm libstdc++-12-dev pip install -r requirements/build_requirements.txt pip install -r requirements/ci_requirements.txt - name: Build @@ -43,7 +45,7 @@ jobs: TERM: xterm-256color PKG_CONFIG_PATH: ${{ github.workspace }}/.openblas run: - spin build --with-scipy-openblas=32 -- --werror -Db_sanitize=address,undefined + CC=clang CXX=clang++ spin build --with-scipy-openblas=32 -- -Db_sanitize=address,undefined - name: Test shell: 'script -q -e -c "bash --noprofile --norc -eo pipefail {0}"' env: @@ -52,5 +54,5 @@ jobs: pip install pytest pytest-xdist hypothesis typing_extensions ASAN_OPTIONS=detect_leaks=0:symbolize=1:strict_init_order=true:allocator_may_return_null=1:halt_on_error=1 \ UBSAN_OPTIONS=halt_on_error=0 \ - LD_PRELOAD=$(gcc --print-file-name=libasan.so) \ + LD_PRELOAD=$(clang --print-file-name=libclang_rt.asan-x86_64.so) \ python -m spin test -- -v -s diff --git a/.github/workflows/linux_qemu.yml b/.github/workflows/linux_qemu.yml index 967e16b327a9..d44fc365973b 100644 --- a/.github/workflows/linux_qemu.yml +++ b/.github/workflows/linux_qemu.yml @@ -44,22 +44,25 @@ jobs: # test_unary_spurious_fpexception is currently skipped # FIXME(@seiko2plus): Requires confirmation for the following issue: # The presence of an FP invalid exception caused by sqrt. Unsure if this is a qemu bug or not. 
- "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_unary_spurious_fpexception", + "arm" + ] - [ "ppc64le", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "ppc64le - baseline(Power9)", "powerpc64le-linux-gnu", "ppc64le/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vsx3", "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", - ] + "ppc64le" + ] - [ "s390x", "s390x-linux-gnu", @@ -68,27 +71,31 @@ jobs: # Skipping TestRationalFunctions.test_gcd_overflow test # because of a possible qemu bug that appears to be related to int64 overflow in absolute operation. # TODO(@seiko2plus): Confirm the bug and provide a minimal reproducer, then report it to upstream. - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "s390x - baseline(Z13)", "s390x-linux-gnu", "s390x/ubuntu:22.04", "-Dallow-noblas=true -Dcpu-baseline=vx", - "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow" - ] + "(test_kind or test_multiarray or test_simd or test_umath or test_ufunc) and not test_gcd_overflow", + "s390x" + ] - [ "riscv64", "riscv64-linux-gnu", "riscv64/ubuntu:22.04", "-Dallow-noblas=true", - "test_kind or test_multiarray or test_simd or test_umath or test_ufunc" - ] + "test_kind or test_multiarray or test_simd or test_umath or test_ufunc", + "riscv64" + ] env: TOOLCHAIN_NAME: ${{ matrix.BUILD_PROP[1] }} DOCKER_CONTAINER: ${{ matrix.BUILD_PROP[2] }} MESON_OPTIONS: ${{ matrix.BUILD_PROP[3] }} RUNTIME_TEST_FILTER: ${{ matrix.BUILD_PROP[4] }} + ARCH: ${{ matrix.BUILD_PROP[5] }} TERM: 
xterm-256color name: "${{ matrix.BUILD_PROP[0] }}" @@ -108,7 +115,7 @@ jobs: sudo apt install -y ninja-build gcc-${TOOLCHAIN_NAME} g++-${TOOLCHAIN_NAME} gfortran-${TOOLCHAIN_NAME} - name: Cache docker container - uses: actions/cache@v4.0.1 + uses: actions/cache@v4.1.1 id: container-cache with: path: ~/docker_${{ matrix.BUILD_PROP[1] }} @@ -117,7 +124,8 @@ jobs: - name: Creates new container if: steps.container-cache.outputs.cache-hit != 'true' run: | - docker run --name the_container --interactive -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " + docker run --platform=linux/${ARCH} --name the_container --interactive \ + -v /:/host -v $(pwd):/numpy ${DOCKER_CONTAINER} /bin/bash -c " apt update && apt install -y cmake git python3 python-is-python3 python3-dev python3-pip && mkdir -p /lib64 && ln -s /host/lib64/ld-* /lib64/ && @@ -147,10 +155,11 @@ jobs: - name: Meson Build run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - cd /numpy && spin build --clean -- ${MESON_OPTIONS} - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + cd /numpy && spin build --clean -- ${MESON_OPTIONS} + '" - name: Meson Log if: always() @@ -158,9 +167,11 @@ jobs: - name: Run Tests run: | - docker run --rm -e "TERM=xterm-256color" -v $(pwd):/numpy -v /:/host the_container \ - /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' - export F90=/usr/bin/gfortran - cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" - '" + docker run --rm --platform=linux/${ARCH} -e "TERM=xterm-256color" \ + -v $(pwd):/numpy -v /:/host the_container \ + /bin/script -e -q -c "/bin/bash --noprofile --norc -eo pipefail -c ' + export F90=/usr/bin/gfortran + cd /numpy && spin test -- -k \"${RUNTIME_TEST_FILTER}\" + '" + diff --git 
a/.github/workflows/linux_simd.yml b/.github/workflows/linux_simd.yml index 13ef2bffe005..a19ae38502ba 100644 --- a/.github/workflows/linux_simd.yml +++ b/.github/workflows/linux_simd.yml @@ -62,9 +62,9 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: - python-version: '3.9' + python-version: '3.10' - uses: ./.github/meson_actions name: Build/Test @@ -79,9 +79,9 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: - python-version: '3.9' + python-version: '3.10' - name: Install GCC/8/9 run: | @@ -117,7 +117,7 @@ jobs: - [ "without optimizations", "-Dallow-noblas=true -Ddisable-optimization=true", - "3.12-dev" + "3.12" ] - [ "native", @@ -132,7 +132,7 @@ jobs: - [ "without avx512/avx2/fma3", "-Dallow-noblas=true -Dcpu-dispatch=SSSE3,SSE41,POPCNT,SSE42,AVX,F16C", - "3.9" + "3.10" ] env: @@ -144,7 +144,7 @@ jobs: with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: "${{ matrix.BUILD_PROP[2] }}" - uses: ./.github/meson_actions @@ -152,13 +152,13 @@ jobs: intel_sde_avx512: needs: [baseline_only] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -170,27 +170,23 @@ jobs: - name: Install dependencies run: | - sudo apt update - sudo apt install -y 
g++-13 - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 1 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 1 python -m pip install -r requirements/build_requirements.txt python -m pip install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512f -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' + run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_skx -Dtest-simd='BASELINE,AVX512_KNL,AVX512_KNM,AVX512_SKX,AVX512_CLX,AVX512_CNL,AVX512_ICL,AVX512_SPR' - name: Meson Log if: always() run: cat build/meson-logs/meson-log.txt - - name: SIMD tests (KNM) + - name: SIMD tests (SKX) run: | export NUMPY_SITE=$(realpath build-install/usr/lib/python*/site-packages/) export PYTHONPATH="$PYTHONPATH:$NUMPY_SITE" cd build-install && - sde -knm -- python -c "import numpy; numpy.show_config()" && - sde -knm -- python -m pytest $NUMPY_SITE/numpy/_core/tests/test_simd* + sde -skx -- python -c "import numpy; numpy.show_config()" && + sde -skx -- python -m pytest $NUMPY_SITE/numpy/_core/tests/test_simd* - name: linalg/ufunc/umath tests (TGL) run: | @@ -206,13 +202,13 @@ jobs: intel_sde_spr: needs: [baseline_only] - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' @@ -224,15 +220,11 @@ jobs: - name: Install dependencies run: | - sudo apt update - sudo apt install -y g++-13 - sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-13 1 - sudo update-alternatives --install /usr/bin/g++ g++ /usr/bin/g++-13 1 python -m pip install -r requirements/build_requirements.txt python -m pip 
install pytest pytest-xdist hypothesis typing_extensions - name: Build - run: spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr + run: CC=gcc-13 CXX=g++-13 spin build -- -Dallow-noblas=true -Dcpu-baseline=avx512_spr - name: Meson Log if: always() diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index d0d7605221f4..c941c46fd2bc 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -25,7 +25,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ["3.11"] + python-version: ["3.12"] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -44,7 +44,7 @@ jobs: echo "today=$(/bin/date -u '+%Y%m%d')" >> $GITHUB_OUTPUT - name: Setup compiler cache - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1 id: cache-ccache with: path: ${{ steps.prep-ccache.outputs.dir }} @@ -52,15 +52,15 @@ jobs: restore-keys: | ${{ github.workflow }}-${{ matrix.python-version }}-ccache-macos- - - name: Setup Mambaforge - uses: conda-incubator/setup-miniconda@030178870c779d9e5e1b4e563269f3aa69b04081 # v3.0.3 + - name: Setup Miniforge + uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 with: python-version: ${{ matrix.python-version }} channels: conda-forge channel-priority: true activate-environment: numpy-dev use-only-tar-bz2: false - miniforge-variant: Mambaforge + miniforge-variant: Miniforge3 miniforge-version: latest use-mamba: true @@ -68,7 +68,7 @@ jobs: # ensure we re-solve once a day (since we don't lock versions). Could be # replaced by a conda-lock based approach in the future. 
- name: Cache conda environment - uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1 + uses: actions/cache@3624ceb22c1c5a301c8db4169662070a689d9ea8 # v4.1.1 env: # Increase this value to reset cache if environment.yml has not changed CACHE_NUMBER: 1 @@ -119,7 +119,7 @@ jobs: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: '3.10' diff --git a/.github/workflows/mypy.yml b/.github/workflows/mypy.yml index 6d0a25eb71c5..726e6b839051 100644 --- a/.github/workflows/mypy.yml +++ b/.github/workflows/mypy.yml @@ -48,13 +48,13 @@ jobs: os_python: - [ubuntu-latest, '3.12'] - [windows-2019, '3.11'] - - [macos-12, '3.9'] + - [macos-12, '3.10'] steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: recursive fetch-tags: true - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: ${{ matrix.os_python[1] }} - name: Install dependencies diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index aef8222d9ea7..d9577fae45ac 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -30,7 +30,7 @@ jobs: persist-credentials: false - name: "Run analysis" - uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1 + uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0 with: results_file: results.sarif results_format: sarif @@ -42,7 +42,7 @@ jobs: # Upload the results as artifacts (optional). Commenting out will disable # uploads of run results in SARIF format to the repository Actions tab. 
- name: "Upload artifact" - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + uses: actions/upload-artifact@834a144ee995460fba8ed112a2fc961b36a5ec5a # v4.3.6 with: name: SARIF file path: results.sarif @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@cdcdbb579706841c47f7063dda365e292e5cad7a # v2.1.27 + uses: github/codeql-action/upload-sarif@eb055d739abdc2e8de2e5f4ba1a8b246daa779aa # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 734dd635b549..e763b8d86dd4 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -26,6 +26,9 @@ on: branches: - main - maintenance/** + push: + tags: + - v* workflow_dispatch: concurrency: @@ -73,8 +76,8 @@ jobs: # Github Actions doesn't support pairing matrix values together, let's improvise # https://github.com/github/feedback/discussions/7835#discussioncomment-1769026 buildplat: - - [ubuntu-20.04, manylinux_x86_64, ""] - - [ubuntu-20.04, musllinux_x86_64, ""] + - [ubuntu-22.04, manylinux_x86_64, ""] + - [ubuntu-22.04, musllinux_x86_64, ""] - [macos-13, macosx_x86_64, openblas] # targeting macos >= 14. 
Could probably build on macos-14, but it would be a cross-compile @@ -82,13 +85,18 @@ jobs: - [macos-14, macosx_arm64, accelerate] # always use accelerate - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] - python: ["cp39", "cp310", "cp311", "cp312", "pp39"] + python: ["cp310", "cp311", "cp312", "pp310", "cp313", "cp313t"] exclude: # Don't build PyPy 32-bit windows - - buildplat: [windows-2019, win32] - python: "pp39" - - buildplat: [ ubuntu-20.04, musllinux_x86_64 ] - python: "pp39" + - buildplat: [windows-2019, win32, ""] + python: "pp310" + - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] + python: "pp310" + - buildplat: [ macos-14, macosx_arm64, accelerate ] + python: "pp310" + - buildplat: [ macos13, macosx_x86_64, openblas ] + python: "cp313t" + env: IS_32_BIT: ${{ matrix.buildplat[1] == 'win32' }} IS_PUSH: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') }} @@ -98,11 +106,6 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: true - # versioneer.py requires the latest tag to be reachable. Here we - # fetch the complete history to get access to the tags. 
- # A shallow clone can work when the following issue is resolved: - # https://github.com/actions/checkout/issues/338 - fetch-tags: true - name: Setup MSVC (32-bit) if: ${{ matrix.buildplat[1] == 'win32' }} @@ -123,13 +126,18 @@ jobs: if: runner.os == 'windows' # Used to push the built wheels - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: python-version: "3.x" - name: Setup macOS if: matrix.buildplat[0] == 'macos-13' || matrix.buildplat[0] == 'macos-14' run: | + # Needed due to https://github.com/actions/runner-images/issues/3371 + # Supported versions: https://github.com/actions/runner-images/blob/main/images/macos/macos-14-arm64-Readme.md + echo "FC=gfortran-13" >> "$GITHUB_ENV" + echo "F77=gfortran-13" >> "$GITHUB_ENV" + echo "F90=gfortran-13" >> "$GITHUB_ENV" if [[ ${{ matrix.buildplat[2] }} == 'accelerate' ]]; then # macosx_arm64 and macosx_x86_64 with accelerate # only target Sonoma onwards @@ -148,18 +156,25 @@ jobs: echo "CIBW_ENVIRONMENT_MACOS=$CIBW PKG_CONFIG_PATH=$PKG_CONFIG_PATH DYLD_LIBRARY_PATH=$DYLD" >> "$GITHUB_ENV" fi + - name: Set up free-threaded build + if: matrix.python == 'cp313t' + shell: bash -el {0} + run: | + echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" + - name: Build wheels - uses: pypa/cibuildwheel@ce3fb7832089eb3e723a0a99cab7f3eaccf074fd # v2.16.5 + uses: pypa/cibuildwheel@7940a4c0e76eb2030e473a5f864f291f63ee879b # v2.21.3 env: CIBW_PRERELEASE_PYTHONS: True + CIBW_FREE_THREADED_SUPPORT: True CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} - - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: ${{ matrix.python }}-${{ matrix.buildplat[1] }}-${{ matrix.buildplat[2] }} path: ./wheelhouse/*.whl - - uses: 
mamba-org/setup-micromamba@422500192359a097648154e8db4e39bdb6c6eed7 + - uses: mamba-org/setup-micromamba@617811f69075e3fd3ae68ca64220ad065877f246 with: # for installation of anaconda-client, required for upload to # anaconda.org @@ -212,16 +227,11 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 with: submodules: true - # versioneer.py requires the latest tag to be reachable. Here we - # fetch the complete history to get access to the tags. - # A shallow clone can work when the following issue is resolved: - # https://github.com/actions/checkout/issues/338 - fetch-tags: true # Used to push the built wheels - - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + - uses: actions/setup-python@f677139bbe7f9c59b41e40162b753c062f5d49a3 # v5.2.0 with: # Build sdist on lowest supported Python - python-version: "3.9" + python-version: "3.10" - name: Build sdist run: | python -m pip install -U pip build @@ -240,12 +250,12 @@ jobs: python -mpip install twine twine check dist/* - - uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1 + - uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3 with: name: sdist path: ./dist/* - - uses: conda-incubator/setup-miniconda@030178870c779d9e5e1b4e563269f3aa69b04081 # v3.0.3 + - uses: conda-incubator/setup-miniconda@a4260408e20b96e80095f42ff7f1a15b27dd94ca # v3.0.4 with: # for installation of anaconda-client, required for upload to # anaconda.org diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 38a6cf24b7e0..0ecf4be83628 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -31,13 +31,13 @@ jobs: fetch-tags: true - name: Setup Python - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.11' - name: Install build dependencies from PyPI run: | 
- python -m pip install spin Cython + python -m pip install -r requirements/build_requirements.txt - name: Install pkg-config run: | @@ -48,7 +48,10 @@ jobs: - name: Install Clang-cl if: matrix.compiler == 'Clang-cl' run: | - choco install llvm -y --version=16.0.6 + # llvm is preinstalled, but leave + # this here in case we need to pin the + # version at some point. + #choco install llvm -y - name: Install NumPy (MSVC) if: matrix.compiler == 'MSVC' @@ -91,7 +94,7 @@ jobs: fetch-tags: true - name: Setup Python (32-bit) - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 with: python-version: '3.10' architecture: 'x86' diff --git a/.gitmodules b/.gitmodules index 3934afe4500c..4aa48f5bac5c 100644 --- a/.gitmodules +++ b/.gitmodules @@ -16,3 +16,6 @@ [submodule "numpy/fft/pocketfft"] path = numpy/fft/pocketfft url = https://github.com/mreineck/pocketfft +[submodule "numpy/_core/src/common/pythoncapi-compat"] + path = numpy/_core/src/common/pythoncapi-compat + url = https://github.com/python/pythoncapi-compat diff --git a/.mailmap b/.mailmap index 2d910fe98fea..23a556dd9fc4 100644 --- a/.mailmap +++ b/.mailmap @@ -7,47 +7,66 @@ # # This file is up-to-date if the command git log --format="%aN <%aE>" | sort -u # gives no duplicates. 
- -@8bitmp3 <19637339+8bitmp3@users.noreply.github.com> -@DWesl <22566757+DWesl@users.noreply.github.com> -@Endolith -@GalaxySnail -@Illviljan <14371165+Illviljan@users.noreply.github.com> -@LSchroefl <65246829+LSchroefl@users.noreply.github.com> -@Lbogula -@Lisa <34400837+lyzlisa@users.noreply.github.com> -@Patrick <39380924+xamm@users.noreply.github.com> -@Scian <65375075+hoony6134@users.noreply.github.com> -@amagicmuffin <2014wcheng@gmail.com> -@code-review-doctor -@dg3192 <113710955+dg3192@users.noreply.github.com> -@h-vetinari -@h6197627 <44726212+h6197627@users.noreply.github.com> -@jbCodeHub -@juztamau5 -@legoffant <58195095+legoffant@users.noreply.github.com> -@luzpaz -@luzpaz -@partev -@pkubaj -@pmvz -@pratiklp00 -@sfolje0 -@spacescientist -@tajbinjohn -@tautaus -@xoviat <49173759+xoviat@users.noreply.github.com> -@xoviat <49173759+xoviat@users.noreply.github.com> -@yan-wyb -@yetanothercheer +!8bitmp3 <19637339+8bitmp3@users.noreply.github.com> +!Algorithmist-Girl <36552319+Algorithmist-Girl@users.noreply.github.com> +!DWesl <22566757+DWesl@users.noreply.github.com> +!Endolith +!GalaxySnail +!Illviljan <14371165+Illviljan@users.noreply.github.com> +!LSchroefl <65246829+LSchroefl@users.noreply.github.com> +!Lbogula +!Lisa <34400837+lyzlisa@users.noreply.github.com> +!Patrick <39380924+xamm@users.noreply.github.com> +!Scian <65375075+hoony6134@users.noreply.github.com> +!Searchingdays +!amagicmuffin <2014wcheng@gmail.com> +!code-review-doctor +!cook-1229 <70235336+cook-1229@users.noreply.github.com> +!dg3192 <113710955+dg3192@users.noreply.github.com> +!ellaella12 +!ellaella12 <120079323+ellaella12@users.noreply.github.com> +!h-vetinari +!h6197627 <44726212+h6197627@users.noreply.github.com> +!jbCodeHub +!juztamau5 +!legoffant <58195095+legoffant@users.noreply.github.com> +!liang3zy22 <35164941+liang3zy22@users.noreply.github.com> +!luzpaz +!luzpaz +!matoro +!mcp292 +!mgunyho <20118130+mgunyho@users.noreply.github.com> +!msavinash 
<73682349+msavinash@users.noreply.github.com> +!mykykh <49101849+mykykh@users.noreply.github.com> +!ogidig5 <82846833+ogidig5@users.noreply.github.com> +!partev +!pkubaj +!pmvz +!pojaghi <36278217+pojaghi@users.noreply.github.com> +!pratiklp00 +!sfolje0 +!spacescientist +!stefan6419846 +!stefan6419846 <96178532+stefan6419846@users.noreply.github.com> +!tajbinjohn +!tautaus +!undermyumbrella1 +!vahidmech +!xoviat <49173759+xoviat@users.noreply.github.com> +!xoviat <49173759+xoviat@users.noreply.github.com> +!yan-wyb +!yetanothercheer Aaron Baecker +Adrin Jalali Arun Kota Arun Kota Arun Kota Aarthi Agurusa Adarsh Singh ADARSH SINGH +Aditi Saluja <136131452+salujaditi14@users.noreply.github.com> Andrei Batomunkuev Ajay DS Ajay DS +Ajay Kumar Janapareddi Alan Fontenot Alan Fontenot <36168460+logeaux@users.noreply.github.com> Abdul Muneer @@ -89,6 +108,8 @@ Andrea Bianchi Andrea Bianchi andrea-bia Ankit Dwivedi Ankit Dwivedi +Ankur Singh +Ankur Singh <98346896+ankur0904@users.noreply.github.com> Amir Sarabadani Anas Khan Anatoly Techtonik @@ -99,6 +120,7 @@ Andrea Sangalli <53617841+and-sang@users.noreply.github.c Andreas Klöckner Andreas Schwab Andrei Kucharavy +Andrej Zhilenkov Andrew Lawson Anirudh Subramanian Anne Archibald @@ -109,9 +131,11 @@ Antoine Pitrou Anton Prosekin Anže Starič Arfy Slowy +Arnaud Ma Aron Ahmadia Arun Kota Arun Kota +Arun Pa Arun Palaniappen Arun Persaud Ashutosh Singh @@ -126,6 +150,7 @@ Bhargav V <12525622+brpy@users.noreply.github.com> Bas van Beek <43369155+BvB93@users.noreply.github.com> Behzad Nouri Ben Nathanson +Ben Woodruff Benjamin Root Benjamin Root weathergod Bernardt Duvenhage @@ -152,6 +177,8 @@ Bui Duc Minh <41239569+Mibu287@users.noreply.github.co Caio Agiani Carl Kleffner Carl Leake +Carlos Henrique Hermanny Moreira da Silva +Carlos Henrique Hermanny Moreira da Silva <121122527+carlosilva10260@users.noreply.github.com> Cédric Hannotier Charles Stern <62192187+cisaacstern@users.noreply.github.com> Chiara Marmo @@ -173,6 +200,7 
@@ Chun-Wei Chen Chunlin Fang Chunlin Fang <834352945@qq.com> Chunlin Fang +Cobalt Yang Colin Snyder <8csnyder@gmail.com> <47012605+colinsnyder@users.noreply.github.com> Constanza Fierro Dahyun Kim @@ -205,24 +233,30 @@ Derek Homeier Derek Homeier Derrick Williams Devin Shanahan +Daval Parmar <53395856+DhavalParmar61@users.noreply.github.com> Digya Acharya Dima Pasechnik Dima Pasechnik Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Ding Liu Ding Liu +D.J. Ramones +D.J. Ramones <50655786+djramones@users.noreply.github.com> Dmitriy Shalyga Dmitry Belov Dustan Levenstein <43019642+dustanlevenstein@users.noreply.github.com> Dylan Cutler Ed Schofield Egor Zindy +Élie Goudout +Élie Goudout <114467748+eliegoudout@users.noreply.github.com> Elliott M. Forney Erik M. Bray Erik M. Bray Erik M. Bray Eric Fode Eric Fode Eric Quintero +Eric Xie <161030123+EngineerEricXie@users.noreply.github.com> Ernest N. Mamikonyan Eskild Eriksen Eskild Eriksen <42120229+iameskild@users.noreply.github.com> @@ -270,6 +304,7 @@ Imen Rajhi Inessa Pawson Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com> Irvin Probst +Ishan Koradia <39583356+Ishankoradia@users.noreply.github.com> Ivan Meleshko Isabela Presedo-Floyd Ganesh Kathiresan @@ -277,13 +312,21 @@ Gerhard Hobler Giannis Zapantis Guillaume Peillex Jack J. Woehr +Jacob M. 
Casey +Jakob Stevens Haas <37048747+Jacob-Stevens-Haas@users.noreply.github.com> Jaime Fernandez Jaime Fernandez Jaime Fernandez Jake Close +Jake VanderPlas +Jake VanderPlas +Jake VanderPlas Jakob Jakobson Jakob Jakobson <43045863+jakobjakobson13@users.noreply.github.com> James Bourbeau +James Joseph Thomas +James Joseph Thomas quotuva +James Oliver <46758370+jamesoliverh@users.noreply.github.com> James Webber Jamie Macey Jan Schlüter @@ -325,8 +368,11 @@ Joseph Fox-Rabinovitz Joshua Himmens Joyce Brum +Joren Hammudoglu Jory Klaverstijn Jory Klaverstijn <63673224+JoryKlaverstijn@users.noreply.github.com> +Julia Poo +Julia Poo <57632293+JuliaPoo@users.noreply.github.com> Julian Taylor Julian Taylor Julian Taylor @@ -340,6 +386,8 @@ Kai Striega Kasia Leszek Kasia Leszek <39829548+katleszek@users.noreply.github.com> Karan Dhir +Karthik Gangula <56480632+gangula-karthik@users.noreply.github.com> +Karthik Kaiplody Keller Meier Kenny Huynh Kevin Granados @@ -351,6 +399,8 @@ Khaled Ben Abdallah Okuda Kiko Correoso kikocorreoso Kiko Correoso kikocorreoso Konrad Kapp +Kristoffer Pedersen +Kristoffer Pedersen Kriti Singh Kmol Yuan Kumud Lakara <55556183+kumudlakara@users.noreply.github.com> @@ -362,18 +412,23 @@ Lars Grüter Leona Taric Leona Taric <92495067+LeonaTaric@users.noreply.github.com> Leonardus Chen +Liangyu Zhang Licht Takeuchi Lorenzo Mammana Lillian Zha Lillian Zha +Linus Sommer +Linus Sommer <95619282+linus-md@users.noreply.github.com> Lu Yun Chi <32014765+LuYunChi@users.noreply.github.com> Luis Pedro Coelho +Lucas Colley Luke Zoltan Kelley Madhulika Jain Chambers <53166646+madhulikajc@users.noreply.github.com> Magdalena Proszewska Magdalena Proszewska <38814059+mproszewska@users.noreply.github.com> Malik Idrees Hasan Khan <77000356+MalikIdreesHasanKhan@users.noreply.github.com>C Manoj Kumar +Marcel Loose Marcin Podhajski <36967358+m-podhajski@users.noreply.github.com> Margret Pax Margret Pax <13646646+paxcodes@users.noreply.github.com> @@ -386,6 +441,8 @@ Mark 
Wiebe Mark Wiebe Mars Lee Mars Lee <46167686+MarsBarLee@users.noreply.github.com> +Marten van Kerkwijk +Marten van Kerkwijk Martin Goodson Martin Reinecke Martin Teichmann @@ -395,18 +452,24 @@ Matheus Vieira Portela Matheus Santana Patriarca Mathieu Lamarre Matías Ríos +Matt Hancock Matt Ord Matt Ord <55235095+Matt-Ord@users.noreply.github.com> -Matt Hancock +Matt Thompson +Matthias Bussonnier Martino Sorbaro Márton Gunyhó Mattheus Ueckermann Matthew Barber Matthew Harrigan Matthias Bussonnier +Matthias Schaufelberger +Matthias Schaufelberger <45293673+maisevector@users.noreply.github.com> Matthieu Darbois Matti Picus Matti Picus mattip +Maya Anderson +Maya Anderson <63074550+andersonm-ibm@users.noreply.github.com> Maximilian Konrad Melissa Weber Mendonça Melissa Weber Mendonça @@ -427,15 +490,21 @@ Michel Fruchart Miki Watanabe (渡邉 美希) Miles Cranmer +Milica Dančuk +Milica Dančuk love-bees <33499899+love-bees@users.noreply.github.com> Mircea Akos Bruma Mircea Akos Bruma Mitchell Faas <35742861+Mitchell-Faas@users.noreply.github.com> +Mohaned Qunaibit Muhammad Kasim +Muhammed Muhsin Mukulika Pahari Mukulika Pahari <60316606+Mukulikaa@users.noreply.github.com> Munira Alduraibi Namami Shanker Namami Shanker NamamiShanker +Nathan Goldbaum +Nathan Goldbaum Nathaniel J. 
Smith Naveen Arunachalam naveenarun Neil Girdhar @@ -445,24 +514,32 @@ Nicolas Scheffer Nicolas Scheffer nickdg Nicholas McKibben Nick Minkyu Lee fivemok <9394929+fivemok@users.noreply.github.com> +Nyakku Shigure Norwid Behrnd Norwid Behrnd -Oliver Eberle Oleksiy Kononenko Oleksiy Kononenko <35204136+oleksiyskononenko@users.noreply.github.com> +Oliver Eberle +Olivier Barthelemy +Olivier Mattelaer Omar Ali Omid Rajaei Omid Rajaei <89868505+rajaeinet@users.noreply.github.com> Ondřej Čertík Óscar Villellas Guillén +Pablo Losada +Pablo Losada <48804010+TheHawz@users.noreply.github.com> Panos Mavrogiorgos Pantelis Antonoudiou Pantelis Antonoudiou Pat Miller patmiller Paul Ivanov Paul Ivanov -Paul YS Lee Paul Paul Jacobson +Paul Juma Otieno +Paul Juma Otieno <103896399+otieno-juma@users.noreply.github.com> +Paul Reece +Paul YS Lee Paul Pey Lian Lim Pey Lian Lim <2090236+pllim@users.noreply.github.com> Pearu Peterson @@ -488,6 +565,7 @@ Rakesh Vasudevan Ralf Gommers Ralf Gommers rgommers Rehas Sachdeva +Richard Howe <45905457+rmhowe425@users.noreply.github.com> Ritta Narita Riya Sharma Robert Kern @@ -499,6 +577,7 @@ Rohit Goswami Roland Kaufmann Roman Yurchak Ronan Lamy Ronan Lamy +Rostan Tabet Roy Jacobson Russell Hewett Ryan Blakemore @@ -514,6 +593,7 @@ Sam Radhakrishnan = <=> # committed without an email address Samesh Lakhotia Samesh Lakhotia <43701530+sameshl@users.noreply.github.com> Sami Salonen +Samuel Albanie Sanchez Gonzalez Alvaro Sanya Sinha <83265366+ssanya942@users.noreply.github.com> Saransh Chopra @@ -521,6 +601,8 @@ Saullo Giovani Saurabh Mehta Sayantika Banik Schrijvers Luc +Sean Cheah +Sean Cheah <67928790+thalassemia@users.noreply.github.com> Sebastian Berg Sebastian Schleehauf Serge Guelton @@ -539,6 +621,7 @@ Simon Gasse Simon Gasse Sista Seetaram Sista Seetaram <65669128+sistaseetaram@users.noreply.github.com> +Slava Gorloff <31761951+gorloffslava@users.noreply.github.com> Søren Rasmussen <47032123+sorenrasmussenai@users.noreply.github.com> 
Spencer Hill Srimukh Sripada @@ -586,6 +669,8 @@ Vrinda Narayan Vrinda Narayan Vrinda Narayan <48102157+vrindaaa@users.noreply.github.com> Wansoo Kim +Warrick Ball +Warrick Ball Warren Weckesser Warren Weckesser Weitang Li @@ -594,9 +679,12 @@ William Spotz Wim Glenn Wojtek Ruszczewski Wojciech Rzadkowski <33913808+wrzadkow@users.noreply.github.com> +Xiangyi Wang Yamada Fuyuka Yang Hau Yang Hau +Yash Pethe +Yash Pethe <83630710+patient74@users.noreply.github.com> Yashasvi Misra Yashasvi Misra <54177363+yashasvimisra2798@users.noreply.github.com> Yogesh Raisinghani <46864533+raisinghanii@users.noreply.github.com> @@ -607,6 +695,7 @@ Yuji Kanagawa Yuki K Yury Kirienko Zac Hatfield-Dodds +Zach Brugh <111941670+zachbrugh@users.noreply.github.com> Zé Vinícius Zhang Na Zixu Zhao @@ -614,4 +703,5 @@ Ziyan Zhou Zieji Pohz Zieji Pohz <8103276+zjpoh@users.noreply.github.com> Zolboo Erdenebaatar +Zolisa Bleki Zolisa Bleki <44142765+zoj613@users.noreply.github.com> diff --git a/.spin/cmds.py b/.spin/cmds.py index 11e2b1b0e2d3..0773578de913 100644 --- a/.spin/cmds.py +++ b/.spin/cmds.py @@ -1,11 +1,7 @@ import os import shutil -import sys -import argparse -import tempfile import pathlib import shutil -import json import pathlib import importlib import subprocess @@ -99,7 +95,7 @@ def changelog(ctx, token, revision_range): ) @click.argument("meson_args", nargs=-1) @click.pass_context -def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False): +def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose=False, quiet=False, *args, **kwargs): """🔧 Build package with Meson/ninja and install MESON_ARGS are passed through e.g.: @@ -136,11 +132,13 @@ def build(ctx, meson_args, with_scipy_openblas, jobs=None, clean=False, verbose= @click.option( '--jobs', '-j', metavar='N_JOBS', - default="auto", - help="Number of parallel build jobs" + # Avoids pydata_sphinx_theme extension warning from default="auto". 
+ default="1", + help=("Number of parallel build jobs." + "Can be set to `auto` to use all cores.") ) @click.pass_context -def docs(ctx, sphinx_target, clean, first_build, jobs): +def docs(ctx, sphinx_target, clean, first_build, jobs, *args, **kwargs): """📖 Build Sphinx documentation By default, SPHINXOPTS="-W", raising errors on warnings. @@ -163,6 +161,21 @@ def docs(ctx, sphinx_target, clean, first_build, jobs): """ meson.docs.ignore_unknown_options = True + # See https://github.com/scientific-python/spin/pull/199 + # Can be changed when spin updates to 0.11, and moved to pyproject.toml + if clean: + clean_dirs = [ + './doc/build/', + './doc/source/reference/generated', + './doc/source/reference/random/bit_generators/generated', + './doc/source/reference/random/generated', + ] + + for target_dir in clean_dirs: + if os.path.isdir(target_dir): + print(f"Removing {target_dir!r}") + shutil.rmtree(target_dir) + # Run towncrier without staging anything for commit. This is the way to get # release notes snippets included in a local doc build. cmd = ['towncrier', 'build', '--version', '2.x.y', '--keep', '--draft'] @@ -209,7 +222,7 @@ def docs(ctx, sphinx_target, clean, first_build, jobs): '--verbose', '-v', is_flag=True, default=False ) @click.pass_context -def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): +def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose, *args, **kwargs): """🔧 Run tests PYTEST_ARGS are passed through directly to pytest, e.g.: @@ -233,7 +246,7 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): spin test -- -k "geometric and not rgeometric" By default, spin will run `-m 'not slow'`. To run the full test suite, use - `spin -m full` + `spin test -m full` For more, see `pytest --help`. 
""" # noqa: E501 @@ -241,6 +254,9 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): pytest_args = ('numpy',) if '-m' not in pytest_args: + if len(pytest_args) == 1 and not tests: + tests = pytest_args[0] + pytest_args = () if markexpr != "full": pytest_args = ('-m', markexpr) + pytest_args @@ -260,6 +276,152 @@ def test(ctx, pytest_args, markexpr, n_jobs, tests, verbose): ctx.forward(meson.test) +@click.command() +@click.argument("pytest_args", nargs=-1) +@click.option( + "-j", + "n_jobs", + metavar='N_JOBS', + default="1", + help=("Number of parallel jobs for testing. " + "Can be set to `auto` to use all cores.") +) +@click.option( + '--verbose', '-v', is_flag=True, default=False +) +@click.pass_context +def check_docs(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): + """🔧 Run doctests of objects in the public API. + + PYTEST_ARGS are passed through directly to pytest, e.g.: + + spin check-docs -- --pdb + + To run tests on a directory: + + \b + spin check-docs numpy/linalg + + To report the durations of the N slowest doctests: + + spin check-docs -- --durations=N + + To run doctests that match a given pattern: + + \b + spin check-docs -- -k "slogdet" + spin check-docs numpy/linalg -- -k "det and not slogdet" + + \b + Note: + ----- + + \b + - This command only runs doctests and skips everything under tests/ + - This command only doctests public objects: those which are accessible + from the top-level `__init__.py` file. 
+ + """ # noqa: E501 + try: + # prevent obscure error later + import scipy_doctest + except ModuleNotFoundError as e: + raise ModuleNotFoundError("scipy-doctest not installed") from e + if (not pytest_args): + pytest_args = ('numpy',) + + if (n_jobs != "1") and ('-n' not in pytest_args): + pytest_args = ('-n', str(n_jobs)) + pytest_args + + if verbose: + pytest_args = ('-v',) + pytest_args + + # turn doctesting on: + doctest_args = ( + '--doctest-modules', + '--doctest-collect=api' + ) + + pytest_args = pytest_args + doctest_args + + ctx.params['pytest_args'] = pytest_args + + for extra_param in ('n_jobs', 'verbose'): + del ctx.params[extra_param] + + ctx.forward(meson.test) + + +@click.command() +@click.argument("pytest_args", nargs=-1) +@click.option( + "-j", + "n_jobs", + metavar='N_JOBS', + default="1", + help=("Number of parallel jobs for testing. " + "Can be set to `auto` to use all cores.") +) +@click.option( + '--verbose', '-v', is_flag=True, default=False +) +@click.pass_context +def check_tutorials(ctx, pytest_args, n_jobs, verbose, *args, **kwargs): + """🔧 Run doctests of user-facing rst tutorials. + + To test all tutorials in the numpy/doc/source/user/ directory, use + + spin check-tutorials + + To run tests on a specific RST file: + + \b + spin check-tutorials numpy/doc/source/user/absolute-beginners.rst + + \b + Note: + ----- + + \b + - This command only runs doctests and skips everything under tests/ + - This command only doctests public objects: those which are accessible + from the top-level `__init__.py` file. + + """ # noqa: E501 + # handle all of + # - `spin check-tutorials` (pytest_args == ()) + # - `spin check-tutorials path/to/rst`, and + # - `spin check-tutorials path/to/rst -- --durations=3` + if (not pytest_args) or all(arg.startswith('-') for arg in pytest_args): + pytest_args = ('numpy/doc/source/user',) + pytest_args + + # make all paths relative to the numpy source folder + pytest_args = tuple( + str(curdir / '..' / '..' 
/ arg) if not arg.startswith('-') else arg + for arg in pytest_args + ) + + if (n_jobs != "1") and ('-n' not in pytest_args): + pytest_args = ('-n', str(n_jobs)) + pytest_args + + if verbose: + pytest_args = ('-v',) + pytest_args + + # turn doctesting on: + doctest_args = ( + '--doctest-glob=*rst', + ) + + pytest_args = pytest_args + doctest_args + + ctx.params['pytest_args'] = pytest_args + + for extra_param in ('n_jobs', 'verbose'): + del ctx.params[extra_param] + + ctx.forward(meson.test) + + # From scipy: benchmarks/benchmarks/common.py def _set_mem_rlimit(max_mem=None): """ @@ -313,7 +475,7 @@ def _run_asv(cmd): '/usr/local/lib/ccache', '/usr/local/lib/f90cache' ]) env = os.environ - env['PATH'] = f'EXTRA_PATH:{PATH}' + env['PATH'] = f'{EXTRA_PATH}{os.pathsep}{PATH}' # Control BLAS/LAPACK threads env['OPENBLAS_NUM_THREADS'] = '1' @@ -490,7 +652,7 @@ def bench(ctx, tests, compare, verbose, quick, commits): }) @click.argument("python_args", metavar='', nargs=-1) @click.pass_context -def python(ctx, python_args): +def python(ctx, python_args, *args, **kwargs): """🐍 Launch Python shell with PYTHONPATH set OPTIONS are passed through directly to Python, e.g.: @@ -621,16 +783,10 @@ def notes(ctx, version_override): ) # towncrier build --version 2.1 --yes cmd = ["towncrier", "build", "--version", version, "--yes"] - try: - p = util.run( - cmd=cmd, - sys_exit=False, - output=True, - encoding="utf-8" - ) - except subprocess.SubprocessError as e: + p = util.run(cmd=cmd, sys_exit=False, output=True, encoding="utf-8") + if p.returncode != 0: raise click.ClickException( - f"`towncrier` failed returned {e.returncode} with error `{e.stderr}`" + f"`towncrier` failed returned {p.returncode} with error `{p.stderr}`" ) output_path = project_config['tool.towncrier.filename'].format(version=version) diff --git a/INSTALL.rst b/INSTALL.rst index e305e29facdd..eea2e3c9d7de 100644 --- a/INSTALL.rst +++ b/INSTALL.rst @@ -14,13 +14,13 @@ Prerequisites Building NumPy requires the 
following installed software: -1) Python__ 3.9.x or newer. +1) Python__ 3.10.x or newer. Please note that the Python development headers also need to be installed, e.g., on Debian/Ubuntu one needs to install both `python3` and `python3-dev`. On Windows and macOS this is normally not an issue. -2) Cython >= 3.0 +2) Cython >= 3.0.6 3) pytest__ (optional) @@ -44,7 +44,7 @@ Hypothesis__ https://hypothesis.readthedocs.io/en/latest/ .. note:: More extensive information on building NumPy is maintained at - https://numpy.org/devdocs/user/building.html#building-from-source + https://numpy.org/devdocs/building/#building-numpy-from-source Basic installation diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt index 815c9a1dba33..b3d8aa8bed06 100644 --- a/LICENSES_bundled.txt +++ b/LICENSES_bundled.txt @@ -29,3 +29,8 @@ Name: spin Files: .spin/cmds.py License: BSD-3 For license text, see .spin/LICENSE + +Name: tempita +Files: numpy/_build_utils/tempita/* +License: MIT + For details, see numpy/_build_utils/tempita/LICENCE.txt diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 29fb0f7a8974..2393a96d3f86 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -44,7 +44,7 @@ stages: steps: - task: UsePythonVersion@0 inputs: - versionSpec: '3.9' + versionSpec: '3.10' addToPath: true architecture: 'x64' - script: >- @@ -57,7 +57,7 @@ stages: displayName: 'Run Lint Checks' failOnStderr: true - - job: Linux_Python_39_32bit_full_with_asserts + - job: Linux_Python_310_32bit_full_with_asserts pool: vmImage: 'ubuntu-20.04' steps: @@ -89,8 +89,8 @@ stages: TEST_MODE: full BITS: 64 _USE_BLAS_ILP64: '1' - PyPy39-64bit-fast: - PYTHON_VERSION: 'pypy3.9' + PyPy310-64bit-fast: + PYTHON_VERSION: 'pypy3.10' PYTHON_ARCH: 'x64' TEST_MODE: fast BITS: 64 diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml index cc458723f28f..0baf374e1e3f 100644 --- a/azure-steps-windows.yml +++ b/azure-steps-windows.yml @@ -14,10 +14,7 @@ steps: displayName: 'Install dependencies; some 
are optional to avoid test skips' - powershell: | - choco install -y --stoponfirstfailure unzip choco install -y --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite - choco install --stoponfirstfailure ninja - echo "##vso[task.setvariable variable=RTOOLS43_HOME]c:\rtools43" displayName: 'Install utilities' - powershell: | @@ -42,7 +39,7 @@ steps: - powershell: | cd tools # avoid root dir to not pick up source tree # Get a gfortran onto the path for f2py tests - $env:PATH = "$env:RTOOLS43_HOME\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" + $env:PATH = "c:\\rtools43\\x86_64-w64-mingw32.static.posix\\bin;$env:PATH" If ( $env:TEST_MODE -eq "full" ) { pytest --pyargs numpy -rsx --junitxml=junit/test-results.xml } else { diff --git a/benchmarks/README.rst b/benchmarks/README.rst index e44f8fe02f1e..e7e42a377819 100644 --- a/benchmarks/README.rst +++ b/benchmarks/README.rst @@ -127,4 +127,4 @@ Some things to consider: you are benchmarking an algorithm, it is unlikely that a user will be executing said algorithm on a newly created empty/zero array. One can force pagefaults to occur in the setup phase either by calling ``np.ones`` or - ``arr.fill(value)`` after creating the array, + ``arr.fill(value)`` after creating the array. 
diff --git a/benchmarks/benchmarks/bench_clip.py b/benchmarks/benchmarks/bench_clip.py new file mode 100644 index 000000000000..ce0511da82a4 --- /dev/null +++ b/benchmarks/benchmarks/bench_clip.py @@ -0,0 +1,35 @@ +from .common import Benchmark + +import numpy as np + + +class ClipFloat(Benchmark): + param_names = ["dtype", "size"] + params = [ + [np.float32, np.float64, np.longdouble], + [100, 100_000] + ] + + def setup(self, dtype, size): + rnd = np.random.RandomState(994584855) + self.array = rnd.random(size=size).astype(dtype) + self.dataout = np.full_like(self.array, 0.5) + + def time_clip(self, dtype, size): + np.clip(self.array, 0.125, 0.875, self.dataout) + + +class ClipInteger(Benchmark): + param_names = ["dtype", "size"] + params = [ + [np.int32, np.int64], + [100, 100_000] + ] + + def setup(self, dtype, size): + rnd = np.random.RandomState(1301109903) + self.array = rnd.randint(256, size=size, dtype=dtype) + self.dataout = np.full_like(self.array, 128) + + def time_clip(self, dtype, size): + np.clip(self.array, 32, 224, self.dataout) diff --git a/benchmarks/benchmarks/bench_creation.py b/benchmarks/benchmarks/bench_creation.py index 76d871e2d411..8c06c2125940 100644 --- a/benchmarks/benchmarks/bench_creation.py +++ b/benchmarks/benchmarks/bench_creation.py @@ -13,7 +13,8 @@ class MeshGrid(Benchmark): timeout = 10 def setup(self, size, ndims, ind, ndtype): - self.grid_dims = [(np.random.ranf(size)).astype(ndtype) for + rnd = np.random.RandomState(1864768776) + self.grid_dims = [(rnd.random_sample(size)).astype(ndtype) for x in range(ndims)] def time_meshgrid(self, size, ndims, ind, ndtype): diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index d4b08a3a0e65..657db7d2cac7 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -154,17 +154,19 @@ class SortGenerator: @staticmethod @memoize - def random(size, dtype): + def random(size, dtype, rnd): 
""" Returns a randomly-shuffled array. """ arr = np.arange(size, dtype=dtype) + rnd = np.random.RandomState(1792364059) np.random.shuffle(arr) + rnd.shuffle(arr) return arr @staticmethod @memoize - def ordered(size, dtype): + def ordered(size, dtype, rnd): """ Returns an ordered array. """ @@ -172,7 +174,7 @@ def ordered(size, dtype): @staticmethod @memoize - def reversed(size, dtype): + def reversed(size, dtype, rnd): """ Returns an array that's in descending order. """ @@ -187,7 +189,7 @@ def reversed(size, dtype): @staticmethod @memoize - def uniform(size, dtype): + def uniform(size, dtype, rnd): """ Returns an array that has the same value everywhere. """ @@ -195,20 +197,7 @@ def uniform(size, dtype): @staticmethod @memoize - def swapped_pair(size, dtype, swap_frac): - """ - Returns an ordered array, but one that has ``swap_frac * size`` - pairs swapped. - """ - a = np.arange(size, dtype=dtype) - for _ in range(int(size * swap_frac)): - x, y = np.random.randint(0, size, 2) - a[x], a[y] = a[y], a[x] - return a - - @staticmethod - @memoize - def sorted_block(size, dtype, block_size): + def sorted_block(size, dtype, block_size, rnd): """ Returns an array with blocks that are all sorted. """ @@ -221,35 +210,6 @@ def sorted_block(size, dtype, block_size): b.extend(a[i::block_num]) return np.array(b) - @classmethod - @memoize - def random_unsorted_area(cls, size, dtype, frac, area_size=None): - """ - This type of array has random unsorted areas such that they - compose the fraction ``frac`` of the original array. - """ - if area_size is None: - area_size = cls.AREA_SIZE - - area_num = int(size * frac / area_size) - a = np.arange(size, dtype=dtype) - for _ in range(area_num): - start = np.random.randint(size-area_size) - end = start + area_size - np.random.shuffle(a[start:end]) - return a - - @classmethod - @memoize - def random_bubble(cls, size, dtype, bubble_num, bubble_size=None): - """ - This type of array has ``bubble_num`` random unsorted areas. 
- """ - if bubble_size is None: - bubble_size = cls.BUBBLE_SIZE - frac = bubble_size * bubble_num / size - - return cls.random_unsorted_area(size, dtype, frac, bubble_size) class Sort(Benchmark): """ @@ -270,15 +230,6 @@ class Sort(Benchmark): ('sorted_block', 10), ('sorted_block', 100), ('sorted_block', 1000), - # ('swapped_pair', 0.01), - # ('swapped_pair', 0.1), - # ('swapped_pair', 0.5), - # ('random_unsorted_area', 0.5), - # ('random_unsorted_area', 0.1), - # ('random_unsorted_area', 0.01), - # ('random_bubble', 1), - # ('random_bubble', 5), - # ('random_bubble', 10), ], ] param_names = ['kind', 'dtype', 'array_type'] @@ -287,9 +238,9 @@ class Sort(Benchmark): ARRAY_SIZE = 10000 def setup(self, kind, dtype, array_type): - np.random.seed(1234) + rnd = np.random.RandomState(507582308) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:]) + self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_sort(self, kind, dtype, array_type): # Using np.sort(...) instead of arr.sort(...) because it makes a copy. 
@@ -321,10 +272,10 @@ class Partition(Benchmark): ARRAY_SIZE = 100000 def setup(self, dtype, array_type, k): - np.random.seed(1234) + rnd = np.random.seed(2136297818) array_class = array_type[0] - self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, - dtype, *array_type[1:]) + self.arr = getattr(SortGenerator, array_class)( + self.ARRAY_SIZE, dtype, *array_type[1:], rnd) def time_partition(self, dtype, array_type, k): temp = np.partition(self.arr, k) diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index f792116a6b9c..dc8815ffe95b 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -66,10 +66,10 @@ class Nan(Benchmark): ] def setup(self, array_size, percent_nans): - np.random.seed(123) + rnd = np.random.RandomState(1819780348) # produce a randomly shuffled array with the # approximate desired percentage np.nan content - base_array = np.random.uniform(size=array_size) + base_array = rnd.uniform(size=array_size) base_array[base_array < percent_nans / 100.] 
= np.nan self.arr = base_array diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py index 307735723707..f3eb819c1803 100644 --- a/benchmarks/benchmarks/bench_linalg.py +++ b/benchmarks/benchmarks/bench_linalg.py @@ -72,7 +72,7 @@ def time_tensordot_a_b_axes_1_0_0_1(self): class Linalg(Benchmark): - params = set(TYPES1) - set(['float16']) + params = sorted(list(set(TYPES1) - set(['float16']))) param_names = ['dtype'] def setup(self, typename): diff --git a/benchmarks/benchmarks/bench_ma.py b/benchmarks/benchmarks/bench_ma.py index 26c977c9748c..f17da1a9ebe1 100644 --- a/benchmarks/benchmarks/bench_ma.py +++ b/benchmarks/benchmarks/bench_ma.py @@ -265,3 +265,47 @@ def time_where(self, mtype, msize): fun(self.nmxs > 2, self.nmxs, self.nmys) elif msize == 'big': fun(self.nmxl > 2, self.nmxl, self.nmyl) + + +class Cov(Benchmark): + param_names = ["size"] + params = [["small", "large"]] + + def setup(self, size): + # Set the proportion of masked values. + prop_mask = 0.2 + # Set up a "small" array with 10 vars and 10 obs. + rng = np.random.default_rng() + data = rng.random((10, 10), dtype=np.float32) + self.small = np.ma.array(data, mask=(data <= prop_mask)) + # Set up a "large" array with 100 vars and 100 obs. + data = rng.random((100, 100), dtype=np.float32) + self.large = np.ma.array(data, mask=(data <= prop_mask)) + + def time_cov(self, size): + if size == "small": + np.ma.cov(self.small) + if size == "large": + np.ma.cov(self.large) + + +class Corrcoef(Benchmark): + param_names = ["size"] + params = [["small", "large"]] + + def setup(self, size): + # Set the proportion of masked values. + prop_mask = 0.2 + # Set up a "small" array with 10 vars and 10 obs. + rng = np.random.default_rng() + data = rng.random((10, 10), dtype=np.float32) + self.small = np.ma.array(data, mask=(data <= prop_mask)) + # Set up a "large" array with 100 vars and 100 obs. 
+ data = rng.random((100, 100), dtype=np.float32) + self.large = np.ma.array(data, mask=(data <= prop_mask)) + + def time_corrcoef(self, size): + if size == "small": + np.ma.corrcoef(self.small) + if size == "large": + np.ma.corrcoef(self.large) diff --git a/benchmarks/benchmarks/bench_polynomial.py b/benchmarks/benchmarks/bench_polynomial.py new file mode 100644 index 000000000000..ab2e95b7d1ab --- /dev/null +++ b/benchmarks/benchmarks/bench_polynomial.py @@ -0,0 +1,29 @@ +from .common import Benchmark + +import numpy as np + + +class Polynomial(Benchmark): + + def setup(self): + self.polynomial_degree2 = np.polynomial.Polynomial(np.array([1, 2])) + self.array3 = np.linspace(0, 1, 3) + self.array1000 = np.linspace(0, 1, 10_000) + self.float64 = np.float64(1.0) + + def time_polynomial_evaluation_scalar(self): + self.polynomial_degree2(self.float64) + + def time_polynomial_evaluation_python_float(self): + self.polynomial_degree2(1.0) + + def time_polynomial_evaluation_array_3(self): + self.polynomial_degree2(self.array3) + + def time_polynomial_evaluation_array_1000(self): + self.polynomial_degree2(self.array1000) + + def time_polynomial_addition(self): + _ = self.polynomial_degree2 + self.polynomial_degree2 + + diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py index 9482eb04de97..d987426694e9 100644 --- a/benchmarks/benchmarks/bench_random.py +++ b/benchmarks/benchmarks/bench_random.py @@ -147,10 +147,11 @@ class Bounded(Benchmark): ]] def setup(self, bitgen, args): + seed = 707250673 if bitgen == 'numpy': - self.rg = np.random.RandomState() + self.rg = np.random.RandomState(seed) else: - self.rg = Generator(getattr(np.random, bitgen)()) + self.rg = Generator(getattr(np.random, bitgen)(seed)) self.rg.random() def time_bounded(self, bitgen, args): diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py index 72c2a6132e4e..eb13ff969353 100644 --- 
a/benchmarks/benchmarks/bench_shape_base.py +++ b/benchmarks/benchmarks/bench_shape_base.py @@ -152,3 +152,19 @@ def time_scalar_kron(self): def time_mat_kron(self): np.kron(self.large_mat, self.large_mat) + +class AtLeast1D(Benchmark): + """Benchmarks for np.atleast_1d""" + + def setup(self): + self.x = np.array([1, 2, 3]) + self.zero_d = np.float64(1.) + + def time_atleast_1d(self): + np.atleast_1d(self.x, self.x, self.x) + + def time_atleast_1d_reshape(self): + np.atleast_1d(self.zero_d, self.zero_d, self.zero_d) + + def time_atleast_1d_single_argument(self): + np.atleast_1d(self.x) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 2ac820bc2e5c..3545d939cf36 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -21,13 +21,23 @@ 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh', - 'true_divide', 'trunc'] + 'true_divide', 'trunc', 'vecdot'] arrayfuncdisp = ['real', 'round'] +for name in ufuncs: + f = getattr(np, name, None) + if not isinstance(f, np.ufunc): + raise ValueError(f"Bench target `np.{name}` is not a ufunc") -for name in dir(np): - if isinstance(getattr(np, name, None), np.ufunc) and name not in ufuncs: - print("Missing ufunc %r" % (name,)) +all_ufuncs = (getattr(np, name, None) for name in dir(np)) +all_ufuncs = set(filter(lambda f: isinstance(f, np.ufunc), all_ufuncs)) +bench_ufuncs = set(getattr(np, name, None) for name in ufuncs) + +missing_ufuncs = all_ufuncs - bench_ufuncs +if len(missing_ufuncs) > 0: + missing_ufunc_names = [f.__name__ for f in missing_ufuncs] + raise NotImplementedError( + "Missing benchmarks for ufuncs %r" % missing_ufunc_names) class ArrayFunctionDispatcher(Benchmark): @@ -487,7 +497,7 @@ def time_floor_divide_int(self, dtype, size): class Scalar(Benchmark): def setup(self): self.x = np.asarray(1.0) - self.y = 
np.asarray((1.0 + 1j)) + self.y = np.asarray(1.0 + 1j) self.z = complex(1.0, 1.0) def time_add_scalar(self): @@ -559,21 +569,40 @@ def time_add_reduce_arg_parsing(self, arg_pack): np.add.reduce(*arg_pack.args, **arg_pack.kwargs) class BinaryBench(Benchmark): - def setup(self): + params = [np.float32, np.float64] + param_names = ['dtype'] + + def setup(self, dtype): N = 1000000 - self.a32 = np.random.rand(N).astype(np.float32) - self.b32 = np.random.rand(N).astype(np.float32) - self.a64 = np.random.rand(N).astype(np.float64) - self.b64 = np.random.rand(N).astype(np.float64) + self.a = np.random.rand(N).astype(dtype) + self.b = np.random.rand(N).astype(dtype) + + def time_pow(self, dtype): + np.power(self.a, self.b) - def time_pow_32(self): - np.power(self.a32, self.b32) + def time_pow_2(self, dtype): + np.power(self.a, 2.0) - def time_pow_64(self): - np.power(self.a64, self.b64) + def time_pow_half(self, dtype): + np.power(self.a, 0.5) + + def time_atan2(self, dtype): + np.arctan2(self.a, self.b) + +class BinaryBenchInteger(Benchmark): + params = [np.int32, np.int64] + param_names = ['dtype'] + + def setup(self, dtype): + N = 1000000 + self.a = np.random.randint(20, size=N).astype(dtype) + self.b = np.random.randint(4, size=N).astype(dtype) + + def time_pow(self, dtype): + np.power(self.a, self.b) - def time_atan2_32(self): - np.arctan2(self.a32, self.b32) + def time_pow_two(self, dtype): + np.power(self.a, 2) - def time_atan2_64(self): - np.arctan2(self.a64, self.b64) + def time_pow_five(self, dtype): + np.power(self.a, 5) diff --git a/benchmarks/benchmarks/bench_ufunc_strides.py b/benchmarks/benchmarks/bench_ufunc_strides.py index 183c7c4fb75e..1c7eb0a68e2c 100644 --- a/benchmarks/benchmarks/bench_ufunc_strides.py +++ b/benchmarks/benchmarks/bench_ufunc_strides.py @@ -8,7 +8,7 @@ class _AbstractBinary(Benchmark): params = [] - param_names = ['ufunc', 'stride_in0', 'stride_in1' 'stride_out', 'dtype'] + param_names = ['ufunc', 'stride_in0', 'stride_in1', 
'stride_out', 'dtype'] timeout = 10 arrlen = 10000 data_finite = True diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index d4c1540ff203..5cbc2f38f31d 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -41,7 +41,7 @@ @lru_cache(typed=True) def get_values(): - rnd = np.random.RandomState(1) + rnd = np.random.RandomState(1804169117) values = np.tile(rnd.uniform(0, 100, size=nx*ny//10), 10) return values @@ -60,7 +60,7 @@ def get_square(dtype): @lru_cache(typed=True) def get_squares(): - return {t: get_square(t) for t in TYPES1} + return {t: get_square(t) for t in sorted(TYPES1)} @lru_cache(typed=True) @@ -72,14 +72,7 @@ def get_square_(dtype): @lru_cache(typed=True) def get_squares_(): # smaller squares - return {t: get_square_(t) for t in TYPES1} - - -@lru_cache(typed=True) -def get_vectors(): - # vectors - vectors = {t: s[0] for t, s in get_squares().items()} - return vectors + return {t: get_square_(t) for t in sorted(TYPES1)} @lru_cache(typed=True) diff --git a/building_with_meson.md b/building_with_meson.md index ec7625b0d2c3..6498d3659bb0 100644 --- a/building_with_meson.md +++ b/building_with_meson.md @@ -1,9 +1,8 @@ # Building with Meson _Note: this is for early adopters. It has been tested on Linux and macOS, and -with Python 3.9-3.12. Windows will be tested soon. There is one CI job to keep -the build stable. This may have rough edges, please open an issue if you run -into a problem._ +with Python 3.10-3.12. There is one CI job to keep the build stable. This may +have rough edges, please open an issue if you run into a problem._ ### Developer build diff --git a/doc/Makefile b/doc/Makefile index 2f04c7084ce9..57d063e9c936 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -11,13 +11,13 @@ PYVER:=$(shell python3 -c 'from sys import version_info as v; print("{0}.{1}".fo PYTHON = python$(PYVER) # You can set these variables from the command line. 
-SPHINXOPTS ?= +SPHINXOPTS ?= -W SPHINXBUILD ?= LANG=C sphinx-build PAPER ?= DOXYGEN ?= doxygen # For merging a documentation archive into a git checkout of numpy/doc # Turn a tag like v1.18.0 into 1.18 -# Use sed -n -e 's/patttern/match/p' to return a blank value if no match +# Use sed -n -e 's/pattern/match/p' to return a blank value if no match TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p') FILES= @@ -25,7 +25,7 @@ FILES= # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -WT --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \ +ALLSPHINXOPTS = -T --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \ $(SPHINXOPTS) source .PHONY: help clean html web htmlhelp latex changes linkcheck \ diff --git a/doc/Py3K.rst b/doc/Py3K.rst deleted file mode 100644 index 3f312f7ec53a..000000000000 --- a/doc/Py3K.rst +++ /dev/null @@ -1,903 +0,0 @@ -.. -*-rst-*- - -********************************************* -Developer notes on the transition to Python 3 -********************************************* - -:date: 2010-07-11 -:author: Charles R. Harris -:author: Pauli Virtanen - -General -======= - -NumPy has now been ported to Python 3. - -Some glitches may still be present; however, we are not aware of any -significant ones, the test suite passes. - - -Resources ---------- - -Information on porting to 3K: - -- https://wiki.python.org/moin/cporting -- https://wiki.python.org/moin/PortingExtensionModulesToPy3k - - -Prerequisites -------------- - -The Nose test framework has currently (Nov 2009) no released Python 3 -compatible version. Its 3K SVN branch, however, works quite well: - -- http://python-nose.googlecode.com/svn/branches/py3k - - -Known semantic changes on Py2 -============================= - -As a side effect, the Py3 adaptation has caused the following semantic -changes that are visible on Py2. 
- -* Objects (except bytes and str) that implement the PEP 3118 array interface - will behave as ndarrays in `array(...)` and `asarray(...)`; the same way - as if they had ``__array_interface__`` defined. - -* Otherwise, there are no known semantic changes. - - -Known semantic changes on Py3 -============================= - -The following semantic changes have been made on Py3: - -* Division: integer division is by default true_divide, also for arrays. - -* Dtype field names are Unicode. - -* Only unicode dtype field titles are included in fields dict. - -* :pep:`3118` buffer objects will behave differently from Py2 buffer objects - when used as an argument to `array(...)`, `asarray(...)`. - - In Py2, they would cast to an object array. - - In Py3, they cast similarly as objects having an - ``__array_interface__`` attribute, ie., they behave as if they were - an ndarray view on the data. - - - -Python code -=========== - - -2to3 in setup.py ----------------- - -Currently, setup.py calls 2to3 automatically to convert Python sources -to Python 3 ones, and stores the results under:: - - build/py3k - -Only changed files will be re-converted when setup.py is called a second -time, making development much faster. - -Currently, this seems to handle all of the necessary Python code -conversion. - -Not all of the 2to3 transformations are appropriate for all files. -Especially, 2to3 seems to be quite trigger-happy in replacing e.g. -``unicode`` by ``str`` which causes problems in ``defchararray.py``. -For files that need special handling, add entries to -``tools/py3tool.py``. 
- - - -numpy.compat.py3k ------------------ - -There are some utility functions needed for 3K compatibility in -``numpy.compat.py3k`` -- they can be imported from ``numpy.compat``: - -- bytes, unicode: bytes and unicode constructors -- asbytes: convert string to bytes (no-op on Py2) -- asbytes_nested: convert strings in lists to Bytes -- asunicode: convert string to unicode -- asunicode_nested: convert strings in lists to Unicode -- asstr: convert item to the str type -- getexception: get current exception (see below) -- isfileobj: detect Python file objects -- strchar: character for Unicode (Py3) or Strings (Py2) -- open_latin1: open file in the latin1 text mode - -More can be added as needed. - - -numpy.f2py ----------- - -F2py is ported to Py3. - - -Bytes vs. strings ------------------ - -At many points in NumPy, bytes literals are needed. These can be created via -numpy.compat.asbytes and asbytes_nested. - - -Exception syntax ----------------- - -Syntax change: "except FooException, bar:" -> "except FooException as bar:" - -This is taken care by 2to3, however. - - -Relative imports ----------------- - -The new relative import syntax, - - from . import foo - -is not available on Py2.4, so we can't simply use it. - -Using absolute imports everywhere is probably OK, if they just happen -to work. - -2to3, however, converts the old syntax to new syntax, so as long as we -use the converter, it takes care of most parts. - - -Print ------ - -The Print statement changed to a builtin function in Py3. - -Also this is taken care of by 2to3. - -``types`` module ----------------- - -The following items were removed from `types` module in Py3: - -- StringType (Py3: `bytes` is equivalent, to some degree) -- InstanceType (Py3: ???) 
-- IntType (Py3: no equivalent) -- LongType (Py3: equivalent `long`) -- FloatType (Py3: equivalent `float`) -- BooleanType (Py3: equivalent `bool`) -- ComplexType (Py3: equivalent `complex`) -- UnicodeType (Py3: equivalent `str`) -- BufferType (Py3: more-or-less equivalent `memoryview`) - -In ``numerictypes.py``, the "common" types were replaced by their -plain equivalents, and `IntType` was dropped. - - -numpy._core.numerictypes ------------------------ - -In numerictypes, types on Python 3 were changed so that: - -=========== ============ -Scalar type Value -=========== ============ -str_ This is the basic Unicode string type on Py3 -bytes_ This is the basic Byte-string type on Py3 -string_ bytes_ alias -unicode_ str_ alias -=========== ============ - - -numpy.loadtxt et al -------------------- - -These routines are difficult to duck-type to read both Unicode and -Bytes input. - -I assumed they are meant for reading Bytes streams -- this is probably -the far more common use case with scientific data. - - -Cyclic imports --------------- - -Python 3 is less forgiving about cyclic imports than Python 2. Cycles -need to be broken to have the same code work both on Python 2 and 3. - - -C code -====== - - -NPY_PY3K --------- - -A #define in config.h, defined when building for Py3. - -.. todo:: - - Currently, this is generated as a part of the config. - Is this sensible (we could also use Py_VERSION_HEX)? - - -private/npy_3kcompat.h ----------------------- - -Convenience macros for Python 3 support: - -- PyInt -> PyLong on Py3 -- PyString -> PyBytes on Py3 -- PyUString -> PyUnicode on Py3 and PyString on Py2 -- PyBytes on Py2 -- PyUnicode_ConcatAndDel, PyUnicode_Concat2 -- Py_SIZE et al., for older Python versions -- npy_PyFile_Dup, etc. to get FILE* from Py3 file objects -- PyObject_Cmp, convenience comparison function on Py3 -- NpyCapsule_* helpers: PyCObject - -Any new ones that need to be added should be added in this file. - -.. 
todo:: - - Remove PyString_* eventually -- having a call to one of these in NumPy - sources is a sign of an error... - - -ob_type, ob_size ----------------- - -These use Py_SIZE, etc. macros now. The macros are also defined in -npy_3kcompat.h for the Python versions that don't have them natively. - - -Py_TPFLAGS_CHECKTYPES ---------------------- - -Python 3 no longer supports type coercion in arithmetic. - -Py_TPFLAGS_CHECKTYPES is now on by default, and so the C-level -interface, ``nb_*`` methods, still unconditionally receive whatever -types as their two arguments. - -However, this will affect Python-level code: previously if you -inherited from a Py_TPFLAGS_CHECKTYPES enabled class that implemented -a ``__mul__`` method, the same ``__mul__`` method would still be -called also as when a ``__rmul__`` was required, but with swapped -arguments (see Python/Objects/typeobject.c:wrap_binaryfunc_r). -However, on Python 3, arguments are swapped only if both are of same -(sub-)type, and otherwise things fail. - -This means that ``ndarray``-derived subclasses must now implement all -relevant ``__r*__`` methods, since they cannot any more automatically -fall back to ndarray code. - - -PyNumberMethods ---------------- - -The structures have been converted to the new format: - -- number.c -- scalartypes.c.src -- scalarmathmodule.c.src - -The slots np_divide, np_long, np_oct, np_hex, and np_inplace_divide -have gone away. The slot np_int is what np_long used to be, tp_divide -is now tp_floor_divide, and np_inplace_divide is now -np_inplace_floor_divide. - -These have simply been #ifdef'd out on Py3. 
- -The Py2/Py3 compatible structure definition looks like:: - - static PyNumberMethods @name@_as_number = { - (binaryfunc)0, /*nb_add*/ - (binaryfunc)0, /*nb_subtract*/ - (binaryfunc)0, /*nb_multiply*/ - #if defined(NPY_PY3K) - #else - (binaryfunc)0, /*nb_divide*/ - #endif - (binaryfunc)0, /*nb_remainder*/ - (binaryfunc)0, /*nb_divmod*/ - (ternaryfunc)0, /*nb_power*/ - (unaryfunc)0, - (unaryfunc)0, /*nb_pos*/ - (unaryfunc)0, /*nb_abs*/ - #if defined(NPY_PY3K) - (inquiry)0, /*nb_bool*/ - #else - (inquiry)0, /*nb_nonzero*/ - #endif - (unaryfunc)0, /*nb_invert*/ - (binaryfunc)0, /*nb_lshift*/ - (binaryfunc)0, /*nb_rshift*/ - (binaryfunc)0, /*nb_and*/ - (binaryfunc)0, /*nb_xor*/ - (binaryfunc)0, /*nb_or*/ - #if defined(NPY_PY3K) - #else - 0, /*nb_coerce*/ - #endif - (unaryfunc)0, /*nb_int*/ - #if defined(NPY_PY3K) - (unaryfunc)0, /*nb_reserved*/ - #else - (unaryfunc)0, /*nb_long*/ - #endif - (unaryfunc)0, /*nb_float*/ - #if defined(NPY_PY3K) - #else - (unaryfunc)0, /*nb_oct*/ - (unaryfunc)0, /*nb_hex*/ - #endif - 0, /*inplace_add*/ - 0, /*inplace_subtract*/ - 0, /*inplace_multiply*/ - #if defined(NPY_PY3K) - #else - 0, /*inplace_divide*/ - #endif - 0, /*inplace_remainder*/ - 0, /*inplace_power*/ - 0, /*inplace_lshift*/ - 0, /*inplace_rshift*/ - 0, /*inplace_and*/ - 0, /*inplace_xor*/ - 0, /*inplace_or*/ - (binaryfunc)0, /*nb_floor_divide*/ - (binaryfunc)0, /*nb_true_divide*/ - 0, /*nb_inplace_floor_divide*/ - 0, /*nb_inplace_true_divide*/ - (unaryfunc)NULL, /*nb_index*/ - }; - - - -PyBuffer (provider) -------------------- - -PyBuffer usage is widely spread in multiarray: - -1) The void scalar makes use of buffers -2) Multiarray has methods for creating buffers etc. explicitly -3) Arrays can be created from buffers etc. -4) The .data attribute of an array is a buffer - -Py3 introduces the PEP 3118 buffer protocol as the *only* protocol, -so we must implement it. 
- -The exporter parts of the PEP 3118 buffer protocol are currently -implemented in ``buffer.c`` for arrays, and in ``scalartypes.c.src`` -for generic array scalars. The generic array scalar exporter, however, -doesn't currently produce format strings, which needs to be fixed. - -Also some code also stops working when ``bf_releasebuffer`` is -defined. Most importantly, ``PyArg_ParseTuple("s#", ...)`` refuses to -return a buffer if ``bf_releasebuffer`` is present. For this reason, -the buffer interface for arrays is implemented currently *without* -defining ``bf_releasebuffer`` at all. This forces us to go through -some additional work. - -There are a couple of places that need further attention: - -- VOID_getitem - - In some cases, this returns a buffer object on Python 2. On Python 3, - there is no stand-alone buffer object, so we return a byte array instead. - -The Py2/Py3 compatible PyBufferMethods definition looks like:: - - NPY_NO_EXPORT PyBufferProcs array_as_buffer = { - #if !defined(NPY_PY3K) - #if PY_VERSION_HEX >= 0x02050000 - (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (segcountproc)array_getsegcount, /*bf_getsegcount*/ - (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ - #else - (getreadbufferproc)array_getreadbuf, /*bf_getreadbuffer*/ - (getwritebufferproc)array_getwritebuf, /*bf_getwritebuffer*/ - (getsegcountproc)array_getsegcount, /*bf_getsegcount*/ - (getcharbufferproc)array_getcharbuf, /*bf_getcharbuffer*/ - #endif - #endif - #if PY_VERSION_HEX >= 0x02060000 - (getbufferproc)array_getbuffer, /*bf_getbuffer*/ - (releasebufferproc)array_releasebuffer, /*bf_releasebuffer*/ - #endif - }; - -.. todo:: - - Produce PEP 3118 format strings for array scalar objects. - -.. 
todo:: - - There's stuff to clean up in numarray/_capi.c - - -PyBuffer (consumer) -------------------- - -There are two places in which we may want to be able to consume buffer -objects and cast them to ndarrays: - -1) `multiarray.frombuffer`, ie., ``PyArray_FromAny`` - - The frombuffer returns only arrays of a fixed dtype. It does not - make sense to support PEP 3118 at this location, since not much - would be gained from that -- the backward compatibility functions - using the old array interface still work. - - So no changes needed here. - -2) `multiarray.array`, ie., ``PyArray_FromAny`` - - In general, we would like to handle :pep:`3118` buffers in the same way - as ``__array_interface__`` objects. Hence, we want to be able to cast - them to arrays already in ``PyArray_FromAny``. - - Hence, ``PyArray_FromAny`` needs additions. - -There are a few caveats in allowing :pep:`3118` buffers in -``PyArray_FromAny``: - -a) `bytes` (and `str` on Py2) objects offer a buffer interface that - specifies them as 1-D array of bytes. - - Previously ``PyArray_FromAny`` has cast these to 'S#' dtypes. We - don't want to change this, since will cause problems in many places. - - We do, however, want to allow other objects that provide 1-D byte arrays - to be cast to 1-D ndarrays and not 'S#' arrays -- for instance, 'S#' - arrays tend to strip trailing NUL characters. - -So what is done in ``PyArray_FromAny`` currently is that: - -- Presence of :pep:`3118` buffer interface is checked before checking - for array interface. If it is present *and* the object is not - `bytes` object, then it is used for creating a view on the buffer. - -- We also check in ``discover_depth`` and ``_array_find_type`` for the - 3118 buffers, so that:: - - array([some_3118_object]) - - will treat the object similarly as it would handle an `ndarray`. - - However, again, bytes (and unicode) have priority and will not be - handled as buffer objects. 
- -This amounts to possible semantic changes: - -- ``array(buffer)`` will no longer create an object array - ``array([buffer], dtype='O')``, but will instead expand to a view - on the buffer. - -.. todo:: - - Take a second look at places that used PyBuffer_FromMemory and - PyBuffer_FromReadWriteMemory -- what can be done with these? - -.. todo:: - - There's some buffer code in numarray/_capi.c that needs to be addressed. - - -PyBuffer (object) ------------------ - -Since there is a native buffer object in Py3, the `memoryview`, the -`newbuffer` and `getbuffer` functions are removed from `multiarray` in -Py3: their functionality is taken over by the new `memoryview` object. - - -PyString --------- - -There is no PyString in Py3, everything is either Bytes or Unicode. -Unicode is also preferred in many places, e.g., in __dict__. - -There are two issues related to the str/bytes change: - -1) Return values etc. should prefer unicode -2) The 'S' dtype - -This entry discusses return values etc. only, the 'S' dtype is a -separate topic. - -All uses of PyString in NumPy should be changed to one of - -- PyBytes: one-byte character strings in Py2 and Py3 -- PyUString (defined in npy_3kconfig.h): PyString in Py2, PyUnicode in Py3 -- PyUnicode: UCS in Py2 and Py3 - -In many cases the conversion only entails replacing PyString with -PyUString. - -PyString is currently defined to PyBytes in npy_3kcompat.h, for making -things to build. This definition will be removed when Py3 support is -finished. - -Where ``*_AsStringAndSize`` is used, more care needs to be taken, as -encoding Unicode to Bytes may needed. If this cannot be avoided, the -encoding should be ASCII, unless there is a very strong reason to do -otherwise. Especially, I don't believe we should silently fall back to -UTF-8 -- raising an exception may be a better choice. - -Exceptions should use PyUnicode_AsUnicodeEscape -- this should result -to an ASCII-clean string that is appropriate for the exception -message. 
- -Some specific decisions that have been made so far: - -* descriptor.c: dtype field names are UString - - At some places in NumPy code, there are some guards for Unicode field - names. However, the dtype constructor accepts only strings as field names, - so we should assume field names are *always* UString. - -* descriptor.c: field titles can be arbitrary objects. - If they are UString (or, on Py2, Bytes or Unicode), insert to fields dict. - -* descriptor.c: dtype strings are Unicode. - -* descriptor.c: datetime tuple contains Bytes only. - -* repr() and str() should return UString - -* comparison between Unicode and Bytes is not defined in Py3 - -* Type codes in numerictypes.typeInfo dict are Unicode - -* Func name in errobj is Bytes (should be forced to ASCII) - -.. todo:: - - tp_doc -- it's a char* pointer, but what is the encoding? - Check esp. lib/src/_compiled_base - - Currently, UTF-8 is assumed. - -.. todo:: - - ufunc names -- again, what's the encoding? - -.. todo:: - - Cleanup to do later on: Replace all occurrences of PyString by - PyBytes, PyUnicode, or PyUString. - -.. todo:: - - Revise errobj decision? - -.. todo:: - - Check that non-UString field names are not accepted anywhere. - - -PyUnicode ---------- - -PyUnicode in Py3 is pretty much as it was in Py2, except that it is -now the only "real" string type. - -In Py3, Unicode and Bytes are not comparable, ie., 'a' != b'a'. NumPy -comparison routines were handled to act in the same way, leaving -comparison between Unicode and Bytes undefined. - -.. todo:: - - Check that indeed all comparison routines were changed. - - -Fate of the 'S' dtype ---------------------- - -On Python 3, the 'S' dtype will still be Bytes. - -However,:: - - str, str_ == unicode_ - - -PyInt ------ - -There is no limited-range integer type any more in Py3. It makes no -sense to inherit NumPy ints from Py3 ints. - -Currently, the following is done: - -1) NumPy's integer types no longer inherit from Python integer. 
-2) int is taken dtype-equivalent to NPY_LONG -3) ints are converted to NPY_LONG - -PyInt methods are currently replaced by PyLong, via macros in npy_3kcompat.h. - -Dtype decision rules were changed accordingly, so that NumPy understands -Py3 int translate to NPY_LONG as far as dtypes are concerned. - -array([1]).dtype will be the default NPY_LONG integer. - -.. todo:: - - Not inheriting from `int` on Python 3 makes the following not work: - ``np.intp("0xff", 16)`` -- because the NumPy type does not take - the second argument. This could perhaps be fixed... - - -Divide ------- - -The Divide operation is no more. - -Calls to PyNumber_Divide were replaced by FloorDivide or TrueDivide, -as appropriate. - -The PyNumberMethods entry is #ifdef'd out on Py3, see above. - - -tp_compare, PyObject_Compare ----------------------------- - -The compare method has vanished, and is replaced with richcompare. -We just #ifdef the compare methods out on Py3. - -New richcompare methods were implemented for: - -* flagsobject.c - -On the consumer side, we have a convenience wrapper in npy_3kcompat.h -providing PyObject_Cmp also on Py3. - - -Pickling --------- - -The ndarray and dtype __setstate__ were modified to be -backward-compatible with Py3: they need to accept a Unicode endian -character, and Unicode data since that's what Py2 str is unpickled to -in Py3. - -An encoding assumption is required for backward compatibility: the user -must do - - loads(f, encoding='latin1') - -to successfully read pickles created by Py2. - -.. todo:: - - Forward compatibility? Is it even possible? - For sure, we are not knowingly going to store data in PyUnicode, - so probably the only way for forward compatibility is to implement - a custom Unpickler for Py2? - -.. todo:: - - If forward compatibility is not possible, aim to store also the endian - character as Bytes... - - -Module initialization ---------------------- - -The module initialization API changed in Python 3.1. 
- -Most NumPy modules are now converted. - - -PyTypeObject ------------- - -The PyTypeObject of py3k is binary compatible with the py2k version and the -old initializers should work. However, there are several considerations to -keep in mind. - -1) Because the first three slots are now part of a struct some compilers issue - warnings if they are initialized in the old way. - -2) The compare slot has been made reserved in order to preserve binary - compatibility while the tp_compare function went away. The tp_richcompare - function has replaced it and we need to use that slot instead. This will - likely require modifications in the searchsorted functions and generic sorts - that currently use the compare function. - -3) The previous numpy practice of initializing the COUNT_ALLOCS slots was - bogus. They are not supposed to be explicitly initialized and were out of - place in any case because an extra base slot was added in python 2.6. - -Because of these facts it is better to use #ifdefs to bring the old -initializers up to py3k snuff rather than just fill the tp_richcompare -slot. They also serve to mark the places where changes have been -made. Note that explicit initialization can stop once none of the -remaining entries are non-zero, because zero is the default value that -variables with non-local linkage receive. 
- -The Py2/Py3 compatible TypeObject definition looks like:: - - NPY_NO_EXPORT PyTypeObject Foo_Type = { - #if defined(NPY_PY3K) - PyVarObject_HEAD_INIT(0,0) - #else - PyObject_HEAD_INIT(0) - 0, /* ob_size */ - #endif - "numpy.foo" /* tp_name */ - 0, /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - 0, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - #if defined(NPY_PY3K) - (void *)0, /* tp_reserved */ - #else - 0, /* tp_compare */ - #endif - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 0, /* tp_flags */ - 0, /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ - 0, /* tp_free */ - 0, /* tp_is_gc */ - 0, /* tp_bases */ - 0, /* tp_mro */ - 0, /* tp_cache */ - 0, /* tp_subclasses */ - 0, /* tp_weaklist */ - 0, /* tp_del */ - 0 /* tp_version_tag (2.6) */ - }; - - - -PySequenceMethods ------------------ - -Types with tp_as_sequence defined - -* multiarray/descriptor.c -* multiarray/scalartypes.c.src -* multiarray/arrayobject.c - -PySequenceMethods in py3k are binary compatible with py2k, but some of the -slots have gone away. 
I suspect this means some functions need redefining so -the semantics of the slots needs to be checked:: - - PySequenceMethods foo_sequence_methods = { - (lenfunc)0, /* sq_length */ - (binaryfunc)0, /* sq_concat */ - (ssizeargfunc)0, /* sq_repeat */ - (ssizeargfunc)0, /* sq_item */ - (void *)0, /* nee sq_slice */ - (ssizeobjargproc)0, /* sq_ass_item */ - (void *)0, /* nee sq_ass_slice */ - (objobjproc)0, /* sq_contains */ - (binaryfunc)0, /* sq_inplace_concat */ - (ssizeargfunc)0 /* sq_inplace_repeat */ - }; - - -PyMappingMethods ----------------- - -Types with tp_as_mapping defined - -* multiarray/descriptor.c -* multiarray/iterators.c -* multiarray/scalartypes.c.src -* multiarray/flagsobject.c -* multiarray/arrayobject.c - -PyMappingMethods in py3k look to be the same as in py2k. The semantics -of the slots needs to be checked:: - - PyMappingMethods foo_mapping_methods = { - (lenfunc)0, /* mp_length */ - (binaryfunc)0, /* mp_subscript */ - (objobjargproc)0 /* mp_ass_subscript */ - }; - - -PyFile ------- - -Many of the PyFile items have disappeared: - -1) PyFile_Type -2) PyFile_AsFile -3) PyFile_FromString - -Most importantly, in Py3 there is no way to extract a FILE* pointer -from the Python file object. There are, however, new PyFile_* functions -for writing and reading data from the file. - -Compatibility wrappers that return a dup-ed `fdopen` file pointer are -in private/npy_3kcompat.h. This causes more flushing to be necessary, -but it appears there is no alternative solution. The FILE pointer so -obtained must be closed with fclose after use. - -.. todo:: - - Should probably be done much later on... - - Adapt all NumPy I/O to use the PyFile_* methods or the low-level - IO routines. In any case, it's unlikely that C stdio can be used any more. - - Perhaps using PyFile_* makes numpy.tofile e.g. to a gzip to work? - - -READONLY --------- - -The RO alias for READONLY is no more. - -These were replaced, as READONLY is present also on Py2. 
- - -PyOS ----- - -Deprecations: - -1) PyOS_ascii_strtod -> PyOS_double_from_string; - curiously enough, PyOS_ascii_strtod is not only deprecated but also - causes segfaults - - -PyInstance ----------- - -There are some checks for PyInstance in ``common.c`` and ``ctors.c``. - -Currently, ``PyInstance_Check`` is just #ifdef'd out for Py3. This is, -possibly, not the correct thing to do. - -.. todo:: - - Do the right thing for PyInstance checks. - - -PyCObject / PyCapsule ---------------------- - -The PyCObject API is removed in Python 3.2, so we need to rewrite it -using PyCapsule. - -NumPy was changed to use the Capsule API, using NpyCapsule* wrappers. diff --git a/doc/TESTS.rst b/doc/TESTS.rst index 195935ccf380..ee8a8b4b07e1 100644 --- a/doc/TESTS.rst +++ b/doc/TESTS.rst @@ -74,6 +74,24 @@ Testing a subset of NumPy:: For detailed info on testing, see :ref:`testing-builds` + +Running doctests +---------------- + +NumPy documentation contains code examples, "doctests". To check that the examples +are correct, install the ``scipy-doctest`` package:: + + $ pip install scipy-doctest + +and run one of:: + + $ spin check-docs -v + $ spin check-docs numpy/linalg + $ spin check-docs -- -k 'det and not slogdet' + +Note that the doctests are not run when you use ``spin test``. + + Other methods of running tests ------------------------------ diff --git a/doc/changelog/2.0.0-changelog.rst b/doc/changelog/2.0.0-changelog.rst new file mode 100644 index 000000000000..78e250f508d9 --- /dev/null +++ b/doc/changelog/2.0.0-changelog.rst @@ -0,0 +1,1304 @@ + +Contributors +============ + +A total of 212 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* @Algorithmist-Girl + +* @DWesl +* @Illviljan +* @Searchingdays +* @ellaella12 + +* @liang3zy22 + +* @matoro + +* @mcp292 + +* @mgunyho + +* @msavinash + +* @mykykh + +* @pojaghi + +* @pratiklp00 + +* @stefan6419846 + +* @undermyumbrella1 + +* Aaron Meurer +* Aditi Saluja + +* Adrin Jalali + +* Agriya Khetarpal + +* Albert Steppi + +* Alex Cabrera + +* Alexander Grund +* Andrea Bianchi + +* Andreas Florath + +* Andrew Ardill + +* Andrew Ho + +* Andrew Nelson +* Andrey Rybakov + +* Ankur Singh + +* Anton Prosekin + +* Antony Lee +* Arun Kannawadi + +* Bas van Beek +* Ben Woodruff + +* Bharat Raghunathan +* Bhavya Alekhya + +* Brandon Smith + +* Brian Walshe + +* Brigitta Sipőcz +* Brock Mendel +* Carl Meyer + +* Charles Bousseau + +* Charles Harris +* Chris Sidebottom +* Christian Lorentzen +* Christian Veenhuis +* Christoph Reiter +* Christopher Sidebottom +* Clément Robert +* Cédric Hannotier +* Cobalt Yang + +* Gonçalo Bárias + +* D.J. Ramones + +* DanShatford + +* Daniel Li + +* Daniel Vanzo +* Daval Parmar +* Developer-Ecosystem-Engineering +* Dhruv Rawat + +* Dimitri Papadopoulos Orfanos +* Edward E +* Edward Yang + +* Eisuke Kawashima + +* Eliah Kagan + +* Élie Goudout + +* Elliott Sales de Andrade +* Emil Olszewski + +* Emily Hunt + +* Éric Piel + +* Eric Wieser +* Eric Xie + +* Even Rouault + +* Evgeni Burovski +* Filipe Laíns + +* Francisco Sousa + +* Ganesh Kathiresan +* Gonçalo Bárias + +* Gonzalo Tornaría + +* Hans Meine +* Heberto Mayorquin + +* Heinz-Alexander Fuetterer + +* Hood Chatham +* Hugo van Kemenade +* Ivan A. Melnikov + +* Jacob M. 
Casey + +* Jake Lishman + +* Jake VanderPlas +* James Oliver + +* Jan Wassenberg + +* Janukan Sivajeyan + +* Johann Rohwer + +* Johannes Kaisinger + +* John Muradeli + +* Joris Van den Bossche +* Justus Magin +* Jyn Spring 琴春 +* Kai Striega +* Kevin Sheppard +* Kevin Wu + +* Khawaja Junaid + +* Kit Lee + +* Kristian Minchev + +* Kristoffer Pedersen + +* Kuan-Wei Chiu + +* Lane Votapka + +* Larry Bradley +* Leo Singer +* Liang Yan + +* Linus Sommer + +* Logan Thomas +* Lucas Colley + +* Luiz Eduardo Amaral + +* Lukas Geiger +* Lysandros Nikolaou + +* Maanas Arora + +* Maharshi Basu + +* Mahder Gebremedhin + +* Marcel Bargull + +* Marcel Loose + +* Mark Mentovai + +* Mark Ryan + +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matt Thompson + +* Matthew Barber +* Matthew Thompson + +* Matthias Bussonnier +* Matthias Koeppe +* Matthias Schaufelberger + +* Matti Picus +* Maxwell Aladago +* Maya Anderson + +* Melissa Weber Mendonça +* Meng Xiangzhuo + +* Michael Kiffer +* Miki Watanabe (渡邉 美希) +* Milan Curcic + +* Miles Cranmer +* Miro Hrončok + +* Mohamed E. 
BRIKI + +* Mohaned Qunaibit + +* Mohit Kumar + +* Muhammed Muhsin + +* Mukulika Pahari +* Munira Alduraibi + +* Namami Shanker +* Nathan Goldbaum +* Nyakku Shigure + +* Ola x Nilsson + +* Olivier Mattelaer + +* Olivier Grisel +* Omid Rajaei +* Pablo Losada + +* Pamphile Roy +* Paul Reece + +* Pedro Kaj Kjellerup Nacht + +* Peiyuan Liu + +* Peter Hawkins +* Pierre +* Pieter Eendebak +* Quentin Barthélemy + +* Raghuveer Devulapalli +* Ralf Gommers +* Randy Eckenrode + +* Raquel Braunschweig + +* Richard Howe + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ronald van Elburg + +* Ross Barnowski +* Sam James + +* Sam Van Kooten + +* Samuel Albanie + +* Sarah Wang + +* Sarah Zwiep + +* Sarah-Yifei-Wang + +* Sarthak Dawar + +* Sayantika Banik +* Sayed Adel +* Sean Cheah + +* Sebastian Berg +* Serge Guelton +* Shalini Roy + +* Shen Zhou +* Shubhal Gupta + +* Stefan van der Walt +* Stefano Rivera + +* Takumasa N. + +* Taras Tsugrii +* Thomas A Caswell +* Thomas Grainger + +* Thomas Li +* Tim Hoffmann +* Tim Paine + +* Timo Röhling + +* Trey Woodlief + +* Tyler Reddy +* Victor Tang + +* Vladimir Fokow + +* Warren Weckesser +* Warrick Ball + +* Will Ayd +* William Andrea + +* William Ayd + +* Xiangyi Wang + +* Yash Pethe + +* Yuki K +* Zach Brugh + +* Zach Rottman + +* Zolisa Bleki + +Pull requests merged +==================== + +A total of 1078 pull requests were merged for this release. + +* `#15457 `__: BUG: Adds support for array parameter declaration in fortran... 
+* `#21199 `__: ENH: expose datetime.c functions to cython +* `#21429 `__: ENH: Added ``bitwise_count`` UFuncs +* `#21760 `__: MAINT: Make output of Polynomial representations consistent +* `#21975 `__: ENH: Add binding for random pyx files +* `#22449 `__: ENH: Update scalar representations as per NEP 51 +* `#22657 `__: BUG: Fix common block handling in f2py +* `#23096 `__: BLD, SIMD: The meson CPU dispatcher implementation +* `#23282 `__: BUG: Fix data stmt handling for complex values in f2py +* `#23347 `__: DOC: changed formula in random.Generator.pareto doc #22701 +* `#23351 `__: ENH: Use AVX512-FP16 SVML content for float16 umath functions +* `#23508 `__: DOC: Update scalar types in ``Py{TYPE}ArrType_Type`` +* `#23537 `__: NEP: add NEP on a Python API cleanup for NumPy 2.0 +* `#23611 `__: DOC: Make input/output type consistent and add more examples... +* `#23729 `__: ENH: allow int sequences as shape arguments in numpy.memmap +* `#23762 `__: API: Add .mT attribute for arrays +* `#23764 `__: CI,TYP: Bump mypy to 1.4.1 +* `#23780 `__: BUG: Create complex scalars from real and imaginary parts +* `#23785 `__: DOC: tweak NEP 50 examples +* `#23787 `__: DOC: Add brief note about custom converters to genfromtext. +* `#23789 `__: ENH: add copy parameter for api.reshape function +* `#23795 `__: Use tuple instead of string for (LOWER|UPPER)_TABLEs. +* `#23804 `__: REL: Prepare main for NumPy 2.0.0 development +* `#23809 `__: MAINT: removing the deprecated submodule +* `#23810 `__: MAINT: Bump github/codeql-action from 2.3.3 to 2.3.4 +* `#23813 `__: DOC: Clean up errstate handling in our tests +* `#23814 `__: DOC: switching to use the plot directive +* `#23817 `__: MAINT: Bump github/codeql-action from 2.3.4 to 2.3.5 +* `#23819 `__: BUG: Doctest doesn't have a SHOW_WARNINGS directive. 
+* `#23822 `__: DOC: Added ``pathlib.Path`` where applicable +* `#23825 `__: BLD: use cython3 for one CI run +* `#23826 `__: MAINT: io.open → open +* `#23828 `__: MAINT: fix typos found by codespell +* `#23830 `__: API: deprecate compat and selected lib utils +* `#23831 `__: DOC: use float64 instead of float128 in docstring +* `#23832 `__: REL: Prepare for the NumPy 1.25.0rc1 release +* `#23834 `__: MAINT: IOError → OSError +* `#23835 `__: MAINT: Update versioneer: 0.26 → 0.28 +* `#23836 `__: DOC: update distutils migration guide +* `#23838 `__: BLD: switch to meson-python as the default build backend +* `#23840 `__: REL: Prepare for the NumPy 1.25.0rc1 release +* `#23841 `__: MAINT: Bump pypa/cibuildwheel from 2.12.3 to 2.13.0 +* `#23843 `__: MAINT: Update download-wheels +* `#23845 `__: MAINT: Do not call PyArray_Item_XDECREF in PyArray_Pack +* `#23846 `__: TST: Add tests for np.argsort +* `#23847 `__: MAINT: const correctness for the generalized ufunc C API +* `#23850 `__: MAINT: Bump actions/dependency-review-action from 3.0.4 to 3.0.6 +* `#23851 `__: CI: Update cirrus nightly wheel upload token +* `#23852 `__: CI: Change "weekly" to "nightly" in cirrus +* `#23854 `__: DOC:removed examples which refers to a non existent function +* `#23855 `__: BUG: make use of locals() in a comprehension fully compatible... +* `#23856 `__: CI: bump nightly upload frequency to twice a week +* `#23857 `__: BUG: fix cron syntax +* `#23859 `__: DOC: Note that f2py isn't consiered safe +* `#23861 `__: MAINT: Remove all "NumPy 2" as that should be main now +* `#23865 `__: MAINT: Bump github/codeql-action from 2.3.5 to 2.3.6 +* `#23868 `__: DOC: Fix ``NPY_OUT_ARRAY`` to ``NPY_ARRAY_OUT_ARRAY`` in how-to-extend... 
+* `#23871 `__: NEP: Fix NEP 53 file format and minor formatting issue +* `#23878 `__: TST: Add tests for np.argsort +* `#23881 `__: ENH: Add array API standard v2022.12 support to numpy.array_api +* `#23887 `__: TYP,DOC: Annotate and document the ``metadata`` parameter of... +* `#23897 `__: DOC: Fix transpose() description with a correct reference to... +* `#23898 `__: API: Change string to bool conversions to be consistent with... +* `#23902 `__: MAINT: Use ``--allow-downgrade`` option for rtools. +* `#23906 `__: MAINT: Use vectorcall for call forwarding in methods +* `#23907 `__: MAINT: Bump github/codeql-action from 2.3.6 to 2.13.4 +* `#23908 `__: MAINT: Bump actions/checkout from 3.5.2 to 3.5.3 +* `#23911 `__: BUG: Allow np.info on non-hashable objects with a dtype +* `#23912 `__: API: Switch to NEP 50 behavior by default +* `#23913 `__: ENH: let zeros, empty, and empty_like accept dtype classes +* `#23914 `__: DOC: Fix reference ``ComplexWarning`` in release note +* `#23915 `__: DOC: Update development_environment doc. +* `#23916 `__: ABI: Bump C-ABI to 2 but accept older NumPy if compiled against... +* `#23917 `__: ENH: Speed up boolean indexing of flatiters +* `#23918 `__: DOC: Fix references to ``AxisError`` in docstrings +* `#23919 `__: API: Remove interrupt handling and ``noprefix.h`` +* `#23920 `__: DOC: fix DOI on badge +* `#23921 `__: DEP: Expire the PyDataMem_SetEventHook deprecation and remove... +* `#23922 `__: API: Remove ``seterrobj``/``geterrobj``/``extobj=`` and related C-API... 
+* `#23923 `__: BUG:Fix for call to 'vec_st' is ambiguous +* `#23924 `__: MAINT: Bump pypa/cibuildwheel from 2.13.0 to 2.13.1 +* `#23925 `__: MAINT: Disable SIMD version of float64 sin and cos +* `#23927 `__: DOC: Fix references to ``r_`` in ``mr_class`` docstring +* `#23935 `__: MAINT: Update to latest x86-simd-sort +* `#23936 `__: ENH,API: Make the errstate/extobj a contextvar +* `#23941 `__: BUG: Fix NpyIter cleanup in einsum error path +* `#23942 `__: BUG: Fixup for win64 fwrite issue +* `#23943 `__: DOC: Update required C++ version in building.rst (and copy-edit). +* `#23944 `__: DOC: const correctness in PyUFunc_FromFuncAndData... functions +* `#23950 `__: MAINT: Upgrade install-rtools version +* `#23952 `__: Replace a divider with a colon for _monotonicity +* `#23953 `__: BUG: Fix AVX2 intrinsic npyv_store2_till_s64 on MSVC > 19.29 +* `#23960 `__: DOC: adding release note for 23809 +* `#23961 `__: BLD: update pypy in CI to latest version +* `#23962 `__: TEST: change subprocess call to capture stderr too +* `#23964 `__: MAINT: Remove references to removed functions +* `#23965 `__: MAINT: Simplify codespaces conda environment activation +* `#23967 `__: DOC: Fix references to ``trimseq`` in docstrings +* `#23969 `__: MAINT: Update main after 1.25.0 release. +* `#23971 `__: BUG: Fix private procedures in ``f2py`` modules +* `#23977 `__: MAINT: pipes.quote → shlex.quote +* `#23979 `__: MAINT: Fix typos found by codespell +* `#23980 `__: MAINT: use ``yield from`` where applicable +* `#23982 `__: BLD: Port long double identification to C for meson +* `#23983 `__: BLD: change file extension for installed static libraries back... 
+* `#23984 `__: BLD: improve handling of CBLAS, add ``-Duse-ilp64`` build option +* `#23985 `__: Revert "TST: disable longdouble string/print tests on Linux aarch64" +* `#23990 `__: DOC: Fix np.vectorize Doc +* `#23991 `__: CI: BLD: build wheels and fix test suite for Python 3.12 +* `#23995 `__: MAINT: Do not use ``--side-by-side`` choco option +* `#23997 `__: MAINT: make naming of C aliases for dtype classes consistent +* `#23998 `__: DEP: Expire ``set_numeric_ops`` and the corresponding C functions... +* `#24004 `__: BUG: Fix reduction ``return NULL`` to be ``goto fail`` +* `#24006 `__: ENH: Use high accuracy SVML for double precision umath functions +* `#24009 `__: DOC: Update __array__ description +* `#24011 `__: API: Remove ``old_defines.h`` (part of NumPy 1.7 deprecated C-API) +* `#24012 `__: MAINT: Remove hardcoded f2py numeric/numarray compatibility switch +* `#24014 `__: BUG: Make errstate decorator compatible with threading +* `#24017 `__: MAINT: Further cleanups for errstate +* `#24018 `__: ENH: Use Highway's VQSort on AArch64 +* `#24020 `__: Fix typo in random sampling documentation +* `#24021 `__: BUG: Fix error message for nanargmin/max of empty sequence +* `#24025 `__: TST: improve test for Cholesky decomposition +* `#24026 `__: DOC: Add note for installing ``asv`` library to run benchmark tests +* `#24027 `__: DOC: Fix reference to ``__array_struct__`` in ``arrays.interface.rst`` +* `#24029 `__: DOC: Add link to NEPs in top navbar +* `#24030 `__: BUG: Avoid undefined behavior in array.astype() +* `#24031 `__: BUG: Ensure ``__array_ufunc__`` works without any kwargs passed +* `#24046 `__: DOC: Fix reference to python module ``string`` in ``routines.char.rst`` +* `#24047 `__: DOC: Fix reference to ``array()`` in release note +* `#24049 `__: MAINT: Update main after 1.24.4 release. +* `#24051 `__: MAINT: Pin urllib3 to avoid anaconda-client bug. 
+* `#24052 `__: MAINT: Bump ossf/scorecard-action from 2.1.3 to 2.2.0 +* `#24053 `__: ENH: Adopt new macOS Accelerate BLAS/LAPACK Interfaces, including... +* `#24054 `__: BUG: Multiply or divides using SIMD without a full vector can... +* `#24058 `__: DOC: Remove references to ``PyArray_SetNumericOps`` and ``PyArray_GetNumericOps`` in release note +* `#24059 `__: MAINT: Remove ability to enter errstate twice (sequentially) +* `#24060 `__: BLD: use ``-ftrapping-math`` with Clang on macOS in Meson build +* `#24061 `__: DOC: PR adds casting option's description to Glossary and ``numpy.concatenate``. +* `#24068 `__: DOC: Add NpzFile class documentation. +* `#24071 `__: MAINT: Overwrite previous wheels when uploading to anaconda. +* `#24073 `__: API: expose PyUFunc_GiveFloatingpointErrors in the dtype API +* `#24075 `__: DOC: Add missing indentation in ``ma.mT`` docstring +* `#24076 `__: DOC: Fix incorrect reST markups in ``numpy.void`` docstring +* `#24077 `__: DOC: Fix documentation for ``ndarray.mT`` +* `#24082 `__: MAINT: testing for IS_MUSL closes #24074 +* `#24083 `__: ENH: Add ``spin`` command ``gdb``; customize ``docs`` and ``test`` +* `#24085 `__: ENH: Replace npy complex structs with native complex types +* `#24087 `__: NEP: Mark NEP 51 as accepted +* `#24090 `__: MAINT: print error from verify_c_api_version.py failing +* `#24092 `__: TST: Pin pydantic<2 in Pyodide workflow +* `#24094 `__: ENH: Added compiler ``args`` and ``link_args`` +* `#24097 `__: DOC: Add reference to dtype parameter in NDArray +* `#24098 `__: ENH: raise early exception if 0d array is used in np.cross +* `#24100 `__: DOC: Clarify correlate function definition +* `#24101 `__: BUG: Fix empty structured array dtype alignment +* `#24102 `__: DOC: fix rst formatting in datetime C API docs +* `#24103 `__: BUG: Only replace dtype temporarily if dimensions changed +* `#24105 `__: DOC: Correctly use savez_compressed in examples for that function. 
+* `#24107 `__: ENH: Add ``spin benchmark`` command +* `#24112 `__: DOC: Fix warnings and errors caused by reference/c-api/datetimes +* `#24113 `__: DOC: Fix the reference in the docstring of numpy.meshgrid +* `#24123 `__: BUG: ``spin gdb``: launch Python directly so that breakpoint... +* `#24124 `__: MAINT: Bump actions/setup-node from 3.6.0 to 3.7.0 +* `#24125 `__: MAINT: import numpy as ``np`` in ``spin ipython`` +* `#24126 `__: ENH: add mean keyword to std and var +* `#24130 `__: DOC: Fix warning for PyArray_MapIterNew. +* `#24133 `__: DOC: Update python as glue doc. +* `#24135 `__: DOC: Fix string types in ``arrays.dtypes.rst`` +* `#24138 `__: DOC: add NEP 54 on SIMD - moving to C++ and adopting Highway... +* `#24142 `__: ENH: Allow NEP 42 dtypes to use np.save and np.load +* `#24143 `__: Corrected a grammatical error in doc/source/user/absolute_beginners.rst +* `#24144 `__: API: Remove several niche objects for numpy 2.0 python API cleanup +* `#24149 `__: MAINT: Update main after 1.25.1 release. +* `#24150 `__: BUG: properly handle negative indexes in ufunc_at fast path +* `#24152 `__: DOC: Fix reference warning for recarray. +* `#24153 `__: BLD, TST: refactor test to use meson not setup.py, improve spin... +* `#24154 `__: API: deprecate undocumented functions +* `#24158 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... 
+* `#24159 `__: MAINT: Bump pypa/cibuildwheel from 2.13.1 to 2.14.0 +* `#24160 `__: MAINT: Update cibuildwheel to 2.14.0 +* `#24161 `__: BUG: histogram small range robust +* `#24162 `__: ENH: Improve clang-cl compliance +* `#24163 `__: MAINT: update pytest, hypothesis, pytest-cov, and pytz in test_requirements.txt +* `#24172 `__: DOC: Add note that NEP 29 is superseded by SPEC 0 +* `#24173 `__: MAINT: Bump actions/setup-python from 4.6.1 to 4.7.0 +* `#24176 `__: MAINT: do not use copyswap in flatiter internals +* `#24178 `__: BUG: PyObject_IsTrue and PyObject_Not error handling in setflags +* `#24187 `__: BUG: Fix the signature for np.array_api.take +* `#24188 `__: BUG: fix choose refcount leak +* `#24191 `__: BUG: array2string does not add signs for positive integers. Fixes... +* `#24193 `__: DEP: Remove datetime64 deprecation warning when constructing... +* `#24196 `__: MAINT: Remove versioneer +* `#24199 `__: BLD: update OpenBLAS to an intermediate commit +* `#24201 `__: ENH: Vectorize np.partition and np.argpartition using AVX-512 +* `#24202 `__: MAINT: Bump pypa/cibuildwheel from 2.14.0 to 2.14.1 +* `#24204 `__: BUG: random: Fix check for both uniform variates being 0 in random_beta() +* `#24205 `__: MAINT: Fix new or residual typos found by codespell +* `#24206 `__: TST: convert remaining setup.py tests to meson instead +* `#24208 `__: CI: Add a sanitizer CI job +* `#24211 `__: BUG: Fix reference count leak in str(scalar). +* `#24212 `__: BUG: fix invalid function pointer conversion error +* `#24214 `__: ENH: Create helper for conversion to arrays +* `#24219 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#24220 `__: BUG: random: Fix generation of nan by dirichlet. +* `#24222 `__: BUG: Fix cblas detection for the wheel builds +* `#24223 `__: BUG: Fix undefined behavior in complex pow(). +* `#24224 `__: API: Make 64bit default integer on 64bit windows +* `#24225 `__: DOC: Fix doc build warning for random. 
+* `#24227 `__: DOC: Update year in doc/source/conf.py to 2023 +* `#24228 `__: DOC: fix some double includes in f2py.getting-started.rst +* `#24231 `__: API: expose NPY_DTYPE macro in the dtype API +* `#24235 `__: BLD: only install the ``f2py`` command, not ``f2py3`` or ``f2py3.X`` +* `#24236 `__: BLD: update requirements to use cython>3.0 +* `#24237 `__: BUG: Added missing PyObject_IsTrue error check (return -1) #24177 +* `#24238 `__: BLD/CI: re-enable ILP64 usage and PyPy job in Azure +* `#24240 `__: BUG: Fix C types in scalartypes +* `#24248 `__: BUG: Factor out slow ``getenv`` call used for memory policy warning +* `#24249 `__: TST: enable test that checks for ``numpy.array_api`` entry point +* `#24250 `__: CI: Test NumPy against OpenBLAS weekly builds +* `#24254 `__: ENH: add weighted quantile for inverted_cdf +* `#24256 `__: DEV: Use ``exec_lines`` and not profile dir for ``spin ipython`` +* `#24257 `__: BUG: Add size check for threaded array assignment +* `#24258 `__: DEP: Remove PyArray complex macros and move PyArray_MIN/MAX +* `#24262 `__: DOC: Fix links to random.Generator methods in quickstart +* `#24263 `__: BUG: Fix use of renamed variable. +* `#24267 `__: BUG: random: Fix generation of nan by beta. +* `#24268 `__: CI: Enable running intel_spr_sde_test with Intel SDE +* `#24270 `__: BUG: Move legacy check for void printing +* `#24271 `__: API: Remove legacy-inner-loop-selector +* `#24272 `__: BUG: do not modify the input to ufunc_at +* `#24273 `__: TYP: Trim down the ``_NestedSequence.__getitem__`` signature +* `#24276 `__: DOC: Remove ``np.source`` and ``np.lookfor`` +* `#24277 `__: DOC: inconsistency between doc and code +* `#24278 `__: DOC: fix a couple typos and rst formatting errors in NEP 0053 +* `#24279 `__: CI/BLD: fail by default if no BLAS/LAPACK, add 32-bit Python... 
+* `#24281 `__: BUG: Further fixes to indexing loop and added tests +* `#24285 `__: CI: correct URL in cirrus.star +* `#24286 `__: CI: only build cirrus wheels when requested +* `#24287 `__: DOC: Fix some incorrectly formatted documents +* `#24289 `__: DOC: update code comment about ``NPY_USE_BLAS_ILP64`` environment... +* `#24291 `__: CI: improve test suite runtime via pytest parallelism and disabling... +* `#24298 `__: DOC: update stride reference doc. +* `#24299 `__: BUG: Fix assumed length f2py regression +* `#24303 `__: CI: apt update before apt install on cirrus +* `#24304 `__: MAINT: Update main after 1.25.2 release. +* `#24307 `__: CI: Cannot run ``intel_spr_sde_test`` on Intel SDE +* `#24311 `__: BLD: update openblas to newer version +* `#24312 `__: DEP: Finalize ``fastCopyAndTranpose`` and other old C-funcs/members... +* `#24315 `__: DOC: Fix some links in documents +* `#24316 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 1... +* `#24320 `__: DOC: Remove promoting twitter in heading +* `#24321 `__: DEP: Remove deprecated numpy.who +* `#24331 `__: DOC: Fix reference warning for buffer. +* `#24332 `__: DOC: Refactor description of ``PyArray_FromAny/PyArray_CheckFromAny`` +* `#24346 `__: DOC: use nightly dependencies [skip actions] [azp skip] [skip... +* `#24347 `__: DOC: Update to release upcoming change document +* `#24349 `__: BUG: polynomial: Handle non-array inputs in polynomial class... +* `#24354 `__: TST: fix distutils tests for deprecations in recent setuptools... +* `#24357 `__: API: Cleaning numpy/__init__.py and main namespace - Part 2 [NEP... +* `#24358 `__: BUG: flexible inheritance segfault +* `#24360 `__: BENCH: fix small array det benchmark +* `#24362 `__: DOC: Add release notes for complex types changes in 2.x +* `#24364 `__: BUG: Remove #undef complex from npy_common.h +* `#24369 `__: ENH: assert_array_less should report max violations instead of... 
+* `#24370 `__: BLD: Clean up build for complex +* `#24371 `__: MAINT: Fix codespaces setup.sh script +* `#24372 `__: MAINT: Bump pypa/cibuildwheel from 2.14.1 to 2.15.0 +* `#24373 `__: MAINT: Bump actions/dependency-review-action from 3.0.6 to 3.0.7 +* `#24374 `__: MAINT: Update cibuildwheel for cirrus builds +* `#24376 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 3... +* `#24379 `__: ENH: Vendor meson for multi-target build support +* `#24380 `__: DOC: Remove extra indents in documents +* `#24383 `__: DOC: Fix reference warning for ABCPolyBase. +* `#24393 `__: DOC: Add missing sphinx reference roles +* `#24396 `__: BLD: vendor meson-python to make the Windows builds with SIMD... +* `#24400 `__: TST: revert xfail in ``test_umath.py`` +* `#24402 `__: DOC: Fix reference warning for routines.polynomials.rst. +* `#24407 `__: DOC: add warning to ``allclose``, revise "Notes" in ``isclose`` +* `#24412 `__: [BUG] Return value of use_hugepage in hugepage_setup +* `#24413 `__: BUG: cleanup warnings [skip azp][skip circle][skip travis][skip... +* `#24414 `__: BLD: allow specifying the long double format to avoid the runtime... +* `#24415 `__: MAINT: Bump actions/setup-node from 3.7.0 to 3.8.0 +* `#24419 `__: CI/BUG: add Python 3.12 CI job and fix ``numpy.distutils`` AttributeError +* `#24420 `__: ENH: Introduce tracer for enabled CPU targets on each optimized... +* `#24421 `__: DOC: Remove mixed capitalization +* `#24422 `__: MAINT: Remove unused variable ``i`` +* `#24423 `__: MAINT: Bump actions/dependency-review-action from 3.0.7 to 3.0.8 +* `#24425 `__: CI: only run cirrus on commit to PR [skip actions] +* `#24427 `__: MAINT: revert adding ``distutils`` and ``array_api`` to ``np.__all__`` +* `#24434 `__: DOC: Fix reference warning for types-and-structures.rst. +* `#24435 `__: CI: cirrus run linux_aarch64 first +* `#24437 `__: MAINT: Bump actions/setup-node from 3.8.0 to 3.8.1 +* `#24439 `__: MAINT: Pin upper version of sphinx. 
+* `#24442 `__: DOC: Fix reference warning in Arrayterator and recfunctions. +* `#24445 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 4... +* `#24452 `__: ENH: Add prefix to _ALIGN Macro +* `#24457 `__: MAINT: Upgrade to spin 0.5 +* `#24461 `__: MAINT: Refactor partial load workaround for Clang +* `#24463 `__: MAINT: Fix broken link in runtests.py +* `#24468 `__: BUG: Fix meson build failure due to unchanged inplace auto-generated... +* `#24469 `__: DEP: Replace deprecation warning for non-integral arguments in... +* `#24471 `__: DOC: Fix some incorrect markups +* `#24473 `__: MAINT: Improve docstring and performance of trimseq +* `#24476 `__: MAINT: Move ``RankWarning`` to exceptions module +* `#24477 `__: MAINT: Remove deprecated functions [NEP 52] +* `#24479 `__: CI: Implements Cross-Compile Builds for armhf, ppc64le, and s390x +* `#24481 `__: DOC: Rm np.who from autosummary. +* `#24483 `__: NEP: add NEP 55 for a variable width string dtype +* `#24484 `__: BUG: fix NPY_cast_info error handling in choose +* `#24485 `__: DOC: Fix some broken links +* `#24486 `__: BUG: ``asv dev`` has been removed, use ``asv run`` instead. +* `#24487 `__: DOC: Fix reference warning in some rst and code files. +* `#24488 `__: MAINT: Stop testing on ppc64le. 
+* `#24493 `__: CI: GitHub Actions CI job restructuring +* `#24494 `__: API: Remove deprecated ``msort`` function +* `#24498 `__: MAINT: Re-write 16-bit qsort dispatch +* `#24504 `__: DOC: Remove extra indents in docstrings +* `#24505 `__: DOC: Fix mentions in ``isin`` docs +* `#24510 `__: DOC: Add missing changelogs for NEP 52 PRs +* `#24511 `__: BUG: Use a default assignment for git_hash [skip ci] +* `#24513 `__: API: Update ``lib.histograms`` namespace +* `#24515 `__: BUG: fix issue with git-version script, needs a shebang to run +* `#24516 `__: DOC: unpin sphinx +* `#24517 `__: MAINT: Harmonize fortranobject, drop C99 style for loop +* `#24518 `__: MAINT: Add expiration notes for NumPy 2.0 removals +* `#24519 `__: MAINT: remove ``setup.py`` and other files for distutils builds +* `#24520 `__: CI: remove obsolete jobs, and move macOS and conda Azure jobs... +* `#24523 `__: CI: switch the Cygwin job to Meson +* `#24527 `__: TYP: add kind argument to numpy.isin type specification +* `#24528 `__: MAINT: Bump actions/checkout from 3.5.3 to 3.6.0 +* `#24532 `__: ENH: ``meson`` backend for ``f2py`` +* `#24535 `__: CI: remove spurious wheel build action runs +* `#24536 `__: API: Update ``lib.nanfunctions`` namespace +* `#24537 `__: API: Update ``lib.type_check`` namespace +* `#24538 `__: API: Update ``lib.function_base`` namespace +* `#24539 `__: CI: fix CircleCI job for move to Meson +* `#24540 `__: API: Add ``lib.array_utils`` namespace +* `#24543 `__: DOC: re-pin sphinx<7.2 +* `#24547 `__: DOC: Cleanup removed objects +* `#24549 `__: DOC: fix typos in percentile documentation +* `#24551 `__: Update .mailmap 2 +* `#24555 `__: BUG, ENH: Fix ``iso_c_binding`` type maps and fix ``bind(c)``... 
+* `#24556 `__: BUG: fix comparisons between masked and unmasked structured arrays +* `#24559 `__: BUG: ensure nomask in comparison result is not broadcast +* `#24560 `__: CI/BENCH: move more jobs to Meson and fix all broken benchmarks +* `#24562 `__: DOC: Fix typos +* `#24564 `__: API: Readd ``add_docstring`` and ``add_newdoc`` to ``np.lib`` +* `#24566 `__: API: Update ``lib.shape_base`` namespace +* `#24567 `__: API: Update ``arraypad``,``arraysetops``, ``ufunclike`` and ``utils``... +* `#24570 `__: CI: Exclude import libraries from list of DLLs on Cygwin. +* `#24571 `__: MAINT: Add tests for Polynomial with fractions.Fraction coefficients +* `#24573 `__: DOC: Update building docs to use Meson +* `#24577 `__: API: Update ``lib.twodim_base`` namespace +* `#24578 `__: API: Update ``lib.polynomial`` and ``lib.npyio`` namespaces +* `#24579 `__: DOC: fix ``import mat`` warning. +* `#24580 `__: API: Update ``lib.stride_tricks`` namespace +* `#24581 `__: API: Update ``lib.index_tricks`` namespace +* `#24582 `__: DOC: fix typos in ndarray.setflags doc +* `#24584 `__: BLD: fix ``_umath_linalg`` dependencies +* `#24587 `__: API: Cleaning ``numpy/__init__.py`` and main namespace - Part 5... 
+* `#24589 `__: NEP: fix typos and formatting in NEP 55 +* `#24596 `__: BUG: Fix hash of user-defined dtype +* `#24598 `__: DOC: fix two misspellings in documentation +* `#24599 `__: DOC: unpin sphinx to pick up 7.2.5 +* `#24600 `__: DOC: wrong name in docs +* `#24601 `__: BLD: meson-cpu: fix SIMD support on platforms with no features +* `#24605 `__: DOC: fix isreal docstring (complex -> imaginary) +* `#24607 `__: DOC: Fix import find_common_type warning[skip actions][skip cirrus][s… +* `#24610 `__: MAINT: Avoid creating an intermediate array in np.quantile +* `#24611 `__: TYP: Add the missing ``casting`` keyword to ``np.clip`` +* `#24612 `__: DOC: Replace "cube cube-root" with "cube root" in cbrt docstring +* `#24618 `__: DOC: Fix markups for code blocks +* `#24620 `__: DOC: Update NEP 52 file +* `#24623 `__: TYP: Explicitly declare ``dtype`` and ``generic`` as hashable +* `#24625 `__: CI: Switch SIMD tests to meson +* `#24626 `__: DOC: add release notes link to PyPI. +* `#24628 `__: TYP: Allow ``binary_repr`` to accept any object implementing... +* `#24631 `__: DOC: Clarify usage of --include-paths as an f2py CLI argument +* `#24634 `__: API: Rename ``numpy/core`` to ``numpy/_core`` [NEP 52] +* `#24635 `__: ENH: Refactor the typing "reveal" tests using ``typing.assert_type`` +* `#24636 `__: MAINT: Bump actions/checkout from 3.6.0 to 4.0.0 +* `#24643 `__: TYP, MAINT: General type annotation maintenance +* `#24644 `__: MAINT: remove the ``oldnumeric.h`` header +* `#24657 `__: Add read-only token to linux_qemu.yml +* `#24658 `__: BUG, ENH: Access ``PyArrayMultiIterObject`` fields using macros. 
+* `#24663 `__: ENH: optimisation of array_equal +* `#24664 `__: BLD: fix bug in random.mtrand extension, don't link libnpyrandom +* `#24666 `__: MAINT: Bump actions/upload-artifact from 3.1.2 to 3.1.3 +* `#24667 `__: DOC: TEST.rst: add example with ``pytest.mark.parametrize`` +* `#24671 `__: BLD: build wheels for 32-bit Python on Windows, using MSVC +* `#24672 `__: MAINT: Bump actions/dependency-review-action from 3.0.8 to 3.1.0 +* `#24674 `__: DOC: Remove extra indents in documents +* `#24677 `__: DOC: improve the docstring's examples for np.searchsorted +* `#24679 `__: MAINT: Refactor of ``numpy/core/_type_aliases.py`` +* `#24680 `__: ENH: add parameter ``strict`` to ``assert_allclose`` +* `#24681 `__: BUG: Fix weak promotion with some mixed float/int dtypes +* `#24682 `__: API: Remove ``ptp``, ``itemset`` and ``newbyteorder`` from ``np.ndarray``... +* `#24690 `__: DOC: Fix reference warning in some rst files +* `#24691 `__: ENH: Add the Array Iterator API to Cython +* `#24693 `__: DOC: NumPy 2.0 migration guide +* `#24695 `__: CI: enable use of Cirrus CI compute credits by collaborators +* `#24696 `__: DOC: Updated the f2py docs to remove a note on ``-fimplicit-none`` +* `#24697 `__: API: Readd ``sctypeDict`` to the main namespace +* `#24698 `__: BLD: fix issue with compiler selection during cross compilation +* `#24702 `__: DOC: Fix typos +* `#24705 `__: TYP: Add annotations for the py3.12 buffer protocol +* `#24710 `__: BUG: Fix np.quantile([0, 1], 0, method='weibull') +* `#24711 `__: BUG: Fix np.quantile([Fraction(2,1)], 0.5) +* `#24714 `__: DOC: Update asarray docstring to use shares_memory +* `#24715 `__: DOC: Fix trailing backticks characters. +* `#24716 `__: CI: do apt update before apt install +* `#24717 `__: MAINT: remove relaxed strides debug build setting +* `#24721 `__: DOC: Doc fixes and updates. +* `#24725 `__: MAINT: Update main after 1.26.0 release. +* `#24733 `__: BLD, BUG: Fix build failure for host flags e.g. ``-march=native``... 
+* `#24735 `__: MAINT: Update RELEASE_WALKTHROUGH +* `#24740 `__: MAINT: Bump pypa/cibuildwheel from 2.15.0 to 2.16.0 +* `#24741 `__: MAINT: Remove cibuildwheel pin in cirrus_wheels +* `#24745 `__: ENH: Change default values in polynomial package +* `#24752 `__: DOC: Fix reference warning in some rst files +* `#24753 `__: BLD: add libquadmath to licences and other tweaks +* `#24758 `__: ENH: fix printing structured dtypes with a non-legacy dtype member +* `#24762 `__: BUG: Fix order of Windows OS detection macros. +* `#24766 `__: DOC: add a note on the ``.c.src`` format to the distutils migration... +* `#24770 `__: ENH: add parameter ``strict`` to ``assert_equal`` +* `#24772 `__: MAINT: align test_dispatcher s390x targets with _umath_tests_mtargets +* `#24775 `__: ENH: add parameter ``strict`` to ``assert_array_less`` +* `#24777 `__: BUG: ``numpy.array_api``: fix ``linalg.cholesky`` upper decomp... +* `#24778 `__: BUG: Fix DATA statements for f2py +* `#24780 `__: DOC: Replace http:// by https:// +* `#24781 `__: MAINT, DOC: fix typos found by codespell +* `#24787 `__: DOC: Closes issue #24730, 'sigma' to 'signum' in piecewise example +* `#24791 `__: BUG: Fix f2py to enable use of string optional inout argument +* `#24792 `__: TYP,DOC: Document the ``np.number`` parameter type as invariant +* `#24793 `__: MAINT: fix licence path win +* `#24795 `__: MAINT : fix spelling mistake for "imaginary" param in _read closes... 
+* `#24798 `__: MAINT: Bump actions/checkout from 4.0.0 to 4.1.0 +* `#24799 `__: MAINT: Bump maxim-lobanov/setup-xcode from 1.5.1 to 1.6.0 +* `#24802 `__: BLD: updated vendored-meson/meson for mips64 fix +* `#24805 `__: DOC: Fix reference warning in some rst files +* `#24806 `__: BUG: Fix build on ppc64 when the baseline set to Power9 or higher +* `#24807 `__: API: Remove zero names from dtype aliases +* `#24811 `__: DOC: explain why we avoid string.ascii_letters +* `#24812 `__: MAINT: Bump pypa/cibuildwheel from 2.16.0 to 2.16.1 +* `#24816 `__: MAINT: Upgrade to spin 0.7 +* `#24817 `__: DOC: Fix markups for emphasis +* `#24818 `__: API: deprecate size-2 inputs for ``np.cross`` [Array API] +* `#24820 `__: MAINT: remove ``wheel`` as a build dependency +* `#24825 `__: DOC: Fix docstring of matrix class +* `#24828 `__: BUG, SIMD: use scalar cmul on bad Apple clang x86_64 +* `#24834 `__: DOC: Update debugging section +* `#24835 `__: ENH: Add ufunc for np.char.isalpha +* `#24839 `__: BLD: use scipy-openblas wheel +* `#24845 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24847 `__: DOC: Fix reference warning in some rst files +* `#24848 `__: DOC: TESTS.rst: suggest np.testing assertion function strict=True +* `#24854 `__: MAINT: Remove 'a' dtype alias +* `#24858 `__: ENH: Extend np.add ufunc to work with unicode and byte dtypes +* `#24860 `__: MAINT: Bump pypa/cibuildwheel from 2.16.1 to 2.16.2 +* `#24864 `__: MAINT: Xfail test failing on PyPy. +* `#24866 `__: API: Add ``NumpyUnpickler`` +* `#24867 `__: DOC: Update types table +* `#24868 `__: ENH: Add find/rfind ufuncs for unicode and byte dtypes +* `#24869 `__: BUG: Fix ma.convolve if propagate_mask=False +* `#24875 `__: DOC: testing.assert_array_equal: distinguish from assert_equal +* `#24876 `__: BLD: fix math func feature checks, fix FreeBSD build, add CI... +* `#24877 `__: ENH: testing: argument ``err_msg`` of assertion functions can be... 
+* `#24878 `__: ENH: isclose/allclose: support array_like ``atol``/``rtol`` +* `#24880 `__: BUG: Fix memory leak in timsort's buffer resizing +* `#24883 `__: BLD: fix "Failed to guess install tag" in meson-log.txt, add... +* `#24884 `__: DOC: replace 'a' dtype with 'S' in format_parser docs +* `#24886 `__: DOC: Fix eigenvector typo in linalg.py docs +* `#24887 `__: API: Add ``diagonal`` and ``trace`` to ``numpy.linalg`` [Array API] +* `#24888 `__: API: Make ``intp`` ``ssize_t`` and introduce characters nN +* `#24891 `__: MAINT: Bump ossf/scorecard-action from 2.2.0 to 2.3.0 +* `#24893 `__: ENH: meson: implement BLAS/LAPACK auto-detection and many CI... +* `#24896 `__: API: Add missing deprecation and release note files +* `#24901 `__: MAINT: Bump actions/setup-python from 4.7.0 to 4.7.1 +* `#24904 `__: BUG: loongarch doesn't use REAL(10) +* `#24910 `__: BENCH: Fix benchmark bug leading to failures +* `#24913 `__: DOC: fix typos +* `#24915 `__: API: Allow comparisons with and between any python integers +* `#24920 `__: MAINT: Reenable PyPy wheel builds. +* `#24922 `__: API: Add ``np.long`` and ``np.ulong`` +* `#24923 `__: ENH: Add Cython enumeration for NPY_FR_GENERIC +* `#24925 `__: DOC: Fix parameter markups in ``c-api/ufunc.rst`` +* `#24927 `__: DOC: how-to-io.rst: document solution for NumPy JSON serialization +* `#24930 `__: MAINT: Update main after 1.26.1 release. 
+* `#24931 `__: ENH: testing: consistent names for actual and desired results +* `#24935 `__: DOC: Update lexsort docstring for axis kwargs +* `#24938 `__: DOC: Add warning about ill-conditioning to linalg.inv docstring +* `#24939 `__: DOC: Add legacy directive to mark outdated objects +* `#24940 `__: API: Add ``svdvals`` to ``numpy.linalg`` [Array API] +* `#24941 `__: MAINT: Bump actions/checkout from 4.1.0 to 4.1.1 +* `#24943 `__: MAINT: don't warn for symbols needed by import_array() +* `#24945 `__: MAINT: Make ``numpy.fft.helper`` private +* `#24946 `__: MAINT: Make ``numpy.linalg.linalg`` private +* `#24947 `__: ENH: Add startswith & endswith ufuncs for unicode and bytes dtypes +* `#24949 `__: API: Enforce ABI version and print info when compiled against... +* `#24950 `__: TEST: Add test for checking functions' one location rule +* `#24951 `__: ENH: Add isdigit/isspace/isdecimal/isnumeric ufuncs for string... +* `#24953 `__: DOC: Indicate shape param of ndarray.reshape is position-only +* `#24958 `__: MAINT: Remove unhelpful error replacements from ``import_array()`` +* `#24959 `__: MAINT: Python API cleanup nitpicks +* `#24967 `__: BLD: use classic linker on macOS, the new one in XCode 15 has... +* `#24968 `__: BLD: mingw-w64 build fixes +* `#24969 `__: MAINT: fix a few issues with CPython main/3.13.0a1 +* `#24970 `__: BLD: Use the correct Python interpreter when running tempita.py +* `#24975 `__: DOC: correct Logo SVG files rendered in dark by Figma +* `#24978 `__: MAINT: testing: rename parameters x/y to actual/desired +* `#24979 `__: BLD: clean up incorrect-but-hardcoded define for ``strtold_l``... +* `#24980 `__: BLD: remove ``NPY_USE_BLAS_ILP64`` environment variable [wheel... 
+* `#24981 `__: DOC: revisions to "absolute beginners" tutorial +* `#24983 `__: ENH: Added a ``lint`` spin command +* `#24984 `__: DOC: fix reference in user/basics.rec.html#record-arrays +* `#24985 `__: MAINT: Disable warnings for items imported by pybind11 +* `#24986 `__: ENH: Added ``changelog`` spin command +* `#24988 `__: ENH: DType API slot for descriptor finalization before array... +* `#24990 `__: MAINT: Bump ossf/scorecard-action from 2.3.0 to 2.3.1 +* `#24991 `__: DOC: add note to default_rng about requiring non-negative seed +* `#24993 `__: BLD: musllinux_aarch64 [wheel build] +* `#24995 `__: DOC: update vectorize docstring for proper rendering of decorator... +* `#24996 `__: DOC: Clarify a point in basic indexing user guide +* `#24997 `__: DOC: Use ``spin`` to generate changelog +* `#25001 `__: DOC: Visually divide main license and bundled licenses in wheels +* `#25005 `__: MAINT: remove LGTM.com configuration file +* `#25006 `__: DOC: update ndarray.item docstring +* `#25008 `__: BLD: unvendor meson-python +* `#25010 `__: MAINT: test-refactor of ``numpy/_core/numeric.py`` +* `#25016 `__: DOC: standardize capitalization of headings +* `#25017 `__: ENH: Added ``notes`` command for spin +* `#25019 `__: Update .mailmap +* `#25022 `__: TYP: add None to ``__getitem__`` in ``numpy.array_api`` +* `#25029 `__: DOC: "What is NumPy?" 
section of the documentation +* `#25030 `__: DOC: Include ``np.long`` in ``arrays.scalars.rst`` +* `#25032 `__: MAINT: Add missing ``noexcept`` to shuffle helpers +* `#25037 `__: MAINT: Unpin urllib3 for anaconda-client install +* `#25039 `__: MAINT: Adjust typing for readded ``np.long`` +* `#25040 `__: BLD: make macOS version check for Accelerate NEWLAPACK more robust +* `#25042 `__: BUG: ensure passing ``np.dtype`` to itself doesn't crash +* `#25045 `__: ENH: Vectorize np.sort and np.partition with AVX2 +* `#25050 `__: TST: Ensure test is not run on 32bit platforms +* `#25051 `__: MAINT: Make bitfield integers unsigned +* `#25054 `__: API: Introduce ``np.isdtype`` function [Array API] +* `#25055 `__: BLD: improve detection of Netlib libblas/libcblas/liblapack +* `#25056 `__: DOC: Small fixes for NEP 52 +* `#25057 `__: MAINT: Add ``npy_2_compat.h`` which is designed to work also if... +* `#25059 `__: MAINT: ``np.long`` typing nitpick +* `#25060 `__: DOC: standardize capitalization of NEP headings +* `#25062 `__: ENH: Change add/isalpha ufuncs to use buffer class & general... +* `#25063 `__: BLD: change default of the ``allow-noblas`` option to true +* `#25064 `__: DOC: Fix description of auto bin_width +* `#25067 `__: DOC: add missing word to internals.rst +* `#25068 `__: TST: skip flaky test in test_histogram +* `#25072 `__: MAINT: default to C11 rather than C99, fix most build warnings... +* `#25073 `__: BLD,BUG: quadmath required where available [f2py] +* `#25078 `__: BUG: alpha doesn't use REAL(10) +* `#25079 `__: API: Introduce ``np.astype`` [Array API] +* `#25080 `__: API: Add and redefine ``numpy.bool`` [Array API] +* `#25081 `__: DOC: Provide migration notes for scalar inspection functions +* `#25082 `__: MAINT: Bump actions/dependency-review-action from 3.1.0 to 3.1.1 +* `#25085 `__: BLD: limit scipy-openblas32 wheel to 0.3.23.293.2 +* `#25086 `__: API: Add Array API aliases (math, bitwise, linalg, misc) [Array... 
+* `#25088 `__: API: Add Array API setops [Array API] +* `#25089 `__: BUG, BLD: Fixed VSX4 feature check +* `#25090 `__: BUG: Make n a long int for np.random.multinomial +* `#25091 `__: MAINT: Bump actions/dependency-review-action from 3.1.1 to 3.1.2 +* `#25092 `__: BLD: Fix features.h detection and blocklist complex trig funcs... +* `#25094 `__: BUG: Avoid intp conversion regression in Cython 3 +* `#25099 `__: DOC: Fix license identifier for OpenBLAS +* `#25101 `__: API: Add ``outer`` to ``numpy.linalg`` [Array API] +* `#25102 `__: MAINT: Print towncrier output file location +* `#25104 `__: ENH: Add str_len & count ufuncs for unicode and bytes dtypes +* `#25105 `__: API: Remove ``__array_prepare__`` +* `#25111 `__: TST: Use ``meson`` for testing ``f2py`` +* `#25123 `__: MAINT,BUG: Never import distutils above 3.12 [f2py] +* `#25124 `__: DOC: ``f2py`` CLI documentation enhancements +* `#25127 `__: DOC: angle: update documentation of convention when magnitude... +* `#25129 `__: BUG: Fix FP overflow error in division when the divisor is scalar +* `#25131 `__: MAINT: Update main after 1.26.2 release. +* `#25133 `__: DOC: std/var: improve documentation of ``ddof`` +* `#25136 `__: BUG: Fix -fsanitize=alignment issue in numpy/_core/src/multiarray/arraytypes.c.src +* `#25138 `__: API: Remove The MapIter API from public +* `#25139 `__: MAINT: Bump actions/dependency-review-action from 3.1.2 to 3.1.3 +* `#25140 `__: DOC: clarify boolean index error message +* `#25141 `__: TST: Explicitly pass NumPy path to cython during tests (also... 
+* `#25144 `__: DOC: Fix typo in NumPy 2.0 migration guide +* `#25145 `__: API: Add ``cross`` to ``numpy.linalg`` [Array API] +* `#25146 `__: BUG: fix issues with ``newaxis`` and ``linalg.solve`` in ``numpy.array_api`` +* `#25149 `__: API: bump MAXDIMS/MAXARGS to 64 introduce NPY_AXIS_RAVEL +* `#25151 `__: BLD, CI: revert pinning scipy-openblas +* `#25152 `__: ENH: Add strip/lstrip/rstrip ufuncs for unicode and bytes +* `#25154 `__: MAINT: Cleanup mapiter struct a bit +* `#25155 `__: API: Add ``matrix_norm``, ``vector_norm``, ``vecdot`` and ``matrix_transpose`` [Array API] +* `#25156 `__: API: Remove PyArray_REFCNT and NPY_REFCOUNT +* `#25157 `__: DOC: ``np.sort`` doc fix contiguous axis +* `#25158 `__: API: Make ``encoding=None`` the default in loadtxt +* `#25160 `__: BUG: Fix moving compiled executable to root with f2py -c on Windows +* `#25161 `__: API: Remove ``PyArray_GetCastFunc`` and any guarantee that ``->castfuncs``... +* `#25162 `__: NEP: Update NEP 55 +* `#25165 `__: DOC: mention submodule init in source install instructions +* `#25167 `__: MAINT: Add ``array-api-tests`` CI stage, add ``ndarray.__array_namespace__`` +* `#25168 `__: API: Introduce ``copy`` argument for ``np.asarray`` [Array API] +* `#25169 `__: API: Introduce ``correction`` argument for ``np.var`` and ``np.std``... 
+* `#25171 `__: ENH: Add replace ufunc for bytes and unicode dtypes +* `#25176 `__: DOC: replace integer overflow example +* `#25181 `__: BUG: Disallow shadowed modulenames +* `#25184 `__: MAINT,DOC: Fix inline licenses ``f2py`` +* `#25185 `__: MAINT: Fix sneaky typo [f2py] +* `#25186 `__: BUG: Handle ``common`` blocks with ``kind`` specifications from modules +* `#25193 `__: MAINT: Kill all instances of f2py.compile +* `#25194 `__: DOC: try to be nicer about f2py.compile +* `#25195 `__: BUG: Fix single to half-precision conversion on PPC64/VSX3 +* `#25196 `__: DOC: ``f2py`` rewrite with ``meson`` details +* `#25198 `__: MAINT: Replace deprecated ctypes.ARRAY(item_type, size) with... +* `#25209 `__: ENH: Expose abstract DType classes in the experimental DType... +* `#25212 `__: BUG: Don't try to grab callback modules +* `#25221 `__: TST: f2py: fix issue in test skip condition +* `#25222 `__: DOC: Fix wrong return type for PyArray_CastScalarToCType +* `#25223 `__: MAINT: Bump mymindstorm/setup-emsdk from 12 to 13 +* `#25226 `__: BUG: Handle ``iso_c_type`` mappings more consistently +* `#25228 `__: DOC: Improve description of ``axis`` parameter for ``np.median`` +* `#25230 `__: BUG: Raise error in ``np.einsum_path`` when output subscript is... +* `#25232 `__: DEV: Enable the ``spin lldb`` +* `#25233 `__: API: Add ``device`` and ``to_device`` to ``numpy.ndarray`` [Array... 
+* `#25238 `__: MAINT: do not use ``long`` type +* `#25243 `__: BUG: Fix non-contiguous 32-bit memory load when ARM/Neon is enabled +* `#25246 `__: CI: Add CI test for riscv64 +* `#25247 `__: ENH: Enable SVE detection for Highway VQSort +* `#25248 `__: DOC: Add release note for Highway VQSort on AArch64 +* `#25250 `__: DOC: fix typo (alignment) +* `#25253 `__: CI: streamline macos_arm64 test +* `#25254 `__: BUG: mips doesn't use REAL(10) +* `#25255 `__: ENH: add new wheel builds using Accelerate on macOS >=14 +* `#25257 `__: TST: PyPy needs another gc.collect on latest versions +* `#25259 `__: BUG: Fix output dtype when calling np.char methods with empty... +* `#25261 `__: MAINT: Bump conda-incubator/setup-miniconda from 2.2.0 to 3.0.0 +* `#25264 `__: MAINT: Bump actions/dependency-review-action from 3.1.3 to 3.1.4 +* `#25267 `__: BUG: Fix module name bug in signature files [urgent] [f2py] +* `#25271 `__: API: Shrink MultiIterObject and make ``NPY_MAXARGS`` a runtime... +* `#25272 `__: DOC: Mention installing threadpoolctl in issue template [skip... +* `#25276 `__: MAINT: Bump actions/checkout from 3 to 4 +* `#25280 `__: TST: Fix fp_noncontiguous and fpclass on riscv64 +* `#25282 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.0 to 3.0.1 +* `#25284 `__: CI: Install Lapack runtime on Cygwin. 
+* `#25287 `__: BUG: Handle .pyf.src and fix SciPy [urgent] +* `#25291 `__: MAINT: Allow initializing new-style dtypes inside numpy +* `#25292 `__: API: C-API removals +* `#25295 `__: MAINT: expose and use dtype classes in internal API +* `#25297 `__: BUG: enable linking of external libraries in the f2py Meson backend +* `#25299 `__: MAINT: Performance improvement of polyutils.as_series +* `#25300 `__: DOC: Document how to check for a specific dtype +* `#25302 `__: DOC: Clarify virtualenv setup and dependency installation +* `#25308 `__: MAINT: Update environment.yml to match *_requirements.txt +* `#25309 `__: DOC: Fix path to svg logo files +* `#25310 `__: DOC: Improve documentation for fill_diagonal +* `#25313 `__: BUG: Don't use the _Complex extension in C++ mode +* `#25314 `__: MAINT: Bump actions/setup-python from 4.7.1 to 4.8.0 +* `#25315 `__: MAINT: expose PyUFunc_AddPromoter in the internal ufunc API +* `#25316 `__: CI: remove no-blas=true from spin command on macos_arm64 ci [skip... +* `#25317 `__: ENH: Add fft optional extension submodule to numpy.array_api +* `#25321 `__: MAINT: Run f2py's meson backend with the same python that runs... 
+* `#25322 `__: DOC: Add examples for ``np.char`` functions +* `#25324 `__: DOC: Add examples for ``np.polynomial.polynomial`` functions +* `#25326 `__: DOC: Add examples to functions in ``np.polynomial.hermite`` +* `#25328 `__: DOC: Add ``np.polynomial.laguerre`` examples +* `#25329 `__: BUG: fix refcounting for dtypemeta aliases +* `#25331 `__: MAINT: Bump actions/setup-python from 4.8.0 to 5.0.0 +* `#25335 `__: BUG: Fix np.char for scalars and add tests +* `#25336 `__: API: make arange ``start`` argument positional-only +* `#25338 `__: BLD: update vendored Meson for AIX shared library fix +* `#25339 `__: DOC: fix some rendering and formatting issues in ``unique_*`` docstrings +* `#25340 `__: DOC: devguide cleanup: remove Gitwash and too verbose Git details +* `#25342 `__: DOC: Add more ``np.char`` documentation +* `#25346 `__: ENH: Enable 16-bit VQSort routines on AArch64 +* `#25347 `__: API: Introduce stringdtype [NEP 55] +* `#25350 `__: DOC: add "building from source" docs +* `#25354 `__: DOC: Add example for ``np.random.default_rng().binomial()`` +* `#25355 `__: DOC: Fix typo in ``np.random.default_rng().logistic()`` +* `#25356 `__: DOC: Add example for ``np.random.default_rng().exponential()`` +* `#25357 `__: DOC: Add example for ``np.random.default_rng().geometric()`` +* `#25361 `__: BUG: Fix regression with ``f2py`` wrappers when modules and subroutines... 
+* `#25364 `__: ENH,BUG: Handle includes for meson backend +* `#25367 `__: DOC: Fix refguide check script +* `#25368 `__: MAINT: add npy_gil_error to acquire the GIL and set an error +* `#25369 `__: DOC: Correct documentation for polyfit() +* `#25370 `__: ENH: Make numpy.array_api more portable +* `#25372 `__: BUG: Fix failing test_features on SapphireRapids +* `#25376 `__: BUG: Fix build issues on SPR and avx512_qsort float16 +* `#25383 `__: MAINT: Init ``base`` in cpu_avx512_kn +* `#25384 `__: MAINT: Add missing modules to refguide test +* `#25388 `__: API: Adjust ``linalg.pinv`` and ``linalg.cholesky`` to Array... +* `#25389 `__: BUG: ufunc api: update multiarray_umath import path +* `#25394 `__: MAINT: Bump actions/upload-artifact from 3.1.3 to 4.0.0 +* `#25397 `__: BUG, SIMD: Fix quicksort build error when Highway/SVE is enabled +* `#25398 `__: DOC: Plot exact distributions in logistic, logseries and weibull... +* `#25404 `__: DOC: Improve ``np.histogram`` docs +* `#25409 `__: API,MAINT: Reorganize array-wrap calling and introduce ``return_scalar`` +* `#25412 `__: DOC: Clean up of ``_generator.pyx`` +* `#25413 `__: DOC: Add example to ``rng.beta(...)`` +* `#25414 `__: DOC: Add missing examples to ``np.ma`` +* `#25416 `__: ENH: define a gufunc for vecdot (with BLAS support) +* `#25417 `__: MAINT: Bump actions/setup-node from 3.8.1 to 4.0.1 +* `#25418 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#25425 `__: BUG: Fix two errors related to not checking for failed allocations +* `#25426 `__: BUG: avoid seg fault from OOB access in RandomState.set_state() +* `#25430 `__: TST: Fix test_numeric on riscv64 +* `#25431 `__: DOC: Improve ``np.mean`` documentation of the out argument +* `#25432 `__: DOC: Add ``numpy.lib`` docs page +* `#25434 `__: API,BUG,DEP: treat trailing comma as a tuple and thus a structured... 
+* `#25437 `__: API: Add ``rtol`` to ``matrix_rank`` and ``stable`` [Array API] +* `#25438 `__: DEV: add ``ninja`` to ``test_requirements.txt`` and clean up... +* `#25439 `__: BLD: remove ``-fno-strict-aliasing``, ``--strip-debug`` from cibuildwheel... +* `#25440 `__: CI: show meson-log.txt in Cirrus wheel builds +* `#25441 `__: API,ENH: Change definition of complex sign +* `#25443 `__: TST: fix issue with dtype conversion in ``test_avx_based_ufunc`` +* `#25444 `__: TST: remove ``TestNewBufferProtocol.test_error_too_many_dims`` +* `#25446 `__: Downgrade Highway to latest released version (1.0.7) +* `#25448 `__: TYP: Adjust type annotations for Numpy 2.0 changes +* `#25449 `__: TYP,CI: bump mypy from 1.5.1 to 1.7.1 +* `#25450 `__: MAINT: make the import-time check for old Accelerate more specific +* `#25451 `__: DOC: Fix names of subroutines. +* `#25453 `__: TYP,MAINT: Change more overloads to play nice with pyright +* `#25454 `__: DOC: fix typo ``v_stack`` in 2.0 migration guide +* `#25455 `__: BUG: fix macOS version checks for Accelerate support +* `#25456 `__: BLD: optimize BLAS and LAPACK search order +* `#25459 `__: BLD: fix uninitialized variable warnings from simd/neon/memory.h +* `#25462 `__: TST: skip two tests in aarch64 linux wheel builds +* `#25463 `__: ENH: Add np.strings namespace +* `#25473 `__: MAINT: use cholesky_up gufunc for upper Cholesky decomposition +* `#25484 `__: BUG: handle scalar input in np.char.replace +* `#25492 `__: DOC: update signature of PyArray_Conjugate +* `#25495 `__: API: adjust nD fft ``s`` param to array API +* `#25501 `__: DOC: Update a few interpreted text to verbatim/code. +* `#25503 `__: BLD: unpin cibuildwheel [wheel build] +* `#25504 `__: DOC: add pickleshare to doc dependencies +* `#25505 `__: BLD: replace uses of openblas_support with openblas wheels [wheel... 
+* `#25507 `__: DOC: mention string, bytes, and void dtypes in dtype intro +* `#25510 `__: BUG:Fix incorrect 'inner' method type annotation in __array_ufunc_ +* `#25511 `__: DOC: np.any: add multidimensional example +* `#25512 `__: DOC: add a section for dealing with NumPy 2.0 for downstream... +* `#25515 `__: BUG: three string ufunc bugs, one leading to segfault +* `#25516 `__: MAINT,BUG: Fix ``--dep`` when ``-L -l`` are present +* `#25520 `__: DOC: unambiguous np.histogram dtype description +* `#25521 `__: DOC: Improve error messages for random.choice +* `#25522 `__: BUG: fix incorrect strcmp implementation for unequal length strings +* `#25524 `__: MAINT: Update main after 1.26.3 release. +* `#25525 `__: MAINT: optimization and broadcasting for .replace() method for... +* `#25527 `__: DOC: Improve ``polynomial`` docs +* `#25528 `__: DOC: Add notes to ``rng.bytes()`` +* `#25529 `__: DOC: Add ``rng.f()`` plot +* `#25530 `__: DOC: Add ``rng.chisquare()`` plot +* `#25531 `__: API: allow building in cython with Py_LIMITED_API +* `#25533 `__: DOC: Improve ``poisson`` plot +* `#25534 `__: DOC: Indicate order is kwarg-only for ndarray.reshape. +* `#25535 `__: MAINT: fix ufunc debug tracing +* `#25536 `__: MAINT, ENH: Implement calling pocketfft via gufunc and allow... 
+* `#25538 `__: MAINT: Bump actions/dependency-review-action from 3.1.4 to 3.1.5 +* `#25540 `__: DOC: Fix typo in random.geometric docstring +* `#25542 `__: NEP: add NEP 56 on array API standard support in main namespace +* `#25545 `__: MAINT: Update copyright to 2024 (LICENSE & DOC) +* `#25549 `__: DOC: Using ``f2py`` with ``fypp`` +* `#25553 `__: BUG: Fix return shape of inverse_indices in unique_inverse +* `#25554 `__: BUG: support axes argument in np.linalg.tensordot +* `#25555 `__: MAINT, BLD: Fix unused inline functions warnings on clang +* `#25558 `__: ENH: Add replace ufunc to np.strings +* `#25560 `__: BUG: np.linalg.vector_norm: return correct shape for keepdims +* `#25563 `__: SIMD: Extend the enabled targets for Google Highway quicksort +* `#25569 `__: DOC: Fix a typo +* `#25570 `__: ENH: change list-of-array to tuple-of-array returns (Numba compat) +* `#25571 `__: MAINT: Return size_t from num_codepoints in string ufuncs Buffer... +* `#25573 `__: MAINT: add a C alias for the default integer DType +* `#25574 `__: DOC: ensure that docstrings for np.ndarray.copy, np.copy and... +* `#25575 `__: ENH: Wrap string ufuncs in np.strings to allow default arguments +* `#25579 `__: MAINT: Bump actions/upload-artifact from 4.0.0 to 4.1.0 +* `#25582 `__: CI: Bump azure pipeline timeout to 120 minutes +* `#25592 `__: BUG: Fix undefined behavior when converting NaN float16 to datetime... +* `#25593 `__: DOC: fix typos in 2.0 migration guide +* `#25594 `__: MAINT: replace uses of cython numpy.math.pxd with native routines +* `#25595 `__: BUG: Allow ``None`` as ``api_version`` in ``__array_namespace__``... 
+* `#25598 `__: BLD: include fix for MinGW platform detection +* `#25603 `__: DOC: Update tensordot documentation +* `#25608 `__: MAINT: skip installing rtools on azure +* `#25609 `__: DOC: fft: correct docs about recent deprecations +* `#25610 `__: ENH: Vectorize argsort and argselect with AVX2 +* `#25613 `__: BLD: fix building for windows ARM64 +* `#25614 `__: MAINT: Bump actions/dependency-review-action from 3.1.5 to 4.0.0 +* `#25615 `__: MAINT: add ``newaxis`` to ``__all__`` in ``numpy.array_api`` +* `#25625 `__: NEP: update NEP 55 text to match current stringdtype implementation +* `#25627 `__: TST: Fix f2py doc test collection in editable installs +* `#25628 `__: TST: Fix test_warning_calls on Python 3.12 +* `#25629 `__: TST: Bump pytz to 2023.3.post1 +* `#25631 `__: BUG: Use large file fallocate on 32 bit linux platforms +* `#25636 `__: MAINT: Move np.char methods to np.strings +* `#25638 `__: MAINT: Bump actions/upload-artifact from 4.1.0 to 4.2.0 +* `#25641 `__: DOC: Remove a duplicated argument ``shape`` in ``empty_like`` +* `#25646 `__: DOC: Fix links to f2py codes +* `#25648 `__: DOC: fix syntax highlighting issues in added f2py docs +* `#25650 `__: DOC: improve structure of reference guide +* `#25651 `__: ENH: Allow strings in logical ufuncs +* `#25652 `__: BUG: Fix AVX512 build flags on Intel Classic Compiler +* `#25656 `__: DOC: add autosummary API reference for DType clases. +* `#25657 `__: MAINT: fix warning about visibility tag on clang +* `#25660 `__: MAINT: Bump mymindstorm/setup-emsdk from 13 to 14 +* `#25662 `__: BUG: Allow NumPy int scalars to be divided by out-of-bound Python... 
+* `#25664 `__: DOC: minor improvement to the partition() docstrings +* `#25668 `__: BUG: correct irfft with n=1 on larger input +* `#25669 `__: BLD: fix potential issue with escape sequences in ``__config__.py`` +* `#25671 `__: MAINT: Bump actions/upload-artifact from 4.2.0 to 4.3.0 +* `#25672 `__: BUG: check for overflow when converting a string to an int scalar +* `#25673 `__: BUG: Ensure meson updates generated umath doc correctly. +* `#25674 `__: DOC: add a section on NumPy's module structure to the refguide +* `#25676 `__: NEP: add note on Python integer "exceptions" to NEP 50 +* `#25678 `__: DOC: fix docstring of quantile and percentile +* `#25680 `__: DOC: replace autosummary for numpy.dtypes with enumerated list +* `#25683 `__: DOC: Try add a section on NEP 50 to migration guide +* `#25687 `__: Update to OpenBLAS 0.3.26 +* `#25689 `__: MAINT: Simplify scalar int division a bit (no need for helper... +* `#25692 `__: DOC: Clarify deprecated width Parameter in numpy.binary_repr... +* `#25695 `__: DOC: empty: standardize notes about uninitialized values +* `#25697 `__: CI: add pinning for scipy-openblas wheels +* `#25699 `__: DOC: Fix some references in document +* `#25707 `__: DOC: fix a small np.einsum example +* `#25709 `__: MAINT: Include header defining backtrace +* `#25710 `__: TST: marks on a fixture have no effect +* `#25711 `__: ENH: support float and longdouble in FFT using C++ pocketfft... +* `#25712 `__: API: Make any and all return booleans by default +* `#25715 `__: [MAINT] Add regression test for np.geomspace +* `#25716 `__: CI: pin cygwin python to 3.9.16-1 [skip cirrus][skip azp][skip... 
+* `#25717 `__: DOC: Fix some minor formatting errors in NEPs +* `#25721 `__: DEP: Finalize future warning move in lstsq default +* `#25723 `__: NEP: Mark NEP 55 accepted +* `#25727 `__: DOC: Remove function name without signature in ``ma`` +* `#25730 `__: ENH: add a pkg-config file and a ``numpy-config`` script +* `#25732 `__: CI: use version 0.3.26.0.2 of scipy-openblas wheels +* `#25734 `__: DOC: Fix markups of code literals in ``polynomial`` +* `#25735 `__: MAINT: Bump pypa/cibuildwheel from 2.16.4 to 2.16.5 +* `#25736 `__: MAINT: Bump actions/cache from 3 to 4 +* `#25738 `__: MAINT: add ``trapezoid`` as the new name for ``trapz`` +* `#25739 `__: TST: run macos_arm64 test on Github Actions +* `#25740 `__: DOC: Fix doctest failure in ``polynomial`` +* `#25745 `__: DEV: add .editorconfig for C/C++ +* `#25751 `__: DOC: Update ruff rule instruction +* `#25753 `__: DOC: Fix ``ufunc.reduceat`` doc for ``dtype`` +* `#25754 `__: API: Expose the dtype C API +* `#25758 `__: DOC: Fix summary table in linalg routines document +* `#25761 `__: DEP: Finalize future warning for shape=1 descriptor dropping... +* `#25763 `__: CI/BLD: fix bash script tests for cibw +* `#25768 `__: DOC: in ufuncs ``dtype`` is not ignored when ``out`` is passed +* `#25772 `__: MAINT: Update main after 1.26.4 release. +* `#25774 `__: DOC: Update docs build dependencies install cmd +* `#25775 `__: ENH: Add index/rindex ufuncs for unicode and bytes dtypes +* `#25776 `__: DOC: Add missing ``np.size`` entry to routines +* `#25779 `__: MAINT: Bump actions/upload-artifact from 4.3.0 to 4.3.1 +* `#25780 `__: MAINT: Bump larsoner/circleci-artifacts-redirector-action from... +* `#25783 `__: DOC: Remove references to ``distutils`` in simd document +* `#25785 `__: MAINT: Bump actions/setup-node from 4.0.1 to 4.0.2 +* `#25788 `__: ENH: Improve performance of np.tensordot +* `#25789 `__: MAINT,API: Always export static inline version of array accessor. 
+* `#25790 `__: MAINT: Private device struct shouldn't be in public header +* `#25791 `__: ENH: Add rest of unary ufuncs for unicode/bytes dtypes +* `#25792 `__: API: Create ``PyArray_DescrProto`` for legacy descriptor registration +* `#25793 `__: MAINT: update docstrings of string ufuncs to mention StringDType +* `#25794 `__: DEP: expire some deprecations +* `#25795 `__: DOC: fix docstring example in f2py.get_include +* `#25796 `__: MAINT: combine string ufuncs by passing on auxilliary data +* `#25797 `__: MAINT: Move ``NPY_VSTRING`` and make ``NPY_NTYPES NPY_TYPES_LEGACY`` +* `#25800 `__: REV: revert tuple/list return type changes for ``*split`` functions +* `#25801 `__: DOC: Update ``np.char.array`` docstring +* `#25802 `__: MAINT,API: Make metadata, c_metadata, fields, and names only... +* `#25803 `__: BLD: restore 'setup-args=-Duse-ilp64=true' in cibuildwheel [wheel... +* `#25804 `__: MAINT: Use preprocessor directive rather than code when adding... +* `#25806 `__: DOC: Update the CPU build options document +* `#25807 `__: DOC: Fix code-block formatting for new PyArray_RegisterDataType... +* `#25812 `__: API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs`` +* `#25813 `__: DOC: Update genfromtxt documentation +* `#25814 `__: MAINT: Use ``_ITEMSIZE`` rather than ``_DESCR(arr)->elsize`` +* `#25816 `__: API: Introduce ``PyDataType_FLAGS`` accessor for public access +* `#25817 `__: ENH: Add more const qualifiers to C API arguments +* `#25821 `__: BUG: ensure that FFT routines can deal with integer and bool... +* `#25822 `__: BLD: use homebrew gfortran +* `#25825 `__: MAINT: Bump actions/dependency-review-action from 4.0.0 to 4.1.0 +* `#25827 `__: DOC: run towncrier to consolidate the 2.0.0 release notes to... 
+* `#25828 `__: DOC: two minor fixes for DType API doc formatting +* `#25830 `__: DOC: Fix typo in nep 0052 +* `#25832 `__: DOC: add back 2.0.0 release note snippets that went missing +* `#25833 `__: DOC: Fix some reference warnings +* `#25834 `__: BUG: ensure static_string.buf is never NULL for a non-null string +* `#25837 `__: DEP: removed deprecated product/cumproduct/alltrue/sometrue +* `#25838 `__: MAINT: Update pinned setuptools for Python < 3.12 +* `#25839 `__: TST: fix Cython compile test which invokes ``meson`` +* `#25842 `__: DOC: Fix some incorrect rst markups +* `#25843 `__: BUG: ensure empty cholesky upper does not hang. +* `#25845 `__: DOC: Fix some typos +* `#25847 `__: MAINT: Adjust rest of string ufuncs to static_data approach +* `#25851 `__: DOC: Fix some reference warnings +* `#25852 `__: ENH: Support exotic installation of nvfortran +* `#25854 `__: BUG: Correctly refcount array descr in empty_like +* `#25855 `__: MAINT: Bump actions/dependency-review-action from 4.1.0 to 4.1.2 +* `#25856 `__: MAINT: Remove unnnecessary size argument in StringDType initializer +* `#25861 `__: CI: make chocolatey fail when a dependency doesn't install +* `#25862 `__: Revert "API: Make ``descr->f`` only accessible through ``PyDataType_GetArrFuncs``" +* `#25864 `__: ENH: Implement multiply ufunc for unicode & bytes +* `#25865 `__: ENH: print traceback after printing ABI mismatch error +* `#25866 `__: API: Fix compat header and add new import helpers +* `#25868 `__: MAINT: Bump actions/dependency-review-action from 4.1.2 to 4.1.3 +* `#25870 `__: BUG: use print to actually output something +* `#25873 `__: Update Highway to 1.1.0 +* `#25874 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.1 to 3.0.2 +* `#25876 `__: API: Remove no-op C API functions +* `#25877 `__: BUG: Include broadcasting for ``rtol`` argument in ``matrix_rank`` +* `#25879 `__: DOC: Add a document entry of ``PyArray_DescrProto`` +* `#25880 `__: DOC: README.md: point to user-friendly OpenSSF 
ScoreCard display +* `#25881 `__: BUG: Fix gh-25867 for used functions and subroutines +* `#25883 `__: BUG: fix typo in 'message' static variable of TestDeprecatedDTypeParenthesizedRepeatCount +* `#25884 `__: BUG: Fix typo in LEGACY_CONS_NON_NEGATVE_INBOUNDS_LONG +* `#25885 `__: DOC: fix typos +* `#25886 `__: MAINT: fix code comment typos in numpy/ directory +* `#25887 `__: BUG: Fix ``PyArray_FILLWBYTE`` Cython declaration +* `#25889 `__: CI: run apt update before apt-install in linux-blas workflow +* `#25890 `__: MAINT: refactor StringDType static_string implementation a bit. +* `#25891 `__: ENH: Add expandtabs ufunc for string & unicode dtypes +* `#25894 `__: CI, BLD, TST: Re-enable Emscripten/Pyodide CI job for NumPy +* `#25896 `__: ENH: implement stringdtype <-> timedelta roundtrip casts +* `#25897 `__: API: Make descr->f only accessible through ``PyDataType_GetArrFuncs`` +* `#25900 `__: CI, MAINT: use ``fetch-tags: true`` to speed up NumPy checkouts +* `#25901 `__: BLD: Add meson check to test presence of pocketfft git submodule +* `#25902 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.2 to 3.0.3 +* `#25905 `__: CI: allow job matrixes to run all jobs even when one fails +* `#25911 `__: MAINT: remove ``numpy.array_api`` module +* `#25912 `__: MAINT: Bump actions/cache from 4.0.0 to 4.0.1 +* `#25914 `__: API: Remove broadcasting ambiguity from np.linalg.solve +* `#25915 `__: DOC: Fix some document build errors about rst markups +* `#25919 `__: BUG: Ensure non-array logspace base does not influence dtype... 
+* `#25920 `__: NEP: update status fields of many NEPs +* `#25921 `__: DOC: update and copy-edit 2.0.0 release notes +* `#25922 `__: BUG: fix handling of copy keyword argument when calling __array__ +* `#25924 `__: BUG: remove vestiges of array_api [wheel build] +* `#25928 `__: DOC: Add note about np.char & np.strings in 2.0 migration guide +* `#25929 `__: DOC: Add mention of complex number changes to migration guide +* `#25931 `__: BUG: fix reference leak in PyArray_FromArrayAttr_int +* `#25932 `__: TST: skip rather than xfail a few tests to address CI log pollution +* `#25933 `__: MAINT: ensure towncrier can be run >1x, and is included in ``spin``... +* `#25937 `__: DOC: 2.0 release highlights and compat notes changes +* `#25939 `__: DOC: Add entries of ``npy_datetime`` and ``npy_timedelta`` +* `#25943 `__: API: Restructure the dtype struct to be new dtype friendly +* `#25944 `__: BUG: avoid incorrect stringdtype allocator sharing from array... +* `#25945 `__: BLD: try to build most macOS wheels on GHA +* `#25946 `__: DOC: Add and fixup/move docs for descriptor changes +* `#25947 `__: DOC: Fix incorrect rst markups of c function directives +* `#25948 `__: MAINT: Introduce NPY_FEATURE_VERSION_STRING and report it in... +* `#25950 `__: BUG: Fix reference leak in niche user old user dtypes +* `#25952 `__: BLD: use hash for mamba action +* `#25954 `__: API: Expose ``PyArray_Pack`` +* `#25955 `__: API: revert position-only 'start' in 'np.arange' +* `#25956 `__: Draft: [BUG] Fix Polynomial representation tests +* `#25958 `__: BUG: avoid incorrect type punning in NpyString_acquire_allocators +* `#25961 `__: TST, MAINT: Loosen tolerance in fft test. 
+* `#25962 `__: DOC: fix typos and rearrange CI +* `#25965 `__: CI: fix wheel tags for Cirrus macOS arm64 +* `#25973 `__: DOC: Backport gh-25971 and gh-25972 +* `#25977 `__: REL: Prepare for the NumPy 2.0.0b1 release [wheel build] +* `#25983 `__: CI: fix last docbuild warnings +* `#25986 `__: BLD: push a tag builds a wheel +* `#25987 `__: REL: Prepare for the NumPy 2.0.0b1 release (2) [wheel build] +* `#25994 `__: DOC: remove reverted release blurb [skip actions][skip azp][skip... +* `#25996 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25997 `__: REL: Prepare for the NumPy 2.0.0b1 release (3) +* `#26008 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26009 `__: MAINT: Remove sdist task from pavement.py +* `#26022 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#26023 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26034 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26035 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26036 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26040 `__: BUG: Filter out broken Highway platform +* `#26041 `__: BLD: omit pp39-macosx_arm64 from matrix [wheel build] +* `#26042 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26047 `__: ENH: install StringDType promoter for add +* `#26048 `__: MAINT: avoid use of flexible array member in public header +* `#26049 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26050 `__: BUG: fix reference count leak in __array__ internals +* `#26051 `__: BUG: add missing error handling in string to int cast internals +* `#26052 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26053 `__: CI: clean up some unused ``choco install`` invocations +* `#26068 `__: DOC: Backport np.strings docstrings +* `#26073 `__: DOC clarifications on debugging numpy +* `#26074 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26075 `__: BUG: Allow the new string dtype summation to work +* `#26076 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26085 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26109 `__: BUG: adapt cython files to new complex declarations (#26080) +* `#26110 `__: TYP: Adjust ``np.random.integers`` and ``np.random.randint`` +* `#26111 `__: API: Require reduce promoters to start with None to match +* `#26118 `__: MAINT: install all-string promoter for multiply +* `#26122 `__: BUG: fix reference counting error in stringdtype setup +* `#26124 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26127 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26131 `__: MAINT: add missing noexcept clauses +* `#26154 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26167 `__: MAINT: Escalate import warning to an import error +* `#26169 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26170 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26171 `__: ENH: inherit numerical dtypes from abstract ones. +* `#26173 `__: DOC, TST: make ``numpy.version`` officially public +* `#26186 `__: MAINT: Update Pyodide to 0.25.1 +* `#26192 `__: BUG: Infinite Loop in numpy.base_repr +* `#26193 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26194 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... 
+* `#26205 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... +* `#26231 `__: API: Readd np.bool_ typing stub +* `#26256 `__: MAINT: Update array-api-tests job +* `#26259 `__: DOC: Backport various documentation fixes +* `#26262 `__: BLD: update to OpenBLAS 0.3.27.0.1 +* `#26265 `__: MAINT: Fix some typos +* `#26272 `__: BUG: Fixes for ``np.vectorize``. +* `#26283 `__: DOC: correct PR referenced in __array_wraps__ change note +* `#26293 `__: BUG: Ensure seed sequences are restored through pickling (#26260) +* `#26297 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26305 `__: DOC: Bump pydata-sphinx-theme version +* `#26306 `__: MAINT: Robust string meson template substitution +* `#26307 `__: BLD: use newer openblas wheels [wheel build] +* `#26312 `__: DOC: Follow-up fixes for new theme +* `#26330 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26331 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26332 `__: BUG: use PyArray_SafeCast in array_astype +* `#26334 `__: MAINT: Disable compiler sanitizer tests on 2.0.x +* `#26351 `__: ENH: introduce a notion of "compatible" stringdtype instances... +* `#26357 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26358 `__: BUG: Fix rfft for even input length. +* `#26360 `__: MAINT: Simplify bugfix for even rfft +* `#26373 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26374 `__: ENH: add support for nan-like null strings in string replace +* `#26393 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26400 `__: MNT: more gracefully handle spin adding arguments to functions... +* `#26402 `__: DOC: Add missing methods to numpy.strings docs +* `#26403 `__: DOC: Fix links in random documentation. 
+* `#26417 `__: BUG: support nan-like null strings in [l,r]strip +* `#26423 `__: DOC: Fix some typos and incorrect markups +* `#26424 `__: DOC: add reference docs for NpyString C API +* `#26425 `__: REL: Prepare for the NumPy 2.0.0rc2 release [wheel build] +* `#26427 `__: TYP: Fix ``fromrecords`` type hint and bump mypy to 1.10.0. +* `#26457 `__: MAINT: Various CI fixes +* `#26458 `__: BUG: Use Python pickle protocol version 4 for np.save (#26388) +* `#26459 `__: BUG: fixes for three related stringdtype issues (#26436) +* `#26460 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26461 `__: BUG: int32 and intc should both appear in sctypes +* `#26482 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... +* `#26527 `__: DOC: fix NEP 50 reference +* `#26536 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI... +* `#26539 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26540 `__: BLD: Make NumPy build reproducibly +* `#26541 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26543 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26544 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26554 `__: BUG: Fix in1d fast-path range +* `#26555 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26569 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26583 `__: BUG: Fix memory leaks found with valgrind +* `#26584 `__: MAINT: Unpin pydata-sphinx-theme +* `#26587 `__: DOC: Added web docs for missing ma and strings routines +* `#26591 `__: BUG: Fix memory leaks found by valgrind +* `#26592 `__: DOC: Various documentation updates +* `#26635 `__: DOC: update 2.0 docs +* `#26651 `__: DOC: Update 2.0 migration guide +* `#26652 `__: BUG: Disallow string inputs for copy keyword in np.array and... +* `#26653 `__: BUG: Fix F77 ! comment handling +* `#26654 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to... 
+* `#26657 `__: BUG: fix memory leaks found with valgrind (next) +* `#26659 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26673 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26674 `__: MNT: catch invalid fixed-width dtype sizes +* `#26677 `__: CI: Use default llvm on Windows. +* `#26694 `__: DOC: document workaround for deprecation of dim-2 inputs to `cross` +* `#26695 `__: BUG: Adds asanyarray to start of linalg.cross (#26667) +* `#26696 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26697 `__: BUG: Fix bug in numpy.pad() + diff --git a/doc/changelog/2.0.1-changelog.rst b/doc/changelog/2.0.1-changelog.rst new file mode 100644 index 000000000000..5a0b9dd207fc --- /dev/null +++ b/doc/changelog/2.0.1-changelog.rst @@ -0,0 +1,52 @@ + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @vahidmech + +* Alex Herbert + +* Charles Harris +* Giovanni Del Monte + +* Leo Singer +* Lysandros Nikolaou +* Matti Picus +* Nathan Goldbaum +* Patrick J. Roddy + +* Raghuveer Devulapalli +* Ralf Gommers +* Rostan Tabet + +* Sebastian Berg +* Tyler Reddy +* Yannik Wicke + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. 
+ +* `#26711 `__: MAINT: prepare 2.0.x for further development +* `#26792 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26793 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26794 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26821 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26822 `__: BUG: Ensure output order follows input in numpy.fft +* `#26823 `__: TYP: fix missing sys import in numeric.pyi +* `#26832 `__: DOC: remove hack to override _add_newdocs_scalars (#26826) +* `#26835 `__: BUG: avoid side-effect of 'include complex.h' +* `#26836 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26837 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26856 `__: DOC: Update some documentation +* `#26868 `__: BUG: fancy indexing copy +* `#26869 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26870 `__: BUG: Handle --f77flags and --f90flags for meson [wheel build] +* `#26887 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26888 `__: BUG: remove numpy.f2py from excludedimports +* `#26959 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26960 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26961 `__: API: Partially revert unique with return_inverse +* `#26962 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26963 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26971 `__: BUG: fix f2py tests to work with v2 API +* `#26995 `__: BUG: Add object cast to avoid warning with limited API diff --git a/doc/changelog/2.0.2-changelog.rst b/doc/changelog/2.0.2-changelog.rst new file mode 100644 index 000000000000..6622407dd8f6 --- /dev/null +++ b/doc/changelog/2.0.2-changelog.rst @@ -0,0 +1,45 @@ + +Contributors +============ + +A total of 13 people contributed to this release. 
People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... 
+* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types diff --git a/doc/changelog/2.1.0-changelog.rst b/doc/changelog/2.1.0-changelog.rst new file mode 100644 index 000000000000..af7f5a3b07c7 --- /dev/null +++ b/doc/changelog/2.1.0-changelog.rst @@ -0,0 +1,592 @@ + +Contributors +============ + +A total of 110 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. 
+ +* !ogidig5 + +* !partev +* !vahidmech + +* !h-vetinari +* Aaron Meurer +* Adrin Jalali + +* Agriya Khetarpal +* Ajay Kumar Janapareddi + +* Alex Herbert + +* Andras Deak +* Andrej Zhilenkov + +* Andrew Nelson +* Anne Gunn + +* Antony Lee +* Arnaud Ma + +* Arun Kannawadi + +* Arun Pa + +* Bas van Beek +* Ben Woodruff + +* Bruno Oliveira + +* Carlos Henrique Hermanny Moreira da Silva + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christian Lorentzen +* Christopher Sidebottom +* Christopher Titchen + +* Clément Robert +* Cobalt Yang + +* Devyani Chavan + +* Dimitri Papadopoulos Orfanos +* Ebigide Jude + +* Eric Xie + +* Evgeni Burovski +* Fabian Vogt + +* Francisco Sousa + +* GUAN MING + +* Gabriel Fougeron + +* Gagandeep Singh +* Giovanni Del Monte + +* Gonzalo Tornaría + +* Gonçalo Bárias + +* Hugo van Kemenade +* Jakob Stevens Haas + +* Jakob Unfried + +* James Joseph Thomas + +* Jean Lecordier + +* Joren Hammudoglu + +* Joris Van den Bossche +* Julia Poo + +* Justus Magin +* Jyn Spring 琴春 +* KIU Shueng Chuan +* Karthik Gangula + +* Karthik Kaiplody + +* Kevin Sheppard +* Kristoffer Pedersen + +* Leo Singer +* Liang Yan +* Liangyu Zhang + +* Lucas Colley +* Luiz Eduardo Amaral + +* Lysandros Nikolaou +* Marcel Loose + +* Marten van Kerkwijk +* Mateusz Sokół +* Matt Haberland +* Matt Thompson + +* Matthew Roeschke + +* Matthew Thompson + +* Matthias Bussonnier +* Matti Picus +* Melissa Weber Mendonça +* Milica Dančuk + +* Moritz Schreiber + +* Nathan Goldbaum +* Olivier Grisel +* Patrick J. 
Roddy + +* Paul Juma Otieno + +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Raquel Braunschweig + +* Robert Kern +* Rohit Goswami +* Romain Geissler + +* Ross Barnowski +* Rostan Tabet + +* Sam Morley + +* Sayed Adel +* Sean Cheah +* Sebastian Berg +* Serge Guelton +* Slobodan + +* Stefan van der Walt +* Thomas A Caswell +* Thomas Li +* Timo Röhling + +* Tsvika Shapira + +* Tuhin Sharma + +* Tyler Reddy +* Victor Eijkhout + +* Warren Weckesser +* Warrick Ball +* Xiangyi Wang + +* Yair Chuchem + +* Yang Liu + +* Yannik Wicke + +* Yevhen Amelin + +* Yuki K + +Pull requests merged +==================== + +A total of 469 pull requests were merged for this release. + +* `#12150 `__: ENH: When histogramming data with integer dtype, force bin width... +* `#24448 `__: TST: add some tests of np.log for complex input. +* `#25704 `__: DOC: quantile: correct/simplify documentation +* `#25705 `__: DOC: Add documentation explaining our promotion rules +* `#25781 `__: ENH: Convert fp32 sin/cos from C universal intrinsics to C++... 
+* `#25908 `__: ENH: Add center/ljust/rjust/zfill ufuncs for unicode and bytes +* `#25913 `__: NEP: NEP 55 updates and add @mhvk as an author +* `#25963 `__: BUG: Fix bug in numpy.pad() +* `#25964 `__: CI: fix last docbuild warnings +* `#25970 `__: MAINT: Prepare main for NumPy 2.1.0 development +* `#25971 `__: DOC: Fix a note section markup in ``dtype.rst`` +* `#25972 `__: DOC: Fix module setting of ``MaskedArray`` +* `#25974 `__: BUG: Raise error for negative-sized fixed-width dtype +* `#25975 `__: BUG: Fixes np.put receiving empty array causes endless loop +* `#25981 `__: BLD: push a tag builds a wheel +* `#25985 `__: BLD: omit pp39-macosx_arm64 from matrix +* `#25988 `__: DOC: Remove unused parameter description +* `#25990 `__: CI: clean up some unused `choco install` invocations +* `#25995 `__: CI: don't use ``fetch-tags`` in wheel build jobs +* `#25999 `__: BUG: fix kwarg handling in assert_warn [skip cirrus][skip azp] +* `#26000 `__: BUG: Filter out broken Highway platform +* `#26003 `__: MAINT: Bump pypa/cibuildwheel from 2.16.5 to 2.17.0 +* `#26005 `__: DOC: indicate stringdtype support in docstrings for string operations +* `#26006 `__: TST: remove usage of ProcessPoolExecutor in stringdtype tests +* `#26007 `__: MAINT: Remove sdist task from pavement.py +* `#26011 `__: DOC: mention the ``exceptions`` namespace in the 2.0.0 release... +* `#26012 `__: ENH: install StringDType promoter for add +* `#26014 `__: MAINT: remove the now-unused ``NPY_NO_SIGNAL`` +* `#26015 `__: MAINT: remove now-unused ``NPY_USE_C99_FORMAT`` +* `#26016 `__: MAINT: handle ``NPY_ALLOW_THREADS`` and related build option... +* `#26017 `__: MAINT: avoid use of flexible array member in public header +* `#26024 `__: BUG: raise error trying to coerce object arrays containing timedelta64('NaT')... 
+* `#26025 `__: BUG: fix reference count leak in __array__ internals +* `#26027 `__: BUG: add missing error handling in string to int cast internals +* `#26033 `__: MAINT: Remove partition and split-like functions from numpy.strings +* `#26045 `__: ENH: Optimize np.power for integer type +* `#26055 `__: ENH: Optimize np.power(x, 2) for double and float type +* `#26063 `__: MAINT,API: Const qualify some new API (mostly new DType API) +* `#26064 `__: MAINT: Make PyArrayMultiIterObject struct "smaller" +* `#26066 `__: BUG: Allow the new string dtype summation to work +* `#26067 `__: DOC: note stringdtype output support in np.strings docstrings +* `#26070 `__: DOC clarifications on debugging numpy +* `#26071 `__: BUG: fix logic error in stringdtype maximum/minimum ufunc +* `#26080 `__: BUG: adapt cython files to new complex declarations +* `#26081 `__: TYP: Make array _ShapeType bound and covariant +* `#26082 `__: ENH: Add partition/rpartition ufunc for string dtypes +* `#26083 `__: MAINT: Bump actions/cache from 4.0.1 to 4.0.2 +* `#26089 `__: TYP: Adjust typing for ``np.random.integers`` and ``np.random.randint`` +* `#26090 `__: API: Require reduce promoters to start with None to match +* `#26095 `__: MAINT: Bump actions/dependency-review-action from 4.1.3 to 4.2.3 +* `#26097 `__: DOC: Mention ``copy=True`` for ``__array__`` method in the migration... +* `#26099 `__: DOC: fix typo in doc/source/user/absolute_beginners.rst +* `#26103 `__: API: Default to hidden visibility for API tables +* `#26105 `__: MAINT: install all-string promoter for multiply +* `#26108 `__: MAINT: Remove unnecessarily defensive code from dlpack deleter +* `#26112 `__: TST: fix incorrect dtype in test +* `#26113 `__: BLD: Do not use -O3 flag when building in debug mode +* `#26116 `__: ENH: inherit numerical dtypes from abstract ones. 
+* `#26119 `__: BUG: fix reference counting error in stringdtype setup +* `#26123 `__: BUG: update pocketfft to unconditionaly disable use of aligned_alloc +* `#26125 `__: DOC: Bump pydata-sphinx-theme version +* `#26128 `__: DOC: Update absolute_beginners.rst +* `#26129 `__: MAINT: add missing noexcept clauses +* `#26130 `__: ENH: Optimize performance of np.atleast_1d +* `#26133 `__: MAINT: Bump actions/dependency-review-action from 4.2.3 to 4.2.4 +* `#26134 `__: CI, BLD: Push NumPy's Emscripten/Pyodide wheels nightly to Anaconda.org... +* `#26135 `__: BUG: masked array division should ignore all FPEs in mask calculation +* `#26136 `__: BUG: fixed datetime64[ns] conversion issue in numpy.vectorize,... +* `#26138 `__: MAINT: Bump actions/setup-python from 5.0.0 to 5.1.0 +* `#26139 `__: MAINT: Bump actions/dependency-review-action from 4.2.4 to 4.2.5 +* `#26142 `__: BUG,MAINT: Fix __array__ bugs and simplify code +* `#26147 `__: BUG: introduce PyArray_SafeCast to fix issues around stringdtype... 
+* `#26149 `__: MAINT: Escalate import warning to an import error +* `#26151 `__: BUG: Fix test_impossible_feature_enable failing without BASELINE_FEAT +* `#26155 `__: NEP: add NEP 56 mailing list resolution +* `#26160 `__: ENH: Improve performance of np.broadcast_arrays and np.broadcast_shapes +* `#26162 `__: BUG: Infinite Loop in numpy.base_repr +* `#26168 `__: DOC: mention np.lib.NumPyVersion in the 2.0 migration guide +* `#26172 `__: DOC, TST: make ``numpy.version`` officially public +* `#26174 `__: MAINT: Fix failure in routines.version.rst +* `#26182 `__: DOC: Update absolute_beginners.rst +* `#26185 `__: MAINT: Update Pyodide to 0.25.1 +* `#26187 `__: TST: Use platform.machine() for improved portability on riscv64 +* `#26189 `__: MNT: use pythoncapi_compat.h in npy_compat.h +* `#26190 `__: BUG: fix reference counting error in wrapping_method_resolve_descriptors +* `#26207 `__: TST: account for immortal objects in test_iter_refcount +* `#26210 `__: API: Readd ``np.bool_`` typing stub +* `#26212 `__: BENCH: Add benchmarks for np.power(x,2) and np.power(x,0.5) +* `#26213 `__: MNT: try updating pythoncapi-compat +* `#26215 `__: API: Enforce one copy for ``__array__`` when ``copy=True`` +* `#26219 `__: ENH: Enable RVV CPU feature detection +* `#26222 `__: MAINT: Drop Python 3.9 +* `#26227 `__: MAINT: utilize ufunc API const correctness internally +* `#26229 `__: TST: skip limited API test on nogil python build +* `#26232 `__: MAINT: fix typo in _add_newdoc_ufunc docstring +* `#26235 `__: Update numpy.any documentation example +* `#26237 `__: MAINT: Update ``array-api-tests`` job +* `#26239 `__: DOC: add versionadded for copy keyword in np.asarray docstring +* `#26241 `__: DOC: Fixup intp/uintp documentation for ssize_t/size_t changes +* `#26245 `__: DOC: Update ``__array__`` ``copy`` keyword docs +* `#26246 `__: MNT: migrate PyList_GetItem usages to PyList_GetItemRef +* `#26248 `__: MAINT,BUG: Robust string meson template substitution +* `#26251 `__: MNT: disable 
the allocator cache for nogil builds +* `#26258 `__: BLD: update to OpenBLAS 0.3.27 +* `#26260 `__: BUG: Ensure seed sequences are restored through pickling +* `#26261 `__: ENH: introduce a notion of "compatible" stringdtype instances +* `#26263 `__: MAINT: fix typo +* `#26264 `__: MAINT: fix typo in #include example +* `#26267 `__: MAINT: Update URL in nep 0014 - domain change +* `#26268 `__: API: Disallow 0D input arrays in ``nonzero`` +* `#26270 `__: BUG: ensure np.vectorize doesn't truncate fixed-width strings +* `#26273 `__: ENH: Bump Highway to HEAD and remove platform filter +* `#26274 `__: BLD: use install-tags to optionally install tests +* `#26280 `__: ENH: Speedup clip for floating point +* `#26281 `__: BUG: Workaround for Intel Compiler mask conversion bug +* `#26282 `__: MNT: replace _PyDict_GetItemStringWithError with PyDict_GetItemStringRef +* `#26284 `__: TST: run the smoke tests on more python versions +* `#26285 `__: ENH: Decrease wall time of ``ma.cov`` and ``ma.corrcoef`` +* `#26286 `__: BLD: ensure libnpymath and highway static libs use hidden visibility +* `#26292 `__: API: Add ``shape`` and ``copy`` arguments to ``numpy.reshape`` +* `#26294 `__: MNT: disable the coercion cache for the nogil build +* `#26295 `__: CI: add llvm/clang sanitizer tests +* `#26299 `__: MAINT: Pin sphinx to version 7.2.6 +* `#26302 `__: BLD: use newer openblas wheels [wheel build] +* `#26303 `__: DOC: add explanation of dtype to parameter values for np.append +* `#26304 `__: MAINT: address improper error handling and cleanup for ``spin`` +* `#26309 `__: MAINT: Bump actions/upload-artifact from 4.3.1 to 4.3.2 +* `#26311 `__: DOC: Follow-up fixes for new theme +* `#26313 `__: MAINT: Cleanup ``vecdot``'s signature, typing, and importing +* `#26317 `__: BUG: use PyArray_SafeCast in array_astype +* `#26319 `__: BUG: fix spin bench not running on Windows +* `#26320 `__: DOC: Add replacement NEP links in superseded, replaced-by fields +* `#26322 `__: DOC: Documentation and 
examples for conversion of np.timedelta64... +* `#26324 `__: BUG: Fix invalid constructor in string_fastsearch.h with C++... +* `#26325 `__: TST: Skip Cython test for editable install +* `#26329 `__: MAINT: Bump actions/upload-artifact from 4.3.2 to 4.3.3 +* `#26338 `__: MAINT: update x86-simd-sort to latest +* `#26340 `__: DOC: Added small clarification note, based on discussion in issue... +* `#26347 `__: MAINT: Bump conda-incubator/setup-miniconda from 3.0.3 to 3.0.4 +* `#26348 `__: NOGIL: Make loop data cache and dispatch cache thread-safe in... +* `#26353 `__: BUG: ensure text padding ufuncs handle stringdtype nan-like nulls +* `#26354 `__: BUG: Fix rfft for even input length. +* `#26355 `__: ENH: add support for nan-like null strings in string replace +* `#26359 `__: MAINT: Simplify bugfix for even rfft +* `#26362 `__: MAINT: Bump actions/dependency-review-action from 4.2.5 to 4.3.1 +* `#26363 `__: MAINT: Bump actions/dependency-review-action from 4.3.1 to 4.3.2 +* `#26364 `__: TST: static types are now immortal in the default build too +* `#26368 `__: [NOGIL] thread local promotion state +* `#26369 `__: DOC: fix np.unique release notes [skip cirrus] +* `#26372 `__: BUG: Make sure that NumPy scalars are supported by can_cast +* `#26377 `__: TYP: Fix incorrect type hint for creating a recarray from fromrecords +* `#26378 `__: DOC: Update internal links for generator.rst and related +* `#26384 `__: BUG: Fix incorrect return type of item with length 0 from chararray.__getitem__ +* `#26385 `__: DOC: Updated remaining links in random folder +* `#26386 `__: DOC: Improve example on array broadcasting +* `#26388 `__: BUG: Use Python pickle protocol version 4 for np.save +* `#26391 `__: DOC: Add missing methods to numpy.strings docs +* `#26392 `__: BUG: support nan-like null strings in [l,r]strip +* `#26396 `__: MNT: more gracefully handle spin adding arguments to functions... 
+* `#26399 `__: DOC: Update INSTALL.rst +* `#26413 `__: DOC: Fix some typos and incorrect markups +* `#26415 `__: MAINT: updated instructions to get MachAr byte pattern +* `#26416 `__: MAINT: Bump ossf/scorecard-action from 2.3.1 to 2.3.3 +* `#26418 `__: DOC: add reference docs for NpyString C API +* `#26419 `__: MNT: clean up references to array_owned==2 case in StringDType +* `#26426 `__: TYP,TST: Bump mypy to 1.10.0 +* `#26428 `__: MAINT: Bump pypa/cibuildwheel from 2.17.0 to 2.18.0 +* `#26429 `__: TYP: npyio: loadtxt: usecols: add None type +* `#26431 `__: TST: skip test_frompyfunc_leaks in the free-threaded build +* `#26432 `__: MAINT: Add some PR prefixes to the labeler. +* `#26436 `__: BUG: fixes for three related stringdtype issues +* `#26441 `__: BUG: int32 and intc should both appear in sctypes +* `#26442 `__: DOC: Adding links to polynomial table. +* `#26443 `__: TST: temporarily pin spin to work around issue in 0.9 release +* `#26444 `__: DOC: Remove outdated authentication instructions +* `#26445 `__: TST: fix xfailed tests on pypy 7.3.16 +* `#26447 `__: TST: attempt to fix intel SDE SIMD CI +* `#26449 `__: MAINT: fix typo +* `#26452 `__: DEP: Deprecate 'fix_imports' flag in numpy.save +* `#26456 `__: ENH: improve the error raised by ``numpy.isdtype`` +* `#26463 `__: TST: add basic free-threaded CI testing +* `#26464 `__: BLD: update vendored-meson to current Meson master (1.4.99) +* `#26469 `__: MAINT: Bump github/codeql-action from 2.13.4 to 3.25.5 +* `#26471 `__: BLD: cp313 [wheel build] +* `#26474 `__: BLD: Make NumPy build reproducibly +* `#26476 `__: DOC: Skip API documentation for numpy.distutils with Python 3.12... 
+* `#26478 `__: DOC: Set default as ``-j 1`` for spin docs and move ``-W`` to SPHINXOPTS +* `#26480 `__: TYP: fix type annotation for ``newbyteorder`` +* `#26481 `__: Improve documentation of numpy.ma.filled +* `#26486 `__: MAINT: Bump github/codeql-action from 3.25.5 to 3.25.6 +* `#26487 `__: MAINT: Bump pypa/cibuildwheel from 2.18.0 to 2.18.1 +* `#26488 `__: DOC: add examples to get_printoptions +* `#26489 `__: DOC: add example to get_include +* `#26492 `__: DOC: fix rng.random example in numpy-for-matlab-users +* `#26501 `__: ENH: Implement DLPack version 1 +* `#26503 `__: TST: work around flaky test on free-threaded build +* `#26504 `__: DOC: Copy-edit numpy 2.0 migration guide. +* `#26505 `__: DOC: update the NumPy Roadmap +* `#26507 `__: MAINT: mark temp elision address cache as thread local +* `#26511 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26512 `__: CI: enable free-threaded wheel builds [wheel build] +* `#26514 `__: MAINT: Avoid gcc compiler warning +* `#26515 `__: MAINT: Fix GCC -Wmaybe-uninitialized warning +* `#26517 `__: DOC: Add missing functions to the migration guide +* `#26519 `__: MAINT: Avoid by-pointer parameter passing for LINEARIZE_DATA_t... +* `#26520 `__: BUG: Fix handling of size=() in Generator.choice when a.ndim... +* `#26524 `__: BUG: fix incorrect error handling for dtype('a') deprecation +* `#26526 `__: BUG: fix assert in PyArry_ConcatenateArrays with StringDType +* `#26529 `__: BUG: ``PyDataMem_SetHandler`` check capsule name +* `#26531 `__: BUG: Fix entry-point of Texinfo docs +* `#26534 `__: BUG: cast missing in PyPy-specific f2py code, pin spin in CI +* `#26537 `__: BUG: Fix F77 ! 
comment handling +* `#26538 `__: DOC: Update ``gradient`` docstrings +* `#26546 `__: MAINT: Remove redundant print from bug report issue template +* `#26548 `__: BUG: Fix typo in array-wrap code that lead to memory leak +* `#26550 `__: BUG: Make Polynomial evaluation adhere to nep 50 +* `#26552 `__: BUG: Fix in1d fast-path range +* `#26558 `__: BUG: fancy indexing copy +* `#26559 `__: BUG: fix setxor1d when input arrays aren't 1D +* `#26562 `__: MAINT: Bump mamba-org/setup-micromamba from 1.8.1 to 1.9.0 +* `#26563 `__: BUG: Fix memory leaks found with valgrind +* `#26564 `__: CI, BLD: Upgrade to Pyodide 0.26.0 for Emscripten/Pyodide CI... +* `#26566 `__: DOC: update ufunc tutorials to use setuptools +* `#26567 `__: BUG: fix memory leaks found with valgrind (next) +* `#26568 `__: MAINT: Unpin pydata-sphinx-theme +* `#26571 `__: DOC: Added web docs for missing ma and strings routines +* `#26572 `__: ENH: Add array API inspection functions +* `#26579 `__: ENH: Add unstack() +* `#26580 `__: ENH: Add copy and device keyword to np.asanyarray to match np.asarray +* `#26582 `__: BUG: weighted nanpercentile, nanquantile and multi-dim q +* `#26585 `__: MAINT: Bump github/codeql-action from 3.25.6 to 3.25.7 +* `#26586 `__: BUG: Fix memory leaks found by valgrind +* `#26589 `__: BUG: catch invalid fixed-width dtype sizes +* `#26594 `__: DOC: Update constants.rst: fix URL redirect +* `#26597 `__: ENH: Better error message for axis=None in ``np.put_along_axis``... +* `#26599 `__: ENH: use size-zero dtype for broadcast-shapes +* `#26602 `__: TST: Re-enable int8/uint8 einsum tests +* `#26603 `__: BUG: Disallow string inputs for ``copy`` keyword in ``np.array``... +* `#26604 `__: refguide-check with pytest as a runner +* `#26605 `__: DOC: fix typos in numpy v2.0 documentation +* `#26606 `__: DOC: Update randn() to use rng.standard_normal() +* `#26607 `__: MNT: Reorganize non-constant global statics into structs +* `#26609 `__: DOC: Updated notes and examples for np.insert. 
+* `#26610 `__: BUG: np.take handle 64-bit indices on 32-bit platforms +* `#26611 `__: MNT: Remove ``set_string_function`` +* `#26614 `__: MAINT: Bump github/codeql-action from 3.25.7 to 3.25.8 +* `#26619 `__: TST: Re-enable ``test_shift_all_bits`` on clang-cl +* `#26626 `__: DOC: add ``getbufsize`` example +* `#26627 `__: DOC: add ``setbufsize`` example +* `#26628 `__: DOC: add ``matrix_transpose`` example +* `#26629 `__: DOC: add ``unique_all`` example +* `#26630 `__: DOC: add ``unique_counts`` example +* `#26631 `__: DOC: add ``unique_inverse`` example +* `#26632 `__: DOC: add ``unique_values`` example +* `#26633 `__: DOC: fix ``matrix_transpose`` doctest +* `#26634 `__: BUG: Replace dots with underscores in f2py meson backend for... +* `#26636 `__: MAINT: Bump actions/dependency-review-action from 4.3.2 to 4.3.3 +* `#26637 `__: BUG: fix incorrect randomized parameterization in bench_linalg +* `#26638 `__: MNT: use reproducible RNG sequences in benchmarks +* `#26639 `__: MNT: more benchmark cleanup +* `#26641 `__: DOC: Update 2.0 migration guide +* `#26644 `__: DOC: Added clean_dirs to spin docs to remove generated folders +* `#26645 `__: DOC: Enable web docs for numpy.trapezoid and add back links +* `#26646 `__: DOC: Update docstring for invert function +* `#26655 `__: CI: modified CI job to test editable install +* `#26658 `__: MAINT: Bump pypa/cibuildwheel from 2.18.1 to 2.19.0 +* `#26662 `__: DOC: add CI and NEP commit acronyms +* `#26664 `__: CI: build and upload free-threaded nightly wheels for macOS +* `#26667 `__: BUG: Adds asanyarray to start of linalg.cross +* `#26670 `__: MAINT: Bump github/codeql-action from 3.25.8 to 3.25.9 +* `#26672 `__: CI: upgrade FreeBSD Cirrus job from FreeBSD 13.2 to 14.0 +* `#26675 `__: CI: Use default llvm on Windows. 
+* `#26676 `__: MAINT: mark evil_global_disable_warn_O4O8_flag as thread-local +* `#26679 `__: DOC: add ``np.linalg`` examples +* `#26680 `__: remove doctesting from refguide-check, add ``spin check-tutorials`` +* `#26684 `__: MAINT: Bump pypa/cibuildwheel from 2.19.0 to 2.19.1 +* `#26685 `__: MAINT: Bump github/codeql-action from 3.25.9 to 3.25.10 +* `#26686 `__: MAINT: Add comment lost in previous PR. +* `#26691 `__: BUILD: check for scipy-doctest, remove it from requirements +* `#26692 `__: DOC: document workaround for deprecation of dim-2 inputs to ``cross`` +* `#26693 `__: BUG: allow replacement in the dispatch cache +* `#26702 `__: DOC: Added missing See Also sections in Polynomial module +* `#26703 `__: BUG: Handle ``--f77flags`` and ``--f90flags`` for ``meson`` +* `#26706 `__: TST: Skip an f2py module test on Windows +* `#26714 `__: MAINT: Update main after 2.0.0 release. +* `#26716 `__: DOC: Add clarifications np.argpartition +* `#26717 `__: DOC: Mention more error paths and try to consolidate import errors +* `#26721 `__: DOC, MAINT: Turn on version warning banner provided by PyData... +* `#26722 `__: DOC: Update roadmap a bit more +* `#26724 `__: ENH: Add Array API 2023.12 version support +* `#26737 `__: DOC: Extend release notes for #26611 +* `#26739 `__: DOC: Update NEPs statuses +* `#26741 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26742 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26744 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26750 `__: ENH: Add locking to umath_linalg if no lapack is detected at... 
+* `#26760 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26762 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26766 `__: ENH: Support integer dtype inputs in rounding functions +* `#26769 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26770 `__: DOC, NEP: Update NEP44 +* `#26771 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26776 `__: BUG: remove numpy.f2py from excludedimports +* `#26780 `__: MAINT: use an atomic load/store and a mutex to initialize the... +* `#26788 `__: TYP: fix missing ``sys`` import in numeric.pyi +* `#26789 `__: BUG: avoid side-effect of 'include complex.h' +* `#26790 `__: DOC: Update link to Python stdlib random. +* `#26795 `__: BUG: add order to out array of ``numpy.fft`` +* `#26797 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26799 `__: MNT: Update dlpack docs and typing stubs +* `#26802 `__: Missing meson pass-through argument +* `#26805 `__: DOC: Update 2.0 migration guide and release note +* `#26808 `__: DOC: Change selected hardlinks to NEPs to intersphinx mappings +* `#26811 `__: DOC: update notes on sign for complex numbers +* `#26812 `__: CI,TST: Fix meson tests needing gfortran [wheel build] +* `#26813 `__: TST: fix 'spin test single_test' for future versions of spin +* `#26814 `__: DOC: Add ``>>> import numpy as np`` stubs everywhere +* `#26815 `__: MAINT: Bump github/codeql-action from 3.25.10 to 3.25.11 +* `#26826 `__: DOC: remove hack to override _add_newdocs_scalars +* `#26827 `__: DOC: AI-Gen examples ctypeslib.as_ctypes_types +* `#26828 `__: DOC: AI generated examples for ma.left_shift. +* `#26829 `__: DOC: AI-Gen examples for ma.put +* `#26830 `__: DOC: AI generated examples for ma.reshape +* `#26831 `__: DOC: AI generated examples for ma.correlate. 
+* `#26833 `__: MAINT: Bump pypa/cibuildwheel from 2.19.1 to 2.19.2 +* `#26841 `__: BENCH: Missing ufunc in benchmarks +* `#26842 `__: BUILD: clean out py2 stuff from npy_3kcompat.h +* `#26846 `__: MAINT: back printoptions with a true context variable +* `#26847 `__: TYP: fix ``ufunc`` method type annotations +* `#26848 `__: TYP: include the ``|`` prefix for ``dtype`` char codes +* `#26849 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26858 `__: TYP: Annotate type aliases as ``typing.TypeAlias`` +* `#26866 `__: MAINT: Bump actions/upload-artifact from 4.3.3 to 4.3.4 +* `#26867 `__: TYP,BUG: fix ``numpy.__dir__`` annotations +* `#26871 `__: TYP: adopt ``typing.LiteralString`` and use more of ``typing.Literal`` +* `#26872 `__: TYP: use ``types.CapsuleType`` on python>=3.13 +* `#26873 `__: TYP: improved ``numpy._array_api_info`` typing +* `#26875 `__: TYP,BUG: Replace ``numpy._typing._UnknownType`` with ``typing.Never`` +* `#26877 `__: BUG: start applying ruff/flake8-implicit-str-concat rules (ISC) +* `#26879 `__: MAINT: start applying ruff/flake8-simplify rules (SIM) +* `#26880 `__: DOC: Fix small incorrect markup +* `#26881 `__: DOC, MAINT: fix typos found by codespell +* `#26882 `__: MAINT: start applying ruff/pyupgrade rules (UP) +* `#26883 `__: BUG: Make issctype always return bool. +* `#26884 `__: MAINT: Remove a redundant import from the generated __ufunc_api.h. +* `#26889 `__: API: Add ``device`` and ``to_device`` to scalars +* `#26891 `__: DOC: Add a note that one should free the proto struct +* `#26892 `__: ENH: Allow use of clip with Python integers to always succeed +* `#26894 `__: MAINT: Bump actions/setup-node from 4.0.2 to 4.0.3 +* `#26895 `__: DOC: Change documentation copyright strings to use a dynamic... +* `#26896 `__: DOC: Change NEP hardlinks to intersphinx mappings. +* `#26897 `__: TYP: type hint ``numpy.polynomial`` +* `#26901 `__: BUG: ``np.loadtxt`` return F_CONTIGUOUS ndarray if row size is... 
+* `#26902 `__: Apply some ruff/flake8-bugbear rules (B004 and B005) +* `#26903 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26904 `__: BUG,ENH: Fix generic scalar infinite recursion issues +* `#26905 `__: API: Do not consider subclasses for NEP 50 weak promotion +* `#26906 `__: MAINT: Bump actions/setup-python from 5.1.0 to 5.1.1 +* `#26908 `__: ENH: Provide a hook for gufuncs to process core dimensions. +* `#26913 `__: MAINT: declare that NumPy's C extensions support running without... +* `#26914 `__: API: Partially revert unique with return_inverse +* `#26919 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26923 `__: MAINT: Bump actions/dependency-review-action from 4.3.3 to 4.3.4 +* `#26924 `__: MAINT: Bump github/codeql-action from 3.25.11 to 3.25.12 +* `#26927 `__: TYP: Transparent ``__array__`` shape-type +* `#26928 `__: TYP: Covariant ``numpy.flatiter`` type parameter +* `#26929 `__: TYP: Positional-only dunder binop method parameters +* `#26930 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26931 `__: DOC, BUG: Fix running full test command in docstring +* `#26934 `__: MAINT: add PyArray_ZeroContiguousBuffer helper and use it in... +* `#26935 `__: BUG: fix ``f2py`` tests to work with v2 API +* `#26937 `__: TYP,BUG: Remove ``numpy.cast`` and ``numpy.disp`` from the typing... +* `#26938 `__: TYP,BUG: Fix ``dtype`` type alias specialization issue in ``__init__.pyi`` +* `#26942 `__: TYP: Improved ``numpy.generic`` rich comparison operator type... +* `#26943 `__: TYP,BUG: Remove non-existant ``numpy.__git_version__`` in the... +* `#26946 `__: TYP: Add missing typecodes in ``numpy._core.numerictypes.typecodes`` +* `#26950 `__: MAINT: add freethreading_compatible directive to cython build +* `#26953 `__: TYP: Replace ``typing.Union`` with ``|`` in ``numpy._typing`` +* `#26954 `__: TYP: Replace ``typing.Optional[T]`` with ``T | None`` in the... 
+* `#26964 `__: DOC: Issue template for static typing +* `#26968 `__: MAINT: add a 'tests' install tag to the `numpy._core._simd` extension... +* `#26969 `__: BUG: Fix unicode strip +* `#26972 `__: BUG: Off by one in memory overlap check +* `#26975 `__: TYP: Use ``Final`` and ``LiteralString`` for the constants in... +* `#26980 `__: DOC: add sphinx-copybutton +* `#26981 `__: ENH: add support in f2py to declare gil-disabled support +* `#26983 `__: TYP,BUG: Type annotations for ``numpy.trapezoid`` +* `#26984 `__: TYP,BUG: Fix potentially unresolved typevar in ``median`` and... +* `#26985 `__: BUG: Add object cast to avoid warning with limited API +* `#26989 `__: DOC: fix ctypes example +* `#26991 `__: MAINT: mark scipy-openblas nightly tests as allowed to fail +* `#26992 `__: TYP: Covariant ``numpy.ndenumerate`` type parameter +* `#26993 `__: TYP,BUG: FIx ``numpy.ndenumerate`` annotations for ``object_``... +* `#26996 `__: ENH: Add ``__slots__`` to private (sub-)classes in ``numpy.lib._index_tricks_impl`` +* `#27002 `__: MAINT: Update main after 2.0.1 release. +* `#27008 `__: TYP,BUG: Complete type stubs for ``numpy.dtypes`` +* `#27009 `__: TST, MAINT: Loosen required test precision +* `#27010 `__: DOC: update tutorials link +* `#27011 `__: MAINT: replace PyThread_type_lock with PyMutex on Python >= 3.13.0b3 +* `#27013 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27014 `__: BUG: fix gcd inf +* `#27015 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27017 `__: DOC: Release note for feature added in gh-26908. +* `#27019 `__: TYP: improved ``numpy.array`` type hints for array-like input +* `#27025 `__: DOC: Replace np.matrix in .view() docstring example. +* `#27026 `__: DOC: fix tiny typo +* `#27027 `__: BUG: Fix simd loadable stride logic +* `#27031 `__: DOC: document 'floatmode' and 'legacy' keys from np.get_printoptions'... +* `#27034 `__: BUG: random: Fix edge case of Johnk's algorithm for the beta... 
+* `#27041 `__: MAINT: Bump github/codeql-action from 3.25.12 to 3.25.14 +* `#27043 `__: CI: unify free-threaded wheel builds with other builds +* `#27046 `__: BUG: random: prevent zipf from hanging when parameter is large. +* `#27047 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27048 `__: BUG: random: Fix long delays/hangs with zipf(a) when a near 1. +* `#27050 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27051 `__: TST: Refactor to consistently use CompilerChecker +* `#27052 `__: TST: fix issues with tests that use numpy.testing.extbuild +* `#27055 `__: MAINT: Bump ossf/scorecard-action from 2.3.3 to 2.4.0 +* `#27056 `__: MAINT: Bump github/codeql-action from 3.25.14 to 3.25.15 +* `#27057 `__: BUG: fix another cast setup in array_assign_subscript +* `#27058 `__: DOC: Add some missing examples for ``np.strings`` methods +* `#27059 `__: ENH: Disable name suggestions on some AttributeErrors +* `#27060 `__: MAINT: linalg: Simplify some linalg gufuncs. +* `#27070 `__: BUG: Bump Highway to latest master +* `#27076 `__: DEP: lib: Deprecate acceptance of float (and more) in bincount. +* `#27079 `__: MAINT: 3.9/10 cleanups +* `#27081 `__: CI: Upgrade ``array-api-tests`` +* `#27085 `__: ENH: fixes for warnings on free-threaded wheel builds +* `#27087 `__: ENH: mark the dragon4 scratch space as thread-local +* `#27090 `__: DOC: update np.shares_memory() docs +* `#27091 `__: API,BUG: Fix copyto (and ufunc) handling of scalar cast safety +* `#27094 `__: DOC: Add release note about deprecation introduced in gh-27076. +* `#27095 `__: DOC: Fix indentation of a few release notes. 
+* `#27096 `__: BUG: Complex printing tests fail on Windows ARM64 +* `#27097 `__: MAINT: Bump actions/upload-artifact from 4.3.4 to 4.3.5 +* `#27098 `__: BUG: add missing error handling in public_dtype_api.c +* `#27102 `__: DOC: Fixup promotion doc +* `#27104 `__: BUG: Fix building NumPy in FIPS mode +* `#27108 `__: DOC: remove incorrect docstring comment +* `#27110 `__: BLD: cp313 cp313t linux_aarch64 [wheel build] +* `#27112 `__: BUG: Fix repr for integer scalar subclasses +* `#27113 `__: DEV: make linter.py runnable from outside the root of the repo +* `#27114 `__: MAINT: Bump pypa/cibuildwheel from 2.19.2 to 2.20.0 +* `#27115 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... +* `#27117 `__: BUG: Ensure that scalar binops prioritize __array_ufunc__ +* `#27118 `__: BLD: update vendored Meson for cross-compilation patches +* `#27123 `__: BUG: Bump Highway to latest +* `#27124 `__: MAINT: Bump github/codeql-action from 3.25.15 to 3.26.0 +* `#27125 `__: MAINT: Bump actions/upload-artifact from 4.3.5 to 4.3.6 +* `#27127 `__: BUG: Fix missing error return in copyto +* `#27144 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27149 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27162 `__: BLD: use smaller scipy-openblas builds +* `#27166 `__: ENH: fix thread-unsafe C API usages +* `#27173 `__: MAINT: Bump pythoncapi-compat version. +* `#27176 `__: REL: Prepare for the NumPy 2.1.0rc1 release [wheel build] +* `#27180 `__: DOC: Add release notes for #26897 +* `#27181 `__: DOC: Add release notes for #27008 +* `#27190 `__: BUILD: use a shrunken version of scipy-openblas wheels [wheel... 
+* `#27193 <https://github.com/numpy/numpy/pull/27193>`__: REV: Revert undef I and document it +* `#27196 <https://github.com/numpy/numpy/pull/27196>`__: BUILD: improve download script +* `#27197 <https://github.com/numpy/numpy/pull/27197>`__: MAINT: update default NPY_FEATURE_VERSION after dropping py39 +* `#27200 <https://github.com/numpy/numpy/pull/27200>`__: DOC: add free-threading release notes +* `#27209 <https://github.com/numpy/numpy/pull/27209>`__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27216 <https://github.com/numpy/numpy/pull/27216>`__: TYP: Fixed & improved type hints for ``numpy.histogram2d`` +* `#27217 <https://github.com/numpy/numpy/pull/27217>`__: TYP: Fix incompatible overrides in the ``numpy._typing._ufunc``... +* `#27229 <https://github.com/numpy/numpy/pull/27229>`__: BUG: Fix ``PyArray_ZeroContiguousBuffer`` (resize) with struct... +* `#27233 <https://github.com/numpy/numpy/pull/27233>`__: DOC: add docs on thread safety in NumPy +* `#27234 <https://github.com/numpy/numpy/pull/27234>`__: BUG: Allow fitting of degree zero polynomials with Polynomial.fit diff --git a/doc/changelog/2.1.1-changelog.rst b/doc/changelog/2.1.1-changelog.rst new file mode 100644 index 000000000000..d18636771e1a --- /dev/null +++ b/doc/changelog/2.1.1-changelog.rst @@ -0,0 +1,30 @@ + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. 
+* `#27236 <https://github.com/numpy/numpy/pull/27236>`__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 <https://github.com/numpy/numpy/pull/27252>`__: MAINT: prepare 2.1.x for further development +* `#27259 <https://github.com/numpy/numpy/pull/27259>`__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 <https://github.com/numpy/numpy/pull/27266>`__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 <https://github.com/numpy/numpy/pull/27267>`__: TST: Add regression test for missing descr in array-interface +* `#27276 <https://github.com/numpy/numpy/pull/27276>`__: BUG: Fix #27256 and #27257 +* `#27278 <https://github.com/numpy/numpy/pull/27278>`__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 <https://github.com/numpy/numpy/pull/27287>`__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 <https://github.com/numpy/numpy/pull/27303>`__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 <https://github.com/numpy/numpy/pull/27304>`__: BUG: f2py: better handle filtering of public/private subroutines diff --git a/doc/changelog/2.1.2-changelog.rst b/doc/changelog/2.1.2-changelog.rst new file mode 100644 index 000000000000..bd0f7bd2422c --- /dev/null +++ b/doc/changelog/2.1.2-changelog.rst @@ -0,0 +1,38 @@ + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 <https://github.com/numpy/numpy/pull/27333>`__: MAINT: prepare 2.1.x for further development +* `#27400 <https://github.com/numpy/numpy/pull/27400>`__: BUG: apply critical sections around populating the dispatch cache +* `#27406 <https://github.com/numpy/numpy/pull/27406>`__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 <https://github.com/numpy/numpy/pull/27416>`__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27433 <https://github.com/numpy/numpy/pull/27433>`__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 <https://github.com/numpy/numpy/pull/27437>`__: BUG: Allow unsigned shift argument for np.roll +* `#27439 <https://github.com/numpy/numpy/pull/27439>`__: BUG: Disable SVE VQSort +* `#27471 <https://github.com/numpy/numpy/pull/27471>`__: BUG: rfftn axis bug +* `#27479 <https://github.com/numpy/numpy/pull/27479>`__: BUG: Fix extra decref of PyArray_UInt8DType. 
+* `#27480 <https://github.com/numpy/numpy/pull/27480>`__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 <https://github.com/numpy/numpy/pull/27481>`__: MAINT: Check for SVE support on demand +* `#27484 <https://github.com/numpy/numpy/pull/27484>`__: BUG: initialize the promotion state to be weak +* `#27501 <https://github.com/numpy/numpy/pull/27501>`__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 <https://github.com/numpy/numpy/pull/27506>`__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/changelog/2.1.3-changelog.rst b/doc/changelog/2.1.3-changelog.rst new file mode 100644 index 000000000000..073bd002e7ca --- /dev/null +++ b/doc/changelog/2.1.3-changelog.rst @@ -0,0 +1,49 @@ + +Contributors
============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#27512 <https://github.com/numpy/numpy/pull/27512>`__: MAINT: prepare 2.1.x for further development +* `#27537 <https://github.com/numpy/numpy/pull/27537>`__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 <https://github.com/numpy/numpy/pull/27538>`__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 <https://github.com/numpy/numpy/pull/27539>`__: MAINT: MSVC does not support #warning directive +* `#27543 <https://github.com/numpy/numpy/pull/27543>`__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 <https://github.com/numpy/numpy/pull/27561>`__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 <https://github.com/numpy/numpy/pull/27562>`__: BLD: update vendored Meson to 1.5.2 +* `#27563 <https://github.com/numpy/numpy/pull/27563>`__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 <https://github.com/numpy/numpy/pull/27565>`__: MAINT: Use miniforge for macos conda test. 
+* `#27566 `__: BUILD: satisfy gcc-13 pendantic errors +* `#27569 `__: BUG: handle possible error for PyTraceMallocTrack +* `#27570 `__: BLD: start building Windows free-threaded wheels [wheel build] +* `#27571 `__: BUILD: vendor tempita from Cython +* `#27574 `__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h... +* `#27592 `__: MAINT: Update Highway to latest +* `#27593 `__: BUG: Adjust numpy.i for SWIG 4.3 compatibility +* `#27616 `__: BUG: Fix Linux QEMU CI workflow +* `#27668 `__: BLD: Do not set __STDC_VERSION__ to zero during build +* `#27669 `__: ENH: fix wasm32 runtime type error in numpy._core +* `#27672 `__: BUG: Fix a reference count leak in npy_find_descr_for_scalar. +* `#27673 `__: BUG: fixes for StringDType/unicode promoters diff --git a/doc/neps/_static/nep-0055-arena-string-memory-layout.svg b/doc/neps/_static/nep-0055-arena-string-memory-layout.svg index 44a2bbb8d5ce..03b1c560df93 100644 --- a/doc/neps/_static/nep-0055-arena-string-memory-layout.svg +++ b/doc/neps/_static/nep-0055-arena-string-memory-layout.svg @@ -1,2004 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + +
offset 
(size_t)
offset...

(7 byte uint)
(7 byte uint)...
4C
4C
09
09
0
0
0
0
0
0
0
0
0
0
0
0
0
0
1C
1C
0
0
0
0
0
0
0
0
0
0
80
80
flags
(char)
flags...
Arena-allocated String
Arena-allocated String
1
1
...
...
2380
2380
2381
2381
2382
2382
2383
2383
'N'
'N'
'u'
'u'
'm'
'm'
'p'
'p'
1F
1F
'e'
'e'
2379
2379
1C
1C
0
0
0
0
0
0
0
0
0
0
1
1
0
0
0
0
Arena
Allocator
Arena...
0
0
...
...
size_and_flags
(size_t)
size_and_flags...
Text is not SVG - cannot display
\ No newline at end of file diff --git a/doc/neps/_static/nep-0055-heap-string-memory-layout.svg b/doc/neps/_static/nep-0055-heap-string-memory-layout.svg index 05813deeb0e7..97e97f41ea66 100644 --- a/doc/neps/_static/nep-0055-heap-string-memory-layout.svg +++ b/doc/neps/_static/nep-0055-heap-string-memory-layout.svg @@ -1,1992 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + +
offset 
(uintptr_t)
offset...
D3
D3
D3
D3
D3
D3
0
0
0
0
D3
D3
04
04
0
0
0
0
1C
1C
0
0
0
0
0
0
0
0
0
0
Heap-allocated String
Heap-allocated String
PyMem Raw
Allocator Domain
PyMem Raw...
'N'
'N'
'u'
'u'
'm'
'm'
'p'
'p'
'y'
'y'
''
''
'i'
'i'
's'
's'
''
''
0
0
1
1
2
2
3
3
4
4
5
5
6
6
7
7
8
8
70
70
flags
(char)
flags...
0
0
0
0
0
0
0
0
0
0
1
1
1
1
1
1

(7 byte uint)
(7 byte uint)...
size_and_flags
(size_t)
size_and_flags...
Text is not SVG - cannot display
\ No newline at end of file diff --git a/doc/neps/_static/nep-0055-short-string-memory-layout.svg b/doc/neps/_static/nep-0055-short-string-memory-layout.svg index 1a35f59b31e6..973e69b96e8e 100644 --- a/doc/neps/_static/nep-0055-short-string-memory-layout.svg +++ b/doc/neps/_static/nep-0055-short-string-memory-layout.svg @@ -1,1381 +1,4 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + +
direct_buffer
(char[15])
direct_buf...
'H'
'H'
'e'
'e'
'l'
'l'
'l'
'l'
'o'
'o'
''
''
'w'
'w'
'o'
'o'
'r'
'r'
'l'
'l'
'd'
'd'
''
''
''
''
''
''
''
''
6b
6b
Short String
Short String
flags
(word)
flags...
size
(4-bit uint)
size...
1
1
0
0
1
1
0
0
1
1
1
1
0
0
1
1
size_and_flags
(char)
size_and_flag...
Text is not SVG - cannot display
\ No newline at end of file diff --git a/doc/neps/conf.py b/doc/neps/conf.py index 6cf97ddfe59f..ea8b5755d340 100644 --- a/doc/neps/conf.py +++ b/doc/neps/conf.py @@ -16,6 +16,7 @@ # documentation root, use os.path.abspath to make it absolute, like shown here. # import os +from datetime import datetime # import sys # sys.path.insert(0, os.path.abspath('.')) @@ -48,7 +49,8 @@ # General information about the project. project = 'NumPy Enhancement Proposals' -copyright = '2017-2018, NumPy Developers' +year = datetime.now().year +copyright = f'2017-{year}, NumPy Developers' author = 'NumPy Developers' title = 'NumPy Enhancement Proposals Documentation' diff --git a/doc/neps/content.rst b/doc/neps/content.rst index a188deae2ab2..a6e9dace9853 100644 --- a/doc/neps/content.rst +++ b/doc/neps/content.rst @@ -16,10 +16,7 @@ Roadmap Index The Scope of NumPy Current roadmap - Wishlist (opens new window) |wishlist_link| + Wish list -.. |wishlist_link| raw:: html - - WishList diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst index 7b73b4741f27..cd6dfd4941e7 100644 --- a/doc/neps/nep-0000.rst +++ b/doc/neps/nep-0000.rst @@ -146,7 +146,8 @@ thread in the mailing list archives. NEPs can also be ``Superseded`` by a different NEP, rendering the original obsolete. The ``Replaced-By`` and ``Replaces`` headers -should be added to the original and new NEPs respectively. +containing references to the original and new NEPs, like +``:ref:`NEP#number``` should be added respectively. Process NEPs may also have a status of ``Active`` if they are never meant to be completed, e.g. NEP 0 (this NEP). diff --git a/doc/neps/nep-0014-dropping-python2.7-proposal.rst b/doc/neps/nep-0014-dropping-python2.7-proposal.rst index e14a173e2032..e08c3caf0ddc 100644 --- a/doc/neps/nep-0014-dropping-python2.7-proposal.rst +++ b/doc/neps/nep-0014-dropping-python2.7-proposal.rst @@ -52,6 +52,6 @@ to Python3 only, see the python3-statement_. 
For more information on porting your code to run on Python 3, see the python3-howto_. -.. _python3-statement: https://python3statement.org/ +.. _python3-statement: https://python3statement.github.io/ .. _python3-howto: https://docs.python.org/3/howto/pyporting.html diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst index a1682435272f..8eec748e3be1 100644 --- a/doc/neps/nep-0018-array-function-protocol.rst +++ b/doc/neps/nep-0018-array-function-protocol.rst @@ -141,7 +141,7 @@ The type of ``types`` is intentionally vague: instead for performance reasons. In any case, ``__array_function__`` implementations should not rely on the iteration order of ``types``, which would violate a well-defined "Type casting hierarchy" (as described in -`NEP-13 `_). +:ref:`NEP-13 `). Example for a project implementing the NumPy API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -300,7 +300,7 @@ are valid then which has precedence? For the most part, the rules for dispatch with ``__array_function__`` match those for ``__array_ufunc__`` (see -`NEP-13 `_). +:ref:`NEP-13 `). In particular: - NumPy will gather implementations of ``__array_function__`` from all @@ -819,7 +819,7 @@ don't think it makes sense to do so now, because code generation involves tradeoffs and NumPy's experience with type annotations is still `quite limited `_. Even if NumPy was Python 3 only (which will happen -`sometime in 2019 `_), +:ref:`sometime in 2019 `), we aren't ready to annotate NumPy's codebase directly yet. 
Support for implementation-specific arguments diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst index 358e280bd080..7fb8c9734900 100644 --- a/doc/neps/nep-0030-duck-array-protocol.rst +++ b/doc/neps/nep-0030-duck-array-protocol.rst @@ -7,7 +7,7 @@ NEP 30 — Duck typing for NumPy arrays - implementation :Author: Peter Andreas Entschev :Author: Stephan Hoyer :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2019-07-31 :Updated: 2019-07-31 @@ -176,7 +176,7 @@ Previous proposals and discussion --------------------------------- The duck typing protocol proposed here was described in a high level in -`NEP 22 `_. +:ref:`NEP 22 `. Additionally, longer discussions about the protocol and related proposals took place in diff --git a/doc/neps/nep-0031-uarray.rst b/doc/neps/nep-0031-uarray.rst index 3a2354bfe3ff..cb906248fde6 100644 --- a/doc/neps/nep-0031-uarray.rst +++ b/doc/neps/nep-0031-uarray.rst @@ -8,7 +8,7 @@ NEP 31 — Context-local and global overrides of the NumPy API :Author: Ralf Gommers :Author: Peter Bell :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2019-08-22 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ @@ -319,7 +319,7 @@ It has been formally realized (at least in part) that a backend system is needed for this, in the `NumPy roadmap `_. For ``numpy.random``, it's still necessary to make the C-API fit the one -proposed in `NEP-19 `_. +proposed in :ref:`NEP-19 `. This is impossible for `mkl-random`, because then it would need to be rewritten to fit that framework. 
The guarantees on stream compatibility will be the same as before, but if there's a backend that affects @@ -620,8 +620,8 @@ Discussion ---------- * ``uarray`` blogpost: https://labs.quansight.org/blog/2019/07/uarray-update-api-changes-overhead-and-comparison-to-__array_function__/ -* The discussion section of NEP-18: https://numpy.org/neps/nep-0018-array-function-protocol.html#discussion -* NEP-22: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html +* The discussion section of :ref:`NEP18` +* :ref:`NEP22` * Dask issue #4462: https://github.com/dask/dask/issues/4462 * PR #13046: https://github.com/numpy/numpy/pull/13046 * Dask issue #4883: https://github.com/dask/dask/issues/4883 @@ -636,11 +636,11 @@ References and footnotes .. [1] uarray, A general dispatch mechanism for Python: https://uarray.readthedocs.io -.. [2] NEP 18 — A dispatch mechanism for NumPy’s high level array functions: https://numpy.org/neps/nep-0018-array-function-protocol.html +.. [2] :ref:`NEP18` -.. [3] NEP 22 — Duck typing for NumPy arrays – high level overview: https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html +.. [3] :ref:`NEP22` -.. [4] NEP 13 — A Mechanism for Overriding Ufuncs: https://numpy.org/neps/nep-0013-ufunc-overrides.html +.. [4] :ref:`NEP13` .. [5] Reply to Adding to the non-dispatched implementation of NumPy methods: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/5GUDMALWDIRHITG5YUOCV343J66QSX3U/#5GUDMALWDIRHITG5YUOCV343J66QSX3U @@ -650,7 +650,7 @@ References and footnotes .. [8] unumpy: NumPy, but implementation-independent: https://unumpy.readthedocs.io -.. [9] NEP 30 — Duck Typing for NumPy Arrays - Implementation: https://www.numpy.org/neps/nep-0030-duck-array-protocol.html +.. [9] :ref:`NEP30` .. 
[10] http://scipy.github.io/devdocs/fft.html#backend-control diff --git a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst index 63593277dd9a..09a376298245 100644 --- a/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst +++ b/doc/neps/nep-0035-array-creation-dispatch-with-array-function.rst @@ -430,17 +430,17 @@ Discussion References ---------- -.. [1] `NEP 18 - A dispatch mechanism for NumPy's high level array functions `_. +.. [1] :ref:`NEP18`. .. [2] `PEP 3102 — Keyword-Only Arguments `_. -.. [3] `NEP 30 — Duck Typing for NumPy Arrays - Implementation `_. +.. [3] :ref:`NEP30`. -.. [4] `NEP 31 — Context-local and global overrides of the NumPy API `_. +.. [4] :ref:`NEP31`. .. [5] `Array creation routines `_. -.. [6] `NEP 37 — A dispatch protocol for NumPy-like modules `_. +.. [6] :ref:`NEP37`. .. [7] `Implementation's pull request on GitHub `_ diff --git a/doc/neps/nep-0036-fair-play.rst b/doc/neps/nep-0036-fair-play.rst index 5d55c8aa25d5..022bf9435513 100644 --- a/doc/neps/nep-0036-fair-play.rst +++ b/doc/neps/nep-0036-fair-play.rst @@ -121,10 +121,8 @@ Fair play rules 4. *DO* use official mechanism to engage with the API. - Protocols such as `__array_ufunc__ - `__ and - `__array_function__ - `__ + Protocols such as :ref:`__array_ufunc__ ` and + :ref:`__array_function__ ` were designed to help external packages interact more easily with NumPy. E.g., the latter allows objects from foreign libraries to pass through NumPy. 
We actively encourage using any of diff --git a/doc/neps/nep-0037-array-module.rst b/doc/neps/nep-0037-array-module.rst index 248f1c79fd78..7777cc73c2a6 100644 --- a/doc/neps/nep-0037-array-module.rst +++ b/doc/neps/nep-0037-array-module.rst @@ -8,7 +8,7 @@ NEP 37 — A dispatch protocol for NumPy-like modules :Author: Hameer Abbasi :Author: Sebastian Berg :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2019-12-29 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ @@ -29,7 +29,7 @@ expect will make it easier to adopt. Why ``__array_function__`` hasn't been enough --------------------------------------------- -There are two broad ways in which NEP-18 has fallen short of its goals: +There are two broad ways in which :ref:`NEP-18 ` has fallen short of its goals: 1. **Backwards compatibility concerns**. `__array_function__` has significant implications for libraries that use it: @@ -64,7 +64,7 @@ There are two broad ways in which NEP-18 has fallen short of its goals: - **Array creation** routines (e.g., ``np.arange`` and those in ``np.random``) need some other mechanism for indicating what type of - arrays to create. `NEP 35 `_ + arrays to create. :ref:`NEP 35 ` proposed adding optional ``like=`` arguments to functions without existing array arguments. However, we still lack any mechanism to override methods on objects, such as those needed by @@ -72,8 +72,7 @@ There are two broad ways in which NEP-18 has fallen short of its goals: - **Array conversion** can't reuse the existing coercion functions like ``np.asarray``, because ``np.asarray`` sometimes means "convert to an exact ``np.ndarray``" and other times means "convert to something _like_ - a NumPy array." This led to the `NEP 30 - `_ proposal for + a NumPy array." 
This led to the :ref:`NEP 30 ` proposal for a separate ``np.duckarray`` function, but this still does not resolve how to cast one duck array into a type matching another duck array. @@ -144,8 +143,8 @@ we can simply pull out the appropriate submodule: noise = module.random.randn(*array.shape) return array + noise -We can also write the duck-array ``stack`` function from `NEP 30 -`_, without the need +We can also write the duck-array ``stack`` function from +:ref:`NEP 30 `, without the need for a new ``np.duckarray`` function: .. code:: python diff --git a/doc/neps/nep-0044-restructuring-numpy-docs.rst b/doc/neps/nep-0044-restructuring-numpy-docs.rst index 9c4721664fd4..d1a1a0827ad7 100644 --- a/doc/neps/nep-0044-restructuring-numpy-docs.rst +++ b/doc/neps/nep-0044-restructuring-numpy-docs.rst @@ -86,7 +86,8 @@ up-to-date official documentation that can be easily updated. Status and ideas of each type of doc content -------------------------------------------- -**Reference guide** +Reference guide +^^^^^^^^^^^^^^^ NumPy has a quite complete reference guide. All functions are documented, most have examples, and most are cross-linked well with *See Also* sections. Further @@ -94,7 +95,8 @@ improving the reference guide is incremental work that can be done (and is being done) by many people. There are, however, many explanations in the reference guide. These can be moved to a more dedicated Explanations section on the docs. -**How-to guides** +How-to guides +^^^^^^^^^^^^^ NumPy does not have many how-to's. The subclassing and array ducktyping section may be an example of a how-to. Others that could be added are: @@ -106,7 +108,8 @@ may be an example of a how-to. Others that could be added are: - Performance (memory layout, profiling, use with Numba, Cython, or Pythran) - Writing generic code that works with NumPy, Dask, CuPy, pydata/sparse, etc. 
-**Explanations** +Explanations +^^^^^^^^^^^^ There is a reasonable amount of content on fundamental NumPy concepts such as indexing, vectorization, broadcasting, (g)ufuncs, and dtypes. This could be @@ -114,7 +117,7 @@ organized better and clarified to ensure it's really about explaining the concep and not mixed with tutorial or how-to like content. There are few explanations about anything other than those fundamental NumPy -concepts. +concepts. Some examples of concepts that could be expanded: @@ -125,7 +128,8 @@ Some examples of concepts that could be expanded: In addition, there are many explanations in the Reference Guide, which should be moved to this new dedicated Explanations section. -**Tutorials** +Tutorials +^^^^^^^^^ There's a lot of scope for writing better tutorials. We have a new *NumPy for absolute beginners tutorial* [3]_ (GSoD project of Anne Bonner). In addition we @@ -154,19 +158,15 @@ propose a *How to write a tutorial* document, which would help users contribute new high-quality content to the documentation. Data sets ---------- +~~~~~~~~~ Using interesting data in the NumPy docs requires giving all users access to that data, either inside NumPy or in a separate package. The former is not the best idea, since it's hard to do without increasing the size of NumPy -significantly. Even for SciPy there has so far been no consensus on this (see -`scipy PR 8707 `_ on adding a new -``scipy.datasets`` subpackage). - -So we'll aim for a new (pure Python) package, named ``numpy-datasets`` or -``scipy-datasets`` or something similar. That package can take some lessons from -how, e.g., scikit-learn ships data sets. Small data sets can be included in the -repo, large data sets can be accessed via a downloader class or function. +significantly. + +Whenever possible, documentation pages should use examples from the +:mod:`scipy.datasets` package. 
Related work ============ diff --git a/doc/neps/nep-0047-array-api-standard.rst b/doc/neps/nep-0047-array-api-standard.rst index 495d823f79bc..78191eabdbd3 100644 --- a/doc/neps/nep-0047-array-api-standard.rst +++ b/doc/neps/nep-0047-array-api-standard.rst @@ -8,7 +8,7 @@ NEP 47 — Adopting the array API standard :Author: Stephan Hoyer :Author: Aaron Meurer :Status: Superseded -:Replaced-By: 56 +:Replaced-By: :ref:`NEP56` :Type: Standards Track :Created: 2021-01-21 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ diff --git a/doc/neps/nep-0050-scalar-promotion.rst b/doc/neps/nep-0050-scalar-promotion.rst index ec330ef03300..fc161ef9629f 100644 --- a/doc/neps/nep-0050-scalar-promotion.rst +++ b/doc/neps/nep-0050-scalar-promotion.rst @@ -191,7 +191,7 @@ arrays that are not 0-D, such as ``array([2])``. * - ``array([1], uint8) + int64(1)`` or ``array([1], uint8) + array(1, int64)`` - - ``array([2], unit8)`` + - ``array([2], uint8)`` - ``array([2], int64)`` [T2]_ * - ``array([1.], float32) + float64(1.)`` or diff --git a/doc/neps/nep-0052-python-api-cleanup.rst b/doc/neps/nep-0052-python-api-cleanup.rst index a161dbd91b8f..870877a91bf6 100644 --- a/doc/neps/nep-0052-python-api-cleanup.rst +++ b/doc/neps/nep-0052-python-api-cleanup.rst @@ -8,7 +8,7 @@ NEP 52 — Python API cleanup for NumPy 2.0 :Author: Stéfan van der Walt :Author: Nathan Goldbaum :Author: Mateusz Sokół -:Status: Accepted +:Status: Final :Type: Standards Track :Created: 2023-03-28 :Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/thread/QLMPFTWA67DXE3JCUQT2RIRLQ44INS4F/ diff --git a/doc/neps/nep-0054-simd-cpp-highway.rst b/doc/neps/nep-0054-simd-cpp-highway.rst index f06de05ca036..53f1816c4428 100644 --- a/doc/neps/nep-0054-simd-cpp-highway.rst +++ b/doc/neps/nep-0054-simd-cpp-highway.rst @@ -17,7 +17,7 @@ Abstract We are moving the SIMD intrinsic framework, Universal Intrinsics, from C to C++. 
We have also moved to Meson as the build system. The Google Highway intrinsics project is proposing we use Highway instead of our Universal -Intrinsics as described in `NEP 38`_. This is a complex and multi-faceted +Intrinsics as described in :ref:`NEP 38 `. This is a complex and multi-faceted decision - this NEP is an attempt to describe the trade-offs involved and what would need to be done. @@ -350,7 +350,6 @@ References and Footnotes this NEP as an example) or licensed under the `Open Publication License`_. .. _Open Publication License: https://www.opencontent.org/openpub/ -.. _`NEP 38`: https://numpy.org/neps/nep-0038-SIMD-optimizations.html .. _`gh-20866`: https://github.com/numpy/numpy/pull/20866 .. _`gh-21057`: https://github.com/numpy/numpy/pull/21057 .. _`gh-23096`: https://github.com/numpy/numpy/pull/23096 diff --git a/doc/neps/nep-0055-string_dtype.rst b/doc/neps/nep-0055-string_dtype.rst index e2803c8d9d35..7e29e1425e8c 100644 --- a/doc/neps/nep-0055-string_dtype.rst +++ b/doc/neps/nep-0055-string_dtype.rst @@ -6,7 +6,8 @@ NEP 55 — Add a UTF-8 variable-width string DType to NumPy :Author: Nathan Goldbaum :Author: Warren Weckesser -:Status: Accepted +:Author: Marten van Kerkwijk +:Status: Final :Type: Standards Track :Created: 2023-06-29 :Updated: 2024-01-18 @@ -146,9 +147,6 @@ we propose to: types related to string support, enabling a migration path for a future deprecation of ``np.char``. -* An update to the ``npy`` and ``npz`` file formats to allow storage of - arbitrary-length sidecar data. - The following is out of scope for this work: * Changing DType inference for string data. @@ -161,6 +159,9 @@ The following is out of scope for this work: * Implement SIMD optimizations for string operations. +* An update to the ``npy`` and ``npz`` file formats to allow storage of + arbitrary-length sidecar data. 
+ While we're explicitly ruling out implementing these items as part of this work, adding a new string DType helps set up future work that does implement some of these items. @@ -406,7 +407,7 @@ Missing data can be represented using a sentinel: >>> np.isnan(arr) array([False, True, False]) >>> np.empty(3, dtype=dt) - array([nan, nan, nan]) + array(['', '', '']) We only propose supporting user-provided sentinels. By default, empty arrays will be populated with empty strings: @@ -454,23 +455,12 @@ following the behavior of sorting an array containing ``nan``. String Sentinels ++++++++++++++++ -A string missing data value is an instance of ``str`` or subtype of ``str`` and -will be used as the default value for empty arrays: - - >>> arr = np.empty(3, dtype=StringDType(na_object='missing')) - >>> arr - array(['missing', 'missing', 'missing']) - -If such an array is passed to a string operation or a cast, "missing" entries -will be treated as if they have a value given by the string sentinel: +A string missing data value is an instance of ``str`` or subtype of ``str``. - >>> np.char.upper(arr) - array(['MISSING', 'MISSING', 'MISSING']) - -Comparison operations will similarly use the sentinel value directly for missing -entries. This is the primary usage of this pattern we've found in downstream -code, where a missing data sentinel like ``"__nan__"`` is passed to a low-level -sorting or partitioning algorithm. +Operations will use the sentinel value directly for missing entries. This is the +primary usage of this pattern we've found in downstream code, where a missing +data sentinel like ``"__nan__"`` is passed to a low-level sorting or +partitioning algorithm. Other Sentinels +++++++++++++++ @@ -544,11 +534,33 @@ future NumPy or a downstream library may add locale-aware sorting, case folding, and normalization for NumPy unicode strings arrays, but we are not proposing adding these features at this time. 
-Two ``StringDType`` instances are considered identical if they are created with -the same ``na_object`` and ``coerce`` parameter. We propose checking for unequal -``StringDType`` instances in the ``resolve_descriptors`` function of binary -ufuncs that take two string arrays and raising an error if an operation is -performed with unequal ``StringDType`` instances. +Two ``StringDType`` instances are considered equal if they are created with the +same ``na_object`` and ``coerce`` parameter. For ufuncs that accept more than +one string argument we also introduce the concept of "compatible" +``StringDType`` instances. We allow distinct DType instances to be used in ufunc +operations together if have the same ``na_object`` or if only one +or the other DType has an ``na_object`` explicitly set. We do not consider +string coercion for determining whether instances are compatible, although if +the result of the operation is a string, the result will inherit the stricter +string coercion setting of the original operands. + +This notion of "compatible" instances will be enforced in the +``resolve_descriptors`` function of binary ufuncs. This choice makes it easier +to work with non-default ``StringDType`` instances, because python strings are +coerced to the default ``StringDType`` instance, so the following idiomatic +expression is allowed:: + + >>> arr = np.array(["hello", "world"], dtype=StringDType(na_object=None)) + >>> arr + "!" + array(['hello!', 'world!'], dtype=StringDType(na_object=None)) + +If we only considered equality of ``StringDType`` instances, this would +be an error, making for an awkward user experience. 
If the operands have +distinct ``na_object`` settings, NumPy will raise an error because the choice +for the result DType is ambiguous:: + + >>> arr + np.array("!", dtype=StringDType(na_object="")) + TypeError: Cannot find common instance for incompatible dtype instances ``np.strings`` namespace ************************ @@ -562,14 +574,15 @@ be populated with string ufuncs: True We feel ``np.strings`` is a more intuitive name than ``np.char``, and eventually -will replace ``np.char`` once downstream libraries that conform to SPEC-0 can -safely switch to ``np.strings`` without needing any logic conditional on the NumPy -version. +will replace ``np.char`` once the minimum NumPy version supported by downstream +libraries per `SPEC-0 `_ is new +enough that they can safely switch to ``np.strings`` without needing any logic +conditional on the NumPy version. Serialization ************* -Since string data are stored outside the array buffer, serialization top the +Since string data are stored outside the array buffer, serialization to the ``npy`` format would requires a format revision to support storing variable-width sidecare data. Rather than doing this as part of this effort, we do not plan on supporting serialization to the ``npy`` or ``npz`` format without @@ -905,10 +918,10 @@ endian-dependent layouts of these structs is an implementation detail and is not publicly exposed in the API. Whether or not a string is stored directly on the arena buffer or in the heap is -signaled by setting the ``NPY_STRING_SHORT`` flag on the string data. Because -the maximum size of a heap-allocated string is limited to the size of the -largest 7-byte unsized integer, this flag can never be set for a valid heap -string. +signaled by setting the ``NPY_OUTSIDE_ARENA`` and ``NPY_STRING_LONG`` flags on +the string data. Because the maximum size of a heap-allocated string is limited +to the size of the largest 7-byte unsized integer, these flags can never be set +for a valid heap string. 
See :ref:`memorylayoutexamples` for some visual examples of strings in each of these memory layouts. @@ -956,20 +969,29 @@ exponentially expanding buffer, with an expansion factor of 1.25. Each string entry in the arena is prepended by a size, stored either in a ``char`` or a ``size_t``, depending on the length of the string. Strings with lengths between 16 or 8 (depending on architecture) and 255 are stored with a -``char`` size. We refer to these as "medium" strings internally and strings -stored this way have the ``NPY_STRING_MEDIUM`` flag set. This choice reduces the -overhead for storing smaller strings on the heap by 7 bytes per medium-length -string. +``char`` size. We refer to these as "medium" strings internally. This choice +reduces the overhead for storing smaller strings on the heap by 7 bytes per +medium-length string. Strings in the arena with lengths longer than 255 bytes +have the ``NPY_STRING_LONG`` flag set. If the contents of a packed string are freed and then assigned to a new string with the same size or smaller than the string that was originally stored in the -packed string, the existing short string or arena allocation is re-used, with -padding zeros written to the end of the subset of the buffer reserved for the -string. If the string is enlarged, the existing space in the arena buffer cannot -be used, so instead we resort to allocating space directly on the heap via -``malloc`` and the ``NPY_STRING_ON_HEAP`` flag is set. Any pre-existing flags -are kept set to allow future use of the string to determine if there is space in -the arena buffer allocated for the string for possible re-use. +packed string, the existing short string or arena allocation is re-used. There +is one exception however, when a string in the arena is overwritten with a short +string, the arena metadata is lost and the arena allocation cannot be re-used. 
+ +If the string is enlarged, the existing space in the arena buffer cannot be +used, so instead we resort to allocating space directly on the heap via +``malloc`` and the ``NPY_STRING_OUTSIDE_ARENA`` and ``NPY_STRING_LONG`` flags +are set. Note that ``NPY_STRING_LONG`` can be set even for strings with lengths +less than 255 bytes in this case. Since the heap address overwrites the arena +offset, and future string replacements will be stored on the heap or directly +in the array buffer as a short string. + +No matter where it is stored, once a string is initialized it is marked with the +``NPY_STRING_INITIALIZED`` flag. This lets us clearly distinguish between an +uninitialized empty string and a string that has been mutated into the empty +string. The size of the allocation is stored in the arena to allow reuse of the arena allocation if a string is mutated. In principle we could disallow re-use of the @@ -1022,13 +1044,7 @@ Freeing Strings Existing strings must be freed before discarding or re-using a packed string. The API is constructed to require this for all strings, even for short strings with no heap allocations. In all cases, all data in the packed string -are zeroed out, except for the flags, which are preserved except as noted below. - -For strings with data living in the arena allocation, the data for the string in -the arena buffer are zeroed out and the ``NPY_STRING_ARENA_FREED`` flag is set -on the packed string to indicate there is space in the arena for a later re-use -of the packed string. Heap strings have their heap allocation freed and the -``NPY_STRING_ON_HEAP`` flag removed. +are zeroed out, except for the flags, which are preserved. .. _memorylayoutexamples: @@ -1044,8 +1060,8 @@ Short strings store string data directly in the array buffer. On little-endian architectures, the string data appear first, followed by a single byte that allows space for four flags and stores the size of the string as an unsigned integer in the final 4 bits. 
In this example, the string contents are -"Hello world", with a size of 11. The only flag set indicates that this is a -short string. +"Hello world", with a size of 11. The flags indicate this string is stored +outside the arena and is initialized. .. image:: _static/nep-0055-arena-string-memory-layout.svg @@ -1058,9 +1074,8 @@ re-use of the arena allocation if a string is mutated. Also note that because the length of the string is small enough to fit in an ``unsigned char``, this is a "medium"-length string and the size requires only one byte in the arena allocation. An arena string larger than 255 bytes would need 8 bytes in the -arena to store the size in a ``size_t``. The only flag set indicates that this -is a such "medium"-length string with a size that fits in a ``unsigned -char``. Arena strings that are longer than 255 bytes have no flags set. +arena to store the size in a ``size_t``. The only flag set indicates this string +is initialized. .. image:: _static/nep-0055-heap-string-memory-layout.svg @@ -1068,24 +1083,28 @@ Heap strings store string data in a buffer returned by ``PyMem_RawMalloc`` and instead of storing an offset into an arena buffer, directly store the address of the heap address returned by ``malloc``. In this example, the string contents are "Numpy is a very cool library" and are stored at heap address -``0x4d3d3d3``. The string has one flag set, indicating that the allocation lives -directly on the heap rather than in the arena buffer. +``0x4d3d3d3``. The string has three flags set, indicating it is a "long" string +(e.g. not a short string) stored outside the arena, and is initialized. Note +that if this string were stored inside the arena, it would not have the long +string flag set because it requires less than 256 bytes to store. 
Empty Strings and Missing Data ++++++++++++++++++++++++++++++ The layout we have chosen has the benefit that newly created array buffer returned by ``calloc`` will be an array filled with empty strings by -construction, since a string with no flags set is a heap string with size -zero. This is not the only valid representation of an empty string, since other -flags may be set to indicate that the missing string is associated with a -pre-existing short string or arena string. Missing strings will have an -identical representation, except they will always have a flag, -``NPY_STRING_MISSING`` set in the flags field. Users will need to check if a -string is null before accessing an unpacked string buffer and we have set up the -C API in such a way as to force null-checking whenever a string is -unpacked. Both missing and empty strings are stored directly in the array buffer -and do not require additional heap storage. +construction, since a string with no flags set is an uninitialized zero-length +arena string. This is not the only valid representation of an empty string, since other +flags may be set to indicate that the empty string is associated with a +pre-existing short string or arena string. + +Missing strings will have an identical representation, except they will always +have a flag, ``NPY_STRING_MISSING`` set in the flags field. Users will need to +check if a string is null before accessing an unpacked string buffer and we have +set up the C API in such a way as to force null-checking whenever a string is +unpacked. Both missing and empty strings can be detected based on data in the +packed string representation and do not require corresponding room in the arena +allocation or extra heap allocations. 
Related work ------------ diff --git a/doc/neps/nep-0056-array-api-main-namespace.rst b/doc/neps/nep-0056-array-api-main-namespace.rst index ee1190c0ceff..41e070444e81 100644 --- a/doc/neps/nep-0056-array-api-main-namespace.rst +++ b/doc/neps/nep-0056-array-api-main-namespace.rst @@ -7,11 +7,11 @@ NEP 56 — Array API standard support in NumPy's main namespace :Author: Ralf Gommers :Author: Mateusz Sokół :Author: Nathan Goldbaum -:Status: Accepted -:Replaces: 30, 31, 37, 47 +:Status: Final +:Replaces: :ref:`NEP30`, :ref:`NEP31`, :ref:`NEP37`, :ref:`NEP47` :Type: Standards Track :Created: 2023-12-19 -:Resolution: TODO mailing list link (after acceptance) +:Resolution: https://mail.python.org/archives/list/numpy-discussion@python.org/message/Z6AA5CL47NHBNEPTFWYOTSUVSRDGHYPN/ Abstract @@ -302,7 +302,7 @@ three types of behavior rather than two - ``copy=None`` means "copy if needed". an exception because they use* ``copy=False`` *explicitly in their copy but a copy was previously made anyway, they have to inspect their code and determine whether the intent of the code was the old or the new semantics (both seem -rougly equally likely), and adapt the code as appropriate. We expect most cases +roughly equally likely), and adapt the code as appropriate. We expect most cases to be* ``np.array(..., copy=False)``, *because until a few years ago that had lower overhead than* ``np.asarray(...)``. *This was solved though, and* ``np.asarray(...)`` *is idiomatic NumPy usage.* diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst index 6a9761b05230..fb8602981661 100644 --- a/doc/neps/roadmap.rst +++ b/doc/neps/roadmap.rst @@ -18,25 +18,17 @@ may include (among other things) interoperability protocols, better duck typing support and ndarray subclass handling. 
The key goal is: *make it easy for code written for NumPy to also work with -other NumPy-like projects.* This will enable GPU support via, e.g, CuPy or JAX, +other NumPy-like projects.* This will enable GPU support via, e.g, CuPy, JAX or PyTorch, distributed array support via Dask, and writing special-purpose arrays (either from scratch, or as a ``numpy.ndarray`` subclass) that work well with SciPy, -scikit-learn and other such packages. +scikit-learn and other such packages. A large step forward in this area was +made in NumPy 2.0, with adoption of and compliance with the array API standard +(v2022.12, see :ref:`NEP47`). Future work in this direction will include +support for newer versions of the array API standard, and adding features as +needed based on real-world experience and needs. -The ``__array_ufunc__`` and ``__array_function__`` protocols are stable, but -do not cover the whole API. New protocols for overriding other functionality -in NumPy are needed. Work in this area aims to bring to completion one or more -of the following proposals: - -- :ref:`NEP30` -- :ref:`NEP31` -- :ref:`NEP35` -- :ref:`NEP37` - -In addition we aim to provide ways to make it easier for other libraries to -implement a NumPy-compatible API. This may include defining consistent subsets -of the API, as discussed in `this section of NEP 37 -`__. +In addition, the ``__array_ufunc__`` and ``__array_function__`` protocols +fulfill a role here - they are stable and used by several downstream projects. Performance @@ -46,17 +38,31 @@ Improvements to NumPy's performance are important to many users. We have focused this effort on Universal SIMD (see :ref:`NEP38`) intrinsics which provide nice improvements across various hardware platforms via an abstraction layer. The infrastructure is in place, and we welcome follow-on PRs to add -SIMD support across all relevant NumPy functions. +SIMD support across relevant NumPy functionality. 
+ +Transitioning from C to C++, both in the SIMD infrastructure and in NumPy +internals more widely, is in progress. We have also started to make use of +Google Highway (see :ref:`NEP54`), and that usage is likely to expand. Work +towards support for newer SIMD instruction sets, like SVE on arm64, is ongoing. Other performance improvement ideas include: -- A better story around parallel execution. +- A better story around parallel execution (related is support for free-threaded + CPython, see further down). +- Enable the ability to allow NumPy to use faster, but less precise, + implementations for ufuncs. + Until now, the only state modifying ufunc behavior has been ``np.errstate``. + But with NumPy 2.0, improvements in ``np.errstate`` and the ufunc C + implementation make this type of addition easier. - Optimizations in individual functions. -- Reducing ufunc and ``__array_function__`` overhead. Furthermore we would like to improve the benchmarking system, in terms of coverage, -easy of use, and publication of the results (now -`here `__) as part of the docs or website. +ease of use, and publication of the results. Benchmarking PRs/branches compared +to the `main` branch is a primary purpose, and required for PRs that are +performance-focused (e.g., adding SIMD acceleration to a function). In +addition, we'd like a performance overview like the one we had `here +`__, set up in a way that is more +maintainable long-term. Documentation and website @@ -68,69 +74,154 @@ documentation on many topics are missing or outdated. See :ref:`NEP44` for planned improvements. Adding more tutorials is underway in the `numpy-tutorials repo `__. -Our website (https://numpy.org) was completely redesigned recently. We aim to -further improve it by adding translations, more case studies and other -high-level content, and more (see `this tracking issue `__). 
+We also intend to make all the example code in our documentation interactive - +work is underway to do so via ``jupyterlite-sphinx`` and Pyodide. + +Our website (https://numpy.org) is in good shape. Further work on expanding the +number of languages that the website is translated in is desirable, as are +improvements to the interactive notebook widget, through JupyterLite. Extensibility ------------- -We aim to make it much easier to extend NumPy. The primary topic here is to -improve the dtype system - see :ref:`NEP41` and related NEPs linked from it. -Concrete goals for the dtype system rewrite are: - -- Easier custom dtypes: +We aim to continue making it easier to extend NumPy. The primary topic here is to +improve the dtype system - see for example :ref:`NEP41` and related NEPs linked +from it. In NumPy 2.0, a `new C API for user-defined dtypes `__ +was made public. We aim to encourage its usage and improve this API further, +including support for writing a dtype in Python. - - Simplify and/or wrap the current C-API - - More consistent support for dtype metadata - - Support for writing a dtype in Python +Ideas for new dtypes that may be developed outside of the main NumPy repository +first, and that could potentially be upstreamed into NumPy later, include: -- Allow adding (a) new string dtype(s). This could be encoded strings with - fixed-width storage (e.g., ``utf8`` or ``latin1``), and/or a variable length - string dtype. The latter could share an implementation with ``dtype=object``, - but be explicitly type-checked. - One of these should probably be the default for text data. The current - string dtype support is neither efficient nor user friendly. +- A quad-precision (128-bit) dtype +- A ``bfloat16`` dtype +- A fixed-width string dtype which supports encodings (e.g., ``utf8`` or + ``latin1``) +- A unit dtype +We further plan to extend the ufunc C API as needs arise. 
+One possibility here is creating a new, more powerful, API to allow hooking +into existing NumPy ufunc implementations. User experience --------------- Type annotations ```````````````` -NumPy 1.20 adds type annotations for most NumPy functionality, so users can use -tools like `mypy`_ to type check their code and IDEs can improve their support +Type annotations for most NumPy functionality is complete (although some +submodules like ``numpy.ma`` are missing return types), so users can use tools +like `mypy`_ to type check their code and IDEs can improve their support for NumPy. Improving those type annotations, for example to support annotating -array shapes and dtypes, is ongoing. +array shapes (see `gh-16544 `__), +is ongoing. Platform support ```````````````` We aim to increase our support for different hardware architectures. This includes adding CI coverage when CI services are available, providing wheels on -PyPI for POWER8/9 (``ppc64le``), providing better build and install -documentation, and resolving build issues on other platforms like AIX. +PyPI for platforms that are in high enough demand (e.g., we added ``musllinux`` +ones for NumPy 2.0), and resolving build issues on platforms that we don't test +in CI (e.g., AIX). + +We intend to write a NEP covering the support levels we provide and what is +required for a platform to move to a higher tier of support, similar to +`PEP 11 `__. + +Further consistency fixes to promotion and scalar logic +``````````````````````````````````````````````````````` +NumPy 2.0 fixed many issues around promotion especially with respect to scalars. +We plan to continue fixing remaining inconsistencies. +For example, NumPy converts 0-D objects to scalars, and some promotions +still allowed by NumPy are problematic. + +Support for free-threaded CPython +````````````````````````````````` +CPython 3.13 will be the first release to offer a free-threaded build (i.e., +a CPython build with the GIL disabled). 
Work is in progress to support this +well in NumPy. After that is stable and complete, there may be opportunities to +actually make use of the potential for performance improvements from +free-threaded CPython, or make it easier to do so for NumPy's users. + +Binary size reduction +````````````````````` +The number of downloads of NumPy from PyPI and other platforms continues to +increase - as of May 2024 we're at >200 million downloads/month from PyPI +alone. Reducing the size of an installed NumPy package has many benefits: +faster installs, lower disk space usage, smaller load on PyPI, less +environmental impact, easier to fit more packages on top of NumPy in +resource-constrained environments and platforms like AWS Lambda, lower latency +for Pyodide users, and so on. We aim for significant reductions, as well as +making it easier for end users and packagers to produce smaller custom builds +(e.g., we added support for stripping tests before 2.1.0). See +`gh-25737 `__ for details. + +Support use of CPython's limited C API +`````````````````````````````````````` +Use of the CPython limited C API, allowing producing ``abi3`` wheels that use +the stable ABI and are hence independent of CPython feature releases, has +benefits for both downstream packages that use NumPy's C API and for NumPy +itself. In NumPy 2.0, work was done to enable using the limited C API with +the Cython support in NumPy (see `gh-25531 `__. -- A backend system for ``numpy.fft`` (so that e.g. ``fft-mkl`` doesn't need to monkeypatch numpy). - Write a strategy on how to deal with overlap between NumPy and SciPy for ``linalg``. -- Deprecate ``np.matrix`` (very slowly). +- Deprecate ``np.matrix`` (very slowly) - this is feasible once the switch-over + from sparse matrices to sparse arrays in SciPy is complete. - Add new indexing modes for "vectorized indexing" and "outer indexing" (see :ref:`NEP21`). - Make the polynomial API easier to use. -- Integrate an improved text file loader. 
-- Ufunc and gufunc improvements, see `gh-8892 `__ - and `gh-11492 `__. .. _`mypy`: https://mypy.readthedocs.io diff --git a/doc/neps/tools/build_index.py b/doc/neps/tools/build_index.py index 68212e110e8b..e8ca86e68c13 100644 --- a/doc/neps/tools/build_index.py +++ b/doc/neps/tools/build_index.py @@ -75,7 +75,7 @@ def nep_metadata(): f'NEP {nr} has been Superseded, but has no Replaced-By tag' ) - replaced_by = int(tags['Replaced-By']) + replaced_by = int(re.findall(r'\d+', tags['Replaced-By'])[0]) replacement_nep = neps[replaced_by] if not 'Replaces' in replacement_nep: @@ -105,13 +105,8 @@ def nep_metadata(): def parse_replaces_metadata(replacement_nep): """Handle :Replaces: as integer or list of integers""" - replaces = replacement_nep['Replaces'] - if ' ' in replaces: - # Replaces multiple NEPs, should be comma-separated ints - replaced_neps = [int(s) for s in replaces.split(', ')] - else: - replaced_neps = [int(replaces)] - + replaces = re.findall(r'\d+', replacement_nep['Replaces']) + replaced_neps = [int(s) for s in replaces] return replaced_neps diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst index 91b7f7e000a0..51ccd7690eff 100644 --- a/doc/release/upcoming_changes/README.rst +++ b/doc/release/upcoming_changes/README.rst @@ -40,7 +40,7 @@ So for example: ``123.new_feature.rst`` would have the content:: The ``my_new_feature`` option is now available for `my_favorite_function`. To use it, write ``np.my_favorite_function(..., my_new_feature=True)``. -``highlight`` is usually formatted as bulled points making the fragment +``highlight`` is usually formatted as bullet points making the fragment ``* This is a highlight``. 
Note the use of single-backticks to get an internal link (assuming diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css index 4a489474d9d7..180dec530649 100644 --- a/doc/source/_static/numpy.css +++ b/doc/source/_static/numpy.css @@ -9,132 +9,54 @@ body { font-family: 'Open Sans', sans-serif; + font-size: medium; } -pre, code { - font-size: 100%; - line-height: 155%; -} - -h1 { - font-family: "Lato", sans-serif; - color: #013243; /* warm black */ -} - -h2 { - color: #4d77cf; /* han blue */ - letter-spacing: -.03em; -} +/* Making sure the navbar shows correctly in one line + Reduces the space between the top-left logo and the navbar section titles */ -h3 { - color: #013243; /* warm black */ - letter-spacing: -.03em; +.col-lg-3 { + width: 15%; } -/* Style the active version button. - -- dev: orange -- stable: green -- old, PR: red - -Colors from: - -Wong, B. Points of view: Color blindness. -Nat Methods 8, 441 (2011). https://doi.org/10.1038/nmeth.1618 -*/ - -/* If the active version has the name "dev", style it orange */ -#version_switcher_button[data-active-version-name*="dev"] { - background-color: #E69F00; - border-color: #E69F00; - color:#000000; -} +/* Version switcher colors from PyData Sphinx Theme */ -/* green for `stable` */ -#version_switcher_button[data-active-version-name*="stable"] { - background-color: #009E73; - border-color: #009E73; +.version-switcher__button[data-active-version-name*="devdocs"] { + background-color: var(--pst-color-warning); + border-color: var(--pst-color-warning); + opacity: 0.9; } -/* red for `old` */ -#version_switcher_button:not([data-active-version-name*="stable"], [data-active-version-name*="dev"], [data-active-version-name=""]) { - background-color: #980F0F; - border-color: #980F0F; +.version-switcher__button:not([data-active-version-name*="stable"]):not([data-active-version-name*="dev"]):not([data-active-version-name*="pull"]) { + background-color: var(--pst-color-danger); + border-color: 
var(--pst-color-danger); + opacity: 0.9; } -/* Main page overview cards */ - -.sd-card { - background: #fff; - border-radius: 0; - padding: 30px 10px 20px 10px; - margin: 10px 0px; +.version-switcher__menu a.list-group-item { + font-size: small; } -.sd-card .sd-card-header { - text-align: center; +button.btn.version-switcher__button, +button.btn.version-switcher__button:hover { + color: black; + font-size: small; } -.sd-card .sd-card-header .sd-card-text { - margin: 0px; -} +/* Main index page overview cards */ .sd-card .sd-card-img-top { - height: 52px; - width: 52px; + height: 60px; + width: 60px; margin-left: auto; margin-right: auto; + margin-top: 10px; } -.sd-card .sd-card-header { - border: none; - background-color: white; - color: #150458 !important; - font-size: var(--pst-font-size-h5); - font-weight: bold; - padding: 2.5rem 0rem 0.5rem 0rem; -} - -.sd-card .sd-card-footer { - border: none; - background-color: white; -} +/* Main index page overview images */ -.sd-card .sd-card-footer .sd-card-text { - max-width: 220px; - margin-left: auto; - margin-right: auto; -} - -/* Dark theme tweaking */ html[data-theme=dark] .sd-card img[src*='.svg'] { - filter: invert(0.82) brightness(0.8) contrast(1.2); -} - -/* Main index page overview cards */ -html[data-theme=dark] .sd-card { - background-color:var(--pst-color-background); -} - -html[data-theme=dark] .sd-shadow-sm { - box-shadow: 0 .1rem 1rem rgba(250, 250, 250, .6) !important -} - -html[data-theme=dark] .sd-card .sd-card-header { - background-color:var(--pst-color-background); - color: #150458 !important; -} - -html[data-theme=dark] .sd-card .sd-card-footer { - background-color:var(--pst-color-background); -} - -html[data-theme=dark] h1 { - color: var(--pst-color-primary); -} - -html[data-theme=dark] h3 { - color: #0a6774; + filter: invert(0.82) brightness(0.8) contrast(1.2); } /* Legacy admonition */ @@ -143,15 +65,10 @@ div.admonition-legacy { border-color: var(--pst-color-warning); } 
-.admonition-legacy.admonition>.admonition-title::before, -div.admonition>.admonition-title::before { +div.admonition-legacy>.admonition-title::after { color: var(--pst-color-warning); - content: var(--pst-icon-admonition-attention); - background-color: var(--pst-color-warning); } -.admonition-legacy.admonition>.admonition-title::after, -div.admonition>.admonition-title::after { - color: var(--pst-color-warning); - content: var(--pst-icon-admonition-default); +div.admonition-legacy>.admonition-title { + background-color: var(--pst-color-warning-bg); } \ No newline at end of file diff --git a/doc/source/building/blas_lapack.rst b/doc/source/building/blas_lapack.rst index 6ae5f3f78a82..73ab4ac301aa 100644 --- a/doc/source/building/blas_lapack.rst +++ b/doc/source/building/blas_lapack.rst @@ -16,20 +16,20 @@ plain ``libblas``/``liblapack``. This may vary per platform or over releases. That order, and which libraries are tried, can be changed through the ``blas-order`` and ``lapack-order`` build options, for example:: - $ python -m pip install . -C-Dblas-order=openblas,mkl,blis -C-Dlapack-order=openblas,mkl,lapack + $ python -m pip install . -Csetup-args=-Dblas-order=openblas,mkl,blis -Csetup-args=-Dlapack-order=openblas,mkl,lapack The first suitable library that is found will be used. In case no suitable library is found, the NumPy build will print a warning and then use (slow!) NumPy-internal fallback routines. In order to disallow use of those slow routines, the ``allow-noblas`` build option can be used:: - $ python -m pip install . -C-Dallow-noblas=false + $ python -m pip install . -Csetup-args=-Dallow-noblas=false By default the LP64 (32-bit integer) interface to BLAS and LAPACK will be used. For building against the ILP64 (64-bit integer) interface, one must use the ``use-ilp64`` build option:: - $ python -m pip install . -C-Duse-ilp64=true + $ python -m pip install . -Csetup-args=-Duse-ilp64=true .. 
_accelerated-blas-lapack-libraries: diff --git a/doc/source/conf.py b/doc/source/conf.py index 1e734c0134bc..2019529cb53b 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -4,6 +4,7 @@ import importlib from docutils import nodes from docutils.parsers.rst import Directive +from datetime import datetime # Minimum version, enforced by sphinx needs_sphinx = '4.3' @@ -41,10 +42,6 @@ class PyTypeObject(ctypes.Structure): ('tp_name', ctypes.c_char_p), ] - # prevent numpy attaching docstrings to the scalar types - assert 'numpy._core._add_newdocs_scalars' not in sys.modules - sys.modules['numpy._core._add_newdocs_scalars'] = object() - import numpy # change the __name__ of the scalar types @@ -58,9 +55,6 @@ class PyTypeObject(ctypes.Structure): c_typ = PyTypeObject.from_address(id(typ)) c_typ.tp_name = _name_cache[typ] = b"numpy." + name.encode('utf8') - # now generate the docstrings as usual - del sys.modules['numpy._core._add_newdocs_scalars'] - import numpy._core._add_newdocs_scalars replace_scalar_type_names() @@ -93,6 +87,7 @@ class PyTypeObject(ctypes.Structure): 'IPython.sphinxext.ipython_console_highlighting', 'IPython.sphinxext.ipython_directive', 'sphinx.ext.mathjax', + 'sphinx_copybutton', 'sphinx_design', ] @@ -114,7 +109,8 @@ class PyTypeObject(ctypes.Structure): # General substitutions. project = 'NumPy' -copyright = '2008-2024, NumPy Developers' +year = datetime.now().year +copyright = f'2008-{year}, NumPy Developers' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. @@ -143,6 +139,10 @@ class PyTypeObject(ctypes.Structure): # for source files. exclude_dirs = [] +exclude_patterns = [] +if sys.version_info[:2] >= (3, 12): + exclude_patterns += ["reference/distutils.rst"] + # If true, '()' will be appended to :func: etc. cross-reference text. 
add_function_parentheses = False @@ -239,23 +239,30 @@ def setup(app): switcher_version = f"{version}" html_theme_options = { - "logo": { - "image_light": "_static/numpylogo.svg", - "image_dark": "_static/numpylogo_dark.svg", - }, - "github_url": "https://github.com/numpy/numpy", - "collapse_navigation": True, - "external_links": [ - {"name": "Learn", "url": "https://numpy.org/numpy-tutorials/"}, - {"name": "NEPs", "url": "https://numpy.org/neps"} - ], - "header_links_before_dropdown": 6, - # Add light/dark mode and documentation version switcher: - "navbar_end": ["theme-switcher", "version-switcher", "navbar-icon-links"], - "switcher": { - "version_match": switcher_version, - "json_url": "https://numpy.org/doc/_static/versions.json", - }, + "logo": { + "image_light": "_static/numpylogo.svg", + "image_dark": "_static/numpylogo_dark.svg", + }, + "github_url": "https://github.com/numpy/numpy", + "collapse_navigation": True, + "external_links": [ + {"name": "Learn", "url": "https://numpy.org/numpy-tutorials/"}, + {"name": "NEPs", "url": "https://numpy.org/neps"}, + ], + "header_links_before_dropdown": 6, + # Add light/dark mode and documentation version switcher: + "navbar_end": [ + "search-button", + "theme-switcher", + "version-switcher", + "navbar-icon-links" + ], + "navbar_persistent": [], + "switcher": { + "version_match": switcher_version, + "json_url": "https://numpy.org/doc/_static/versions.json", + }, + "show_version_warning_banner": True, } html_title = "%s v%s Manual" % (project, version) @@ -279,6 +286,9 @@ def setup(app): plot_html_show_formats = False plot_html_show_source_link = False +# sphinx-copybutton configurations +copybutton_prompt_text = r">>> |\.\.\. 
|\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: " +copybutton_prompt_is_regexp = True # ----------------------------------------------------------------------------- # LaTeX output # ----------------------------------------------------------------------------- @@ -378,7 +388,7 @@ def setup(app): # ----------------------------------------------------------------------------- texinfo_documents = [ - ("contents", 'numpy', 'NumPy Documentation', _stdauthor, 'NumPy', + ("index", 'numpy', 'NumPy Documentation', _stdauthor, 'NumPy', "NumPy: array processing for numbers, strings, records, and objects.", 'Programming', 1), @@ -585,3 +595,12 @@ class NumPyLexer(CLexer): breathe_projects = dict(numpy=os.path.join("..", "build", "doxygen", "xml")) breathe_default_project = "numpy" breathe_default_members = ("members", "undoc-members", "protected-members") + +# See https://github.com/breathe-doc/breathe/issues/696 +nitpick_ignore = [ + ('c:identifier', 'FILE'), + ('c:identifier', 'size_t'), + ('c:identifier', 'PyHeapTypeObject'), +] + + diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index 6164eef4db26..e5165f213ce7 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -114,6 +114,15 @@ argument to pytest:: $ spin test -v -t numpy/_core/tests/test_multiarray.py -- -k "MatMul and not vector" +To run "doctests" -- to check that the code examples in the documentation are correct -- +use the `check-docs` spin command. It relies on the `scipy-doctest` package, which +provides several additional features on top of the standard library ``doctest`` +package. Install ``scipy-doctest`` and run one of:: + + $ spin check-docs -v + $ spin check-docs numpy/linalg + $ spin check-docs -v -- -k 'det and not slogdet' + .. note:: Remember that all tests of NumPy should pass before committing your changes. @@ -205,7 +214,7 @@ since the linter runs as part of the CI pipeline. 
For more details on Style Guidelines: - `Python Style Guide`_ -- `C Style Guide`_ +- :ref:`NEP45` Rebuilding & cleaning the workspace ----------------------------------- @@ -247,7 +256,10 @@ of Python is encouraged, see :ref:`advanced_debugging`. In terms of debugging, NumPy also needs to be built in a debug mode. You need to use ``debug`` build type and disable optimizations to make sure ``-O0`` flag is used -during object building. To generate source-level debug information within the build process run:: +during object building. Note that NumPy should NOT be installed in your environment +before you build with the ``spin build`` command. + +To generate source-level debug information within the build process run:: $ spin build --clean -- -Dbuildtype=debug -Ddisable-optimization=true @@ -271,13 +283,14 @@ you want to debug. For instance ``mytest.py``:: x = np.arange(5) np.empty_like(x) -Now, you can run:: +Note that your test file needs to be outside the NumPy clone you have. Now, you can +run:: - $ spin gdb mytest.py + $ spin gdb /path/to/mytest.py In case you are using clang toolchain:: - $ lldb python mytest.py + $ spin lldb /path/to/mytest.py And then in the debugger:: @@ -323,7 +336,6 @@ typically packaged as ``python-dbg``) is highly recommended. .. _Waf: https://code.google.com/p/waf/ .. _`match test names using python operators`: https://docs.pytest.org/en/latest/usage.html#specifying-tests-selecting-tests .. _`Python Style Guide`: https://www.python.org/dev/peps/pep-0008/ -.. 
_`C Style Guide`: https://numpy.org/neps/nep-0045-c_style_guide.html Understanding the code & getting started ---------------------------------------- diff --git a/doc/source/dev/development_workflow.rst b/doc/source/dev/development_workflow.rst index d3ee762445e6..a0a247c10957 100644 --- a/doc/source/dev/development_workflow.rst +++ b/doc/source/dev/development_workflow.rst @@ -166,16 +166,20 @@ Standard acronyms to start the commit message with are:: BENCH: changes to the benchmark suite BLD: change related to building numpy BUG: bug fix + CI: continuous integration DEP: deprecate something, or remove a deprecated object DEV: development tool or utility DOC: documentation ENH: enhancement MAINT: maintenance commit (refactoring, typos, etc.) + MNT: alias for MAINT + NEP: NumPy enhancement proposals + REL: related to releasing numpy REV: revert an earlier commit STY: style fix (whitespace, PEP8) TST: addition or modification of tests TYP: static typing - REL: related to releasing numpy + WIP: work in progress, do not merge Commands to skip continuous integration ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/dev/howto-docs.rst b/doc/source/dev/howto-docs.rst index 5f23d544145f..1eea77041740 100644 --- a/doc/source/dev/howto-docs.rst +++ b/doc/source/dev/howto-docs.rst @@ -79,7 +79,7 @@ ideas and feedback. If you want to alert us to a gap, If you're looking for subjects, our formal roadmap for documentation is a *NumPy Enhancement Proposal (NEP)*, -`NEP 44 - Restructuring the NumPy Documentation `__. +:ref:`NEP44`. It identifies areas where our docs need help and lists several additions we'd like to see, including :ref:`Jupyter notebooks `. @@ -376,7 +376,7 @@ membergroups and members-only options: :outline: :no-link: -Checkout the `doxygenclass documentation _` +Checkout the `doxygenclass documentation `__ for more details and to see it in action. 
``doxygennamespace`` diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 6c9ebd577b2e..d2846f48b833 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -88,9 +88,6 @@ Here's the short summary, complete TOC links are below: git push origin linspace-speedups - * Enter your GitHub username and password (repeat contributors or advanced - users can remove this step by connecting to GitHub with SSH). - * Go to GitHub. The new branch will show up with a green Pull Request button. Make sure the title and message are clear, concise, and self- explanatory. Then click the button to submit it. diff --git a/doc/source/f2py/code/var.pyf b/doc/source/f2py/code/var.pyf index 8275ff3afe21..b7c080682a62 100644 --- a/doc/source/f2py/code/var.pyf +++ b/doc/source/f2py/code/var.pyf @@ -5,7 +5,7 @@ python module var ''' interface usercode ''' - PyDict_SetItemString(d,"BAR",PyInt_FromLong(BAR)); + PyDict_SetItemString(d,"BAR",PyLong_FromLong(BAR)); ''' end interface end python module diff --git a/doc/source/f2py/f2py-testing.rst b/doc/source/f2py/f2py-testing.rst index 945b4ccaa338..c6680749c7c5 100644 --- a/doc/source/f2py/f2py-testing.rst +++ b/doc/source/f2py/f2py-testing.rst @@ -50,6 +50,17 @@ functions will be appended to ``self.module`` data member. Thus, the child class be able to access the fortran functions specified in source file by calling ``self.module.[fortran_function_name]``. +.. versionadded:: v2.0.0b1 + +Each of the ``f2py`` tests should run without failure if no Fortran compilers +are present on the host machine. To facilitate this, the ``CompilerChecker`` is +used, essentially providing a ``meson`` dependent set of utilities namely +``has_{c,f77,f90,fortran}_compiler()``. + +For the CLI tests in ``test_f2py2e``, flags which are expected to call ``meson`` +or otherwise depend on a compiler need to call ``compiler_check_f2pycli()`` +instead of ``f2pycli()``. 
+ Example ~~~~~~~ @@ -77,4 +88,4 @@ A test can be implemented as follows:: We override the ``sources`` data member to provide the source file. The source files are compiled and subroutines are attached to module data member when the class object -is created. The ``test_module`` function calls the subroutines and tests their results. \ No newline at end of file +is created. The ``test_module`` function calls the subroutines and tests their results. diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst index 859a2c38be5f..635455fdb58a 100644 --- a/doc/source/f2py/usage.rst +++ b/doc/source/f2py/usage.rst @@ -86,6 +86,13 @@ Here ```` may also contain signature files. Among other options ``--wrap-functions`` is default because it ensures maximum portability and compiler independence. +``--[no-]freethreading-compatible`` + Create a module that declares it does or doesn't require the GIL. The default + is ``--no-freethreading-compatible`` for backwards compatibility. Inspect the + fortran code you are wrapping for thread safety issues before passing + ``--freethreading-compatible``, as ``f2py`` does not analyze fortran code for + thread safety issues. + ``--include-paths ":..."`` Search include files from given directories. diff --git a/doc/source/index.rst b/doc/source/index.rst index b80d65ce2c4e..02f3a8dc12b0 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -21,10 +21,10 @@ NumPy documentation `Historical versions of documentation `_ **Useful links**: -`Installation `_ | -`Source Repository `_ | -`Issue Tracker `_ | -`Q&A Support `_ | +`Installation `_ | +`Source Repository `_ | +`Issue Tracker `_ | +`Q&A Support `_ | `Mailing List `_ NumPy is the fundamental package for scientific computing in Python. It is a @@ -36,13 +36,15 @@ basic statistical operations, random simulation and much more. -.. grid:: 2 +.. grid:: 1 1 2 2 + :gutter: 2 3 4 4 .. 
grid-item-card:: :img-top: ../source/_static/index-images/getting_started.svg + :text-align: center Getting started - ^^^^^^^^^^^^^^^ + ^^^ New to NumPy? Check out the Absolute Beginner's Guide. It contains an introduction to NumPy's main concepts and links to additional tutorials. @@ -58,9 +60,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/user_guide.svg + :text-align: center User guide - ^^^^^^^^^^ + ^^^ The user guide provides in-depth information on the key concepts of NumPy with useful background information and explanation. @@ -76,9 +79,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/api.svg + :text-align: center API reference - ^^^^^^^^^^^^^ + ^^^ The reference guide contains a detailed description of the functions, modules, and objects included in NumPy. The reference describes how the @@ -96,9 +100,10 @@ basic statistical operations, random simulation and much more. .. grid-item-card:: :img-top: ../source/_static/index-images/contributor.svg + :text-align: center Contributor's guide - ^^^^^^^^^^^^^^^^^^^ + ^^^ Want to add to the codebase? Can help add translation or a flowchart to the documentation? The contributing guidelines will guide you through the diff --git a/doc/source/numpy_2_0_migration_guide.rst b/doc/source/numpy_2_0_migration_guide.rst index 0bbd68242524..55d4696a114d 100644 --- a/doc/source/numpy_2_0_migration_guide.rst +++ b/doc/source/numpy_2_0_migration_guide.rst @@ -19,11 +19,11 @@ Ruff plugin =========== Many of the changes covered in the 2.0 release notes and in this migration -guide can be automatically adapted to in downstream code with a dedicated +guide can be automatically adapted in downstream code with a dedicated `Ruff `__ rule, namely rule `NPY201 `__. 
-You should install ``ruff>=0.2.0`` and add the ``NPY201`` rule to your +You should install ``ruff>=0.4.8`` and add the ``NPY201`` rule to your ``pyproject.toml``:: [tool.ruff.lint] @@ -43,9 +43,8 @@ NumPy 2.0 changes promotion (the result of combining dissimilar data types) as per :ref:`NEP 50 `. Please see the NEP for details on this change. It includes a table of example changes and a backwards compatibility section. -The largest backwards compatibility change of this is that it means that -the precision of scalars is now preserved consistently. -Two examples are: +The largest backwards compatibility change is that the precision of scalars +is now preserved consistently. Two examples are: * ``np.float32(3) + 3.`` now returns a float32 when it previously returned a float64. @@ -97,7 +96,7 @@ C-API Changes ============= Some definitions were removed or replaced due to being outdated or -unmaintainable. Some new API definition will evaluate differently at +unmaintainable. Some new API definitions will evaluate differently at runtime between NumPy 2.0 and NumPy 1.x. Some are defined in ``numpy/_core/include/numpy/npy_2_compat.h`` (for example ``NPY_DEFAULT_INT``) which can be vendored in full or part @@ -116,10 +115,10 @@ The ``PyArray_Descr`` struct has been changed One of the most impactful C-API changes is that the ``PyArray_Descr`` struct is now more opaque to allow us to add additional flags and have itemsizes not limited by the size of ``int`` as well as allow improving -structured dtypes in the future and not burdon new dtypes with their fields. +structured dtypes in the future and not burden new dtypes with their fields. Code which only uses the type number and other initial fields is unaffected. -Most code will hopefull mainly access the ``->elsize`` field, when the +Most code will hopefully mainly access the ``->elsize`` field, when the dtype/descriptor itself is attached to an array (e.g. 
``arr->descr->elsize``) this is best replaced with ``PyArray_ITEMSIZE(arr)``. @@ -127,7 +126,7 @@ Where not possible, new accessor functions are required: * ``PyDataType_ELSIZE`` and ``PyDataType_SET_ELSIZE`` (note that the result is now ``npy_intp`` and not ``int``). -* ``PyDataType_ALIGNENT`` +* ``PyDataType_ALIGNMENT`` * ``PyDataType_FIELDS``, ``PyDataType_NAMES``, ``PyDataType_SUBARRAY`` * ``PyDataType_C_METADATA`` @@ -146,12 +145,12 @@ or adding ``npy2_compat.h`` into your code base and explicitly include it when compiling with NumPy 1.x (as they are new API). Including the file has no effect on NumPy 2. -Please do not hesitate to open a NumPy issue, if you require assistence or +Please do not hesitate to open a NumPy issue, if you require assistance or the provided functions are not sufficient. **Custom User DTypes:** -Existing user dtypes must now use ``PyArray_DescrProto`` to define their -dtype and slightly modify the code. See note in `PyArray_RegisterDataType`. +Existing user dtypes must now use :c:type:`PyArray_DescrProto` to define +their dtype and slightly modify the code. See note in :c:func:`PyArray_RegisterDataType`. Functionality moved to headers requiring ``import_array()`` ----------------------------------------------------------- @@ -180,7 +179,7 @@ Functionality which previously did not require import includes: Increased maximum number of dimensions -------------------------------------- -The maximum number of dimensions (and arguments) was increased to 64, this +The maximum number of dimensions (and arguments) was increased to 64. This affects the ``NPY_MAXDIMS`` and ``NPY_MAXARGS`` macros. It may be good to review their use, and we generally encourage you to not use these macros (especially ``NPY_MAXARGS``), so that a future version of @@ -203,17 +202,37 @@ native C99 types. 
While the memory layout of those types remains identical to the types used in NumPy 1.x, the API is slightly different, since direct field access (like ``c.real`` or ``c.imag``) is no longer possible. -It is recommended to use the functions `npy_creal` and `npy_cimag` (and the -corresponding float and long double variants) to retrieve +It is recommended to use the functions ``npy_creal`` and ``npy_cimag`` +(and the corresponding float and long double variants) to retrieve the real or imaginary part of a complex number, as these will work with both -NumPy 1.x and with NumPy 2.x. New functions `npy_csetreal` and `npy_csetimag`, -along with compatibility macros `NPY_CSETREAL` and `NPY_CSETIMAG` (and the -corresponding float and long double variants), have been -added for setting the real or imaginary part. +NumPy 1.x and with NumPy 2.x. New functions ``npy_csetreal`` and +``npy_csetimag``, along with compatibility macros ``NPY_CSETREAL`` and +``NPY_CSETIMAG`` (and the corresponding float and long double variants), +have been added for setting the real or imaginary part. The underlying type remains a struct under C++ (all of the above still remains valid). +This has implications for Cython. It is recommended to always use the native +typedefs ``cfloat_t``, ``cdouble_t``, ``clongdouble_t`` rather than the NumPy +types ``npy_cfloat``, etc, unless you have to interface with C code written +using the NumPy types. You can still write cython code using the ``c.real`` and +``c.imag`` attributes (using the native typedefs), but you can no longer use +in-place operators ``c.imag += 1`` in Cython's c++ mode. + +Because NumPy 2 now includes ``complex.h`` code that uses a variable named +``I`` may see an error such as + +.. code-block::C + error: expected ‘)’ before ‘__extension__’ + double I, + +to use the name ``I`` requires an ``#undef I`` now. + +.. note:: + NumPy 2.0.1 briefly included the ``#undef I`` to help users not already + including ``complex.h``. 
+ Changes to namespaces ===================== @@ -225,12 +244,12 @@ private. Please see the tables below for guidance on migration. For most changes this means replacing it with a backwards compatible alternative. -Please refer to `NEP 52 `_ for more details. +Please refer to :ref:`NEP52` for more details. Main namespace -------------- -About 100 members of the main ``np`` namespace has been deprecated, removed, or +About 100 members of the main ``np`` namespace have been deprecated, removed, or moved to a new place. It was done to reduce clutter and establish only one way to access a given attribute. The table below shows members that have been removed: @@ -240,13 +259,17 @@ removed member migration guideline add_docstring It's still available as ``np.lib.add_docstring``. add_newdoc It's still available as ``np.lib.add_newdoc``. add_newdoc_ufunc It's an internal function and doesn't have a replacement. +alltrue Use ``np.all`` instead. asfarray Use ``np.asarray`` with a float dtype instead. byte_bounds Now it's available under ``np.lib.array_utils.byte_bounds`` cast Use ``np.asarray(arr, dtype=dtype)`` instead. cfloat Use ``np.complex128`` instead. +charrarray It's still available as ``np.char.chararray``. clongfloat Use ``np.clongdouble`` instead. +compare_chararrays It's still available as ``np.char.compare_chararrays``. compat There's no replacement, as Python 2 is no longer supported. complex\_ Use ``np.complex128`` instead. +cumproduct Use ``np.cumprod`` instead. DataSource It's still available as ``np.lib.npyio.DataSource``. deprecate Emit ``DeprecationWarning`` with ``warnings.warn`` directly, or use ``typing.deprecated``. @@ -258,6 +281,7 @@ find_common_type Use ``numpy.promote_types`` or ``numpy.result_type`` ins To achieve semantics for the ``scalar_types`` argument, use ``numpy.result_type`` and pass the Python values ``0``, ``0.0``, or ``0j``. +format_parser It's still available as ``np.rec.format_parser``. 
get_array_wrap float\_ Use ``np.float64`` instead. geterrobj Use the np.errstate context manager instead. @@ -280,6 +304,7 @@ longfloat Use ``np.longdouble`` instead. lookfor Search NumPy's documentation directly. obj2sctype Use ``np.dtype(obj).type`` instead. PINF Use ``np.inf`` instead. +product Use ``np.prod`` instead. PZERO Use ``0.0`` instead. recfromcsv Use ``np.genfromtxt`` with comma delimiter instead. recfromtxt Use ``np.genfromtxt`` instead. @@ -295,6 +320,7 @@ set_string_function Use ``np.set_printoptions`` instead with a formatter for custom printing of NumPy objects. singlecomplex Use ``np.complex64`` instead. string\_ Use ``np.bytes_`` instead. +sometrue Use ``np.any`` instead. source Use ``inspect.getsource`` instead. tracemalloc_domain It's now available from ``np.lib``. unicode\_ Use ``np.str_`` instead. @@ -372,7 +398,6 @@ expired member migration guideline newbyteorder Use ``arr.view(arr.dtype.newbyteorder(order))`` instead. ptp Use ``np.ptp(arr, ...)`` instead. setitem Use ``arr[index] = value`` instead. -... ... ====================== ======================================================== @@ -405,14 +430,40 @@ The :ref:`copy keyword behavior changes ` in `~numpy.asarray`, `~numpy.array` and `ndarray.__array__ ` may require these changes: -1. Code using ``np.array(..., copy=False)`` can in most cases be changed to - ``np.asarray(...)``. Older code tended to use ``np.array`` like this because - it had less overhead than the default ``np.asarray`` copy-if-needed - behavior. This is no longer true, and ``np.asarray`` is the preferred function. -2. For code that explicitly needs to pass ``None``/``False`` meaning "copy if - needed" in a way that's compatible with NumPy 1.x and 2.x, see - `scipy#20172 `__ for an example - of how to do so. -3. For any ``__array__`` method on a non-NumPy array-like object, a - ``copy=None`` keyword can be added to the signature - this will work with - older NumPy versions as well. 
+* Code using ``np.array(..., copy=False)`` can in most cases be changed to + ``np.asarray(...)``. Older code tended to use ``np.array`` like this because + it had less overhead than the default ``np.asarray`` copy-if-needed + behavior. This is no longer true, and ``np.asarray`` is the preferred function. +* For code that explicitly needs to pass ``None``/``False`` meaning "copy if + needed" in a way that's compatible with NumPy 1.x and 2.x, see + `scipy#20172 `__ for an example + of how to do so. +* For any ``__array__`` method on a non-NumPy array-like object, ``dtype=None`` + and ``copy=None`` keywords must be added to the signature - this will work with older + NumPy versions as well (although older numpy versions will never pass in ``copy`` keyword). + If the keywords are added to the ``__array__`` signature, then for: + + * ``copy=True`` and any ``dtype`` value always return a new copy, + * ``copy=None`` create a copy if required (for example by ``dtype``), + * ``copy=False`` a copy must never be made. If a copy is needed to return a numpy array + or satisfy ``dtype``, then raise an exception (``ValueError``). + +Writing numpy-version-dependent code +------------------------------------ + +It should be fairly rare to have to write code that explicitly branches on the +``numpy`` version - in most cases, code can be rewritten to be compatible with +1.x and 2.0 at the same time. However, if it is necessary, here is a suggested +code pattern to use, using `numpy.lib.NumpyVersion`:: + + # example with AxisError, which is no longer available in + # the main namespace in 2.0, and not available in the + # `exceptions` namespace in <1.25.0 (example uses <2.0.0b1 + # for illustrative purposes): + if np.lib.NumpyVersion(np.__version__) >= '2.0.0b1': + from numpy.exceptions import AxisError + else: + from numpy import AxisError + +This pattern will work correctly including with NumPy release candidates, which +is important during the 2.0.0 release period. 
diff --git a/doc/source/reference/array_api.rst b/doc/source/reference/array_api.rst index 08bae3fec918..69b51215e555 100644 --- a/doc/source/reference/array_api.rst +++ b/doc/source/reference/array_api.rst @@ -13,12 +13,12 @@ NumPy aims to implement support for the `2023.12 version `__ and future versions of the standard - assuming that those future versions can be upgraded to given NumPy's -`backwards compatibility policy `__. +:ref:`backwards compatibility policy `. For usage guidelines for downstream libraries and end users who want to write code that will work with both NumPy and other array libraries, we refer to the documentation of the array API standard itself and to code and -developer-focused documention in SciPy and scikit-learn. +developer-focused documentation in SciPy and scikit-learn. Note that in order to use standard-complaint code with older NumPy versions (< 2.0), the `array-api-compat @@ -33,8 +33,8 @@ rather than anything NumPy-specific, the `array-api-strict standard, via a separate ``numpy.array_api`` submodule. This module was marked as experimental (it emitted a warning on import) and removed in NumPy 2.0 because full support was included in the main namespace. - `NEP 47 `__ and - `NEP 56 `__ + :ref:`NEP 47 ` and + :ref:`NEP 56 ` describe the motivation and scope for implementing the array API standard in NumPy. @@ -57,8 +57,23 @@ an entry point. .. rubric:: Footnotes .. [1] With a few very minor exceptions, as documented in - `NEP 56 `__. + :ref:`NEP 56 `. The ``sum``, ``prod`` and ``trace`` behavior adheres to the 2023.12 version instead, as do function signatures; the only known incompatibility that may remain is that the standard forbids unsafe casts for in-place operators while NumPy supports those. + +Inspection +========== + +NumPy implements the `array API inspection utilities +`__. +These functions can be accessed via the ``__array_namespace_info__()`` +function, which returns a namespace containing the inspection utilities. + +.. 
currentmodule:: numpy + +.. autosummary:: + :toctree: generated + + __array_namespace_info__ diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst index 04bced806587..3b2d0c4b2a02 100644 --- a/doc/source/reference/arrays.classes.rst +++ b/doc/source/reference/arrays.classes.rst @@ -409,24 +409,27 @@ alias for "matrix "in NumPy. Example 1: Matrix creation from a string ->>> a = np.asmatrix('1 2 3; 4 5 3') ->>> print((a*a.T).I) + >>> import numpy as np + >>> a = np.asmatrix('1 2 3; 4 5 3') + >>> print((a*a.T).I) [[ 0.29239766 -0.13450292] [-0.13450292 0.08187135]] -Example 2: Matrix creation from nested sequence +Example 2: Matrix creation from a nested sequence ->>> np.asmatrix([[1,5,10],[1.0,3,4j]]) -matrix([[ 1.+0.j, 5.+0.j, 10.+0.j], - [ 1.+0.j, 3.+0.j, 0.+4.j]]) + >>> import numpy as np + >>> np.asmatrix([[1,5,10],[1.0,3,4j]]) + matrix([[ 1.+0.j, 5.+0.j, 10.+0.j], + [ 1.+0.j, 3.+0.j, 0.+4.j]]) Example 3: Matrix creation from an array ->>> np.asmatrix(np.random.rand(3,3)).T -matrix([[4.17022005e-01, 3.02332573e-01, 1.86260211e-01], - [7.20324493e-01, 1.46755891e-01, 3.45560727e-01], - [1.14374817e-04, 9.23385948e-02, 3.96767474e-01]]) + >>> import numpy as np + >>> np.asmatrix(np.random.rand(3,3)).T + matrix([[4.17022005e-01, 3.02332573e-01, 1.86260211e-01], + [7.20324493e-01, 1.46755891e-01, 3.45560727e-01], + [1.14374817e-04, 9.23385948e-02, 3.96767474e-01]]) Memory-mapped file arrays @@ -458,16 +461,20 @@ array actually get written to disk. 
Example: ->>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000) ->>> a[10] = 10.0 ->>> a[30] = 30.0 ->>> del a ->>> b = np.fromfile('newfile.dat', dtype=float) ->>> print(b[10], b[30]) -10.0 30.0 ->>> a = np.memmap('newfile.dat', dtype=float) ->>> print(a[10], a[30]) -10.0 30.0 + >>> import numpy as np + + >>> a = np.memmap('newfile.dat', dtype=float, mode='w+', shape=1000) + >>> a[10] = 10.0 + >>> a[30] = 30.0 + >>> del a + + >>> b = np.fromfile('newfile.dat', dtype=float) + >>> print(b[10], b[30]) + 10.0 30.0 + + >>> a = np.memmap('newfile.dat', dtype=float) + >>> print(a[10], a[30]) + 10.0 30.0 Character arrays (:mod:`numpy.char`) @@ -602,15 +609,16 @@ This default iterator selects a sub-array of dimension :math:`N-1` from the array. This can be a useful construct for defining recursive algorithms. To loop over the entire array requires :math:`N` for-loops. ->>> a = np.arange(24).reshape(3,2,4)+10 ->>> for val in a: -... print('item:', val) -item: [[10 11 12 13] - [14 15 16 17]] -item: [[18 19 20 21] - [22 23 24 25]] -item: [[26 27 28 29] - [30 31 32 33]] + >>> import numpy as np + >>> a = np.arange(24).reshape(3,2,4) + 10 + >>> for val in a: + ... print('item:', val) + item: [[10 11 12 13] + [14 15 16 17]] + item: [[18 19 20 21] + [22 23 24 25]] + item: [[26 27 28 29] + [30 31 32 33]] Flat iteration @@ -625,13 +633,15 @@ As mentioned previously, the flat attribute of ndarray objects returns an iterator that will cycle over the entire array in C-style contiguous order. ->>> for i, val in enumerate(a.flat): -... if i%5 == 0: print(i, val) -0 10 -5 15 -10 20 -15 25 -20 30 + >>> import numpy as np + >>> a = np.arange(24).reshape(3,2,4) + 10 + >>> for i, val in enumerate(a.flat): + ... if i%5 == 0: print(i, val) + 0 10 + 5 15 + 10 20 + 15 25 + 20 30 Here, I've used the built-in enumerate iterator to return the iterator index as well as the value. 
@@ -648,12 +658,13 @@ N-dimensional enumeration Sometimes it may be useful to get the N-dimensional index while iterating. The ndenumerate iterator can achieve this. ->>> for i, val in np.ndenumerate(a): -... if sum(i)%5 == 0: print(i, val) -(0, 0, 0) 10 -(1, 1, 3) 25 -(2, 0, 3) 29 -(2, 1, 2) 32 + >>> import numpy as np + >>> for i, val in np.ndenumerate(a): + ... if sum(i)%5 == 0: print(i, val) + (0, 0, 0) 10 + (1, 1, 3) 25 + (2, 0, 3) 29 + (2, 1, 2) 32 Iterator for broadcasting @@ -670,9 +681,10 @@ objects as inputs and returns an iterator that returns tuples providing each of the input sequence elements in the broadcasted result. ->>> for val in np.broadcast([[1, 0], [2, 3]], [0, 1]): -... print(val) -(np.int64(1), np.int64(0)) -(np.int64(0), np.int64(1)) -(np.int64(2), np.int64(0)) -(np.int64(3), np.int64(1)) + >>> import numpy as np + >>> for val in np.broadcast([[1, 0], [2, 3]], [0, 1]): + ... print(val) + (np.int64(1), np.int64(0)) + (np.int64(0), np.int64(1)) + (np.int64(2), np.int64(0)) + (np.int64(3), np.int64(1)) diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst index 8a7b648281ba..a63fbdc6a910 100644 --- a/doc/source/reference/arrays.datetime.rst +++ b/doc/source/reference/arrays.datetime.rst @@ -61,6 +61,8 @@ letters, for a "Not A Time" value. A simple ISO date: + >>> import numpy as np + >>> np.datetime64('2005-02-25') np.datetime64('2005-02-25') @@ -95,6 +97,8 @@ datetime type with generic units. .. admonition:: Example + >>> import numpy as np + >>> np.array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64') array(['2007-07-13', '2006-01-13', '2010-08-13'], dtype='datetime64[D]') @@ -107,6 +111,8 @@ POSIX timestamps with the given unit. .. 
admonition:: Example + >>> import numpy as np + >>> np.array([0, 1577836800], dtype='datetime64[s]') array(['1970-01-01T00:00:00', '2020-01-01T00:00:00'], dtype='datetime64[s]') @@ -122,6 +128,8 @@ example :func:`arange` can be used to generate ranges of dates. All the dates for one month: + >>> import numpy as np + >>> np.arange('2005-02', '2005-03', dtype='datetime64[D]') array(['2005-02-01', '2005-02-02', '2005-02-03', '2005-02-04', '2005-02-05', '2005-02-06', '2005-02-07', '2005-02-08', @@ -140,6 +148,8 @@ because the moment of time is still being represented exactly. .. admonition:: Example + >>> import numpy as np + >>> np.datetime64('2005') == np.datetime64('2005-01-01') True @@ -167,6 +177,8 @@ data type also accepts the string "NAT" in place of the number for a "Not A Time .. admonition:: Example + >>> import numpy as np + >>> np.timedelta64(1, 'D') np.timedelta64(1,'D') @@ -181,6 +193,8 @@ simple datetime calculations. .. admonition:: Example + >>> import numpy as np + >>> np.datetime64('2009-01-01') - np.datetime64('2008-01-01') np.timedelta64(366,'D') @@ -205,11 +219,17 @@ simple datetime calculations. There are two Timedelta units ('Y', years and 'M', months) which are treated specially, because how much time they represent changes depending on when they are used. While a timedelta day unit is equivalent to -24 hours, there is no way to convert a month unit into days, because -different months have different numbers of days. +24 hours, month and year units cannot be converted directly into days +without using 'unsafe' casting. + +The `numpy.ndarray.astype` method can be used for unsafe +conversion of months/years to days. The conversion follows +calculating the averaged values from the 400 year leap-year cycle. .. admonition:: Example + >>> import numpy as np + >>> a = np.timedelta64(1, 'Y') >>> np.timedelta64(a, 'M') @@ -220,6 +240,7 @@ different months have different numbers of days. 
File "", line 1, in TypeError: Cannot cast NumPy timedelta64 scalar from metadata [Y] to [D] according to the rule 'same_kind' + Datetime units ============== @@ -288,6 +309,8 @@ specified in business days to datetimes with a unit of 'D' (day). .. admonition:: Example + >>> import numpy as np + >>> np.busday_offset('2011-06-23', 1) np.datetime64('2011-06-24') @@ -302,6 +325,8 @@ The rules most typically used are 'forward' and 'backward'. .. admonition:: Example + >>> import numpy as np + >>> np.busday_offset('2011-06-25', 2) Traceback (most recent call last): File "", line 1, in @@ -326,6 +351,8 @@ is necessary to get a desired answer. The first business day on or after a date: + >>> import numpy as np + >>> np.busday_offset('2011-03-20', 0, roll='forward') np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 0, roll='forward') @@ -345,6 +372,8 @@ weekmask. .. admonition:: Example + >>> import numpy as np + >>> np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') np.datetime64('2012-05-13') @@ -359,6 +388,8 @@ To test a `datetime64` value to see if it is a valid day, use :func:`is_busday`. .. admonition:: Example + >>> import numpy as np + >>> np.is_busday(np.datetime64('2011-07-15')) # a Friday True >>> np.is_busday(np.datetime64('2011-07-16')) # a Saturday @@ -376,6 +407,8 @@ dates, use :func:`busday_count`: .. admonition:: Example + >>> import numpy as np + >>> np.busday_count(np.datetime64('2011-07-11'), np.datetime64('2011-07-18')) 5 >>> np.busday_count(np.datetime64('2011-07-18'), np.datetime64('2011-07-11')) @@ -386,6 +419,8 @@ how many of them are valid dates, you can do this: .. admonition:: Example + >>> import numpy as np + >>> a = np.arange(np.datetime64('2011-07-11'), np.datetime64('2011-07-18')) >>> np.count_nonzero(np.is_busday(a)) 5 @@ -433,6 +468,8 @@ given below. 
23:59:60.450 UTC" is a valid timestamp which is not parseable by `datetime64`: + >>> import numpy as np + >>> np.datetime64("2016-12-31 23:59:60.450") Traceback (most recent call last): File "", line 1, in @@ -446,14 +483,16 @@ given below. Compute the number of SI seconds between "2021-01-01 12:56:23.423 UTC" and "2001-01-01 00:00:00.000 UTC": + >>> import numpy as np + >>> ( ... np.datetime64("2021-01-01 12:56:23.423") ... - np.datetime64("2001-01-01") ... ) / np.timedelta64(1, "s") 631198583.423 - however correct answer is `631198588.423` SI seconds because there were 5 - leap seconds between 2001 and 2021. + However, the correct answer is `631198588.423` SI seconds, because there were + 5 leap seconds between 2001 and 2021. - Timedelta64 computations for dates in the past do not return SI seconds, as one would expect. @@ -464,16 +503,19 @@ given below. where UT is `universal time `_: + + >>> import numpy as np + >>> a = np.datetime64("0000-01-01", "us") >>> b = np.datetime64("1600-01-01", "us") >>> b - a numpy.timedelta64(50491123200000000,'us') - The computed results, `50491123200` seconds, is obtained as the elapsed - number of days (`584388`) times `86400` seconds; this is the number of - seconds of a clock in sync with earth rotation. The exact value in SI - seconds can only be estimated, e.g using data published in `Measurement of - the Earth's rotation: 720 BC to AD 2015, 2016, Royal Society's Proceedings - A 472, by Stephenson et.al. `_. A - sensible estimate is `50491112870 ± 90` seconds, with a difference of 10330 - seconds. + The computed results, `50491123200` seconds, are obtained as the elapsed + number of days (`584388`) times `86400` seconds; this is the number of + seconds of a clock in sync with the Earth's rotation. The exact value in SI + seconds can only be estimated, e.g., using data published in `Measurement of + the Earth's rotation: 720 BC to AD 2015, 2016, Royal Society's Proceedings + A 472, by Stephenson et.al. `_. 
A + sensible estimate is `50491112870 ± 90` seconds, with a difference of 10330 + seconds. diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index b2a6f5ab8a2d..8aa7170df065 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -68,15 +68,17 @@ Sub-arrays always have a C-contiguous memory layout. A simple data type containing a 32-bit big-endian integer: (see :ref:`arrays.dtypes.constructing` for details on construction) - >>> dt = np.dtype('>i4') - >>> dt.byteorder - '>' - >>> dt.itemsize - 4 - >>> dt.name - 'int32' - >>> dt.type is np.int32 - True + >>> import numpy as np + + >>> dt = np.dtype('>i4') + >>> dt.byteorder + '>' + >>> dt.itemsize + 4 + >>> dt.name + 'int32' + >>> dt.type is np.int32 + True The corresponding array scalar type is :class:`int32`. @@ -85,24 +87,28 @@ Sub-arrays always have a C-contiguous memory layout. A structured data type containing a 16-character string (in field 'name') and a sub-array of two 64-bit floating-point number (in field 'grades'): - >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) - >>> dt['name'] - dtype('>> dt['grades'] - dtype(('>> import numpy as np + + >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) + >>> dt['name'] + dtype('>> dt['grades'] + dtype(('` type that also has two fields: - >>> x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) - >>> x[1] - ('John', [6., 7.]) - >>> x[1]['grades'] - array([6., 7.]) - >>> type(x[1]) - - >>> type(x[1]['grades']) - + >>> import numpy as np + + >>> x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt) + >>> x[1] + ('John', [6., 7.]) + >>> x[1]['grades'] + array([6., 7.]) + >>> type(x[1]) + + >>> type(x[1]['grades']) + .. _arrays.dtypes.constructing: @@ -148,8 +154,10 @@ Array-scalar types .. 
admonition:: Example - >>> dt = np.dtype(np.int32) # 32-bit integer - >>> dt = np.dtype(np.complex128) # 128-bit complex floating-point number + >>> import numpy as np + + >>> dt = np.dtype(np.int32) # 32-bit integer + >>> dt = np.dtype(np.complex128) # 128-bit complex floating-point number Generic types The generic hierarchical type objects convert to corresponding @@ -191,9 +199,11 @@ Built-in Python types .. admonition:: Example - >>> dt = np.dtype(float) # Python-compatible floating-point number - >>> dt = np.dtype(int) # Python-compatible integer - >>> dt = np.dtype(object) # Python object + >>> import numpy as np + + >>> dt = np.dtype(float) # Python-compatible floating-point number + >>> dt = np.dtype(int) # Python-compatible integer + >>> dt = np.dtype(object) # Python object .. note:: @@ -219,10 +229,12 @@ One-character strings .. admonition:: Example - >>> dt = np.dtype('b') # byte, native byte order - >>> dt = np.dtype('>H') # big-endian unsigned short - >>> dt = np.dtype('>> dt = np.dtype('d') # double-precision floating-point number + >>> import numpy as np + + >>> dt = np.dtype('b') # byte, native byte order + >>> dt = np.dtype('>H') # big-endian unsigned short + >>> dt = np.dtype('>> dt = np.dtype('d') # double-precision floating-point number Array-protocol type strings (see :ref:`arrays.interface`) The first character specifies the kind of data and the remaining @@ -249,11 +261,13 @@ Array-protocol type strings (see :ref:`arrays.interface`) .. 
admonition:: Example - >>> dt = np.dtype('i4') # 32-bit signed integer - >>> dt = np.dtype('f8') # 64-bit floating-point number - >>> dt = np.dtype('c16') # 128-bit complex floating-point number - >>> dt = np.dtype('S25') # 25-length zero-terminated bytes - >>> dt = np.dtype('U25') # 25-character string + >>> import numpy as np + + >>> dt = np.dtype('i4') # 32-bit signed integer + >>> dt = np.dtype('f8') # 64-bit floating-point number + >>> dt = np.dtype('c16') # 128-bit complex floating-point number + >>> dt = np.dtype('S25') # 25-length zero-terminated bytes + >>> dt = np.dtype('U25') # 25-character string .. _string-dtype-note: @@ -285,7 +299,8 @@ String with comma-separated fields of 64-bit floating-point numbers - field named ``f2`` containing a 32-bit floating-point number - >>> dt = np.dtype("i4, (2,3)f8, f4") + >>> import numpy as np + >>> dt = np.dtype("i4, (2,3)f8, f4") - field named ``f0`` containing a 3-character string - field named ``f1`` containing a sub-array of shape (3,) @@ -293,15 +308,18 @@ String with comma-separated fields - field named ``f2`` containing a 3 x 4 sub-array containing 10-character strings - >>> dt = np.dtype("S3, 3u8, (3,4)S10") + >>> import numpy as np + >>> dt = np.dtype("S3, 3u8, (3,4)S10") Type strings Any string name of a NumPy dtype, e.g.: .. admonition:: Example - >>> dt = np.dtype('uint32') # 32-bit unsigned integer - >>> dt = np.dtype('float64') # 64-bit floating-point number + >>> import numpy as np + + >>> dt = np.dtype('uint32') # 32-bit unsigned integer + >>> dt = np.dtype('float64') # 64-bit floating-point number .. index:: triple: dtype; construction; from tuple @@ -313,8 +331,10 @@ Type strings .. admonition:: Example - >>> dt = np.dtype((np.void, 10)) # 10-byte wide data block - >>> dt = np.dtype(('U', 10)) # 10-character unicode string + >>> import numpy as np + + >>> dt = np.dtype((np.void, 10)) # 10-byte wide data block + >>> dt = np.dtype(('U', 10)) # 10-character unicode string ``(fixed_dtype, shape)`` .. 
index:: @@ -330,8 +350,10 @@ Type strings .. admonition:: Example - >>> dt = np.dtype((np.int32, (2,2))) # 2 x 2 integer sub-array - >>> dt = np.dtype(('i4, (2,3)f8, f4', (2,3))) # 2 x 3 structured sub-array + >>> import numpy as np + + >>> dt = np.dtype((np.int32, (2,2))) # 2 x 2 integer sub-array + >>> dt = np.dtype(('i4, (2,3)f8, f4', (2,3))) # 2 x 3 structured sub-array .. index:: triple: dtype; construction; from list @@ -362,15 +384,17 @@ Type strings .. admonition:: Example - Data-type with fields ``big`` (big-endian 32-bit integer) and - ``little`` (little-endian 32-bit integer): + Data-type with fields ``big`` (big-endian 32-bit integer) and + ``little`` (little-endian 32-bit integer): - >>> dt = np.dtype([('big', '>i4'), ('little', '>> import numpy as np - Data-type with fields ``R``, ``G``, ``B``, ``A``, each being an - unsigned 8-bit integer: + >>> dt = np.dtype([('big', '>i4'), ('little', '>> dt = np.dtype([('R','u1'), ('G','u1'), ('B','u1'), ('A','u1')]) + Data-type with fields ``R``, ``G``, ``B``, ``A``, each being an + unsigned 8-bit integer: + + >>> dt = np.dtype([('R','u1'), ('G','u1'), ('B','u1'), ('A','u1')]) .. index:: triple: dtype; construction; from dict @@ -401,19 +425,21 @@ Type strings .. admonition:: Example - Data type with fields ``r``, ``g``, ``b``, ``a``, each being - an 8-bit unsigned integer: + Data type with fields ``r``, ``g``, ``b``, ``a``, each being + an 8-bit unsigned integer: + + >>> import numpy as np - >>> dt = np.dtype({'names': ['r','g','b','a'], - ... 'formats': [np.uint8, np.uint8, np.uint8, np.uint8]}) + >>> dt = np.dtype({'names': ['r','g','b','a'], + ... 
'formats': [np.uint8, np.uint8, np.uint8, np.uint8]}) - Data type with fields ``r`` and ``b`` (with the given titles), - both being 8-bit unsigned integers, the first at byte position - 0 from the start of the field and the second at position 2: + Data type with fields ``r`` and ``b`` (with the given titles), + both being 8-bit unsigned integers, the first at byte position + 0 from the start of the field and the second at position 2: - >>> dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], - ... 'offsets': [0, 2], - ... 'titles': ['Red pixel', 'Blue pixel']}) + >>> dt = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'], + ... 'offsets': [0, 2], + ... 'titles': ['Red pixel', 'Blue pixel']}) ``{'field1': ..., 'field2': ..., ...}`` @@ -430,12 +456,14 @@ Type strings .. admonition:: Example - Data type containing field ``col1`` (10-character string at - byte position 0), ``col2`` (32-bit float at byte position 10), - and ``col3`` (integers at byte position 14): + Data type containing field ``col1`` (10-character string at + byte position 0), ``col2`` (32-bit float at byte position 10), + and ``col3`` (integers at byte position 14): - >>> dt = np.dtype({'col1': ('U10', 0), 'col2': (np.float32, 10), - ... 'col3': (int, 14)}) + >>> import numpy as np + + >>> dt = np.dtype({'col1': ('U10', 0), 'col2': (np.float32, 10), + ... 'col3': (int, 14)}) ``(base_dtype, new_dtype)`` In NumPy 1.7 and later, this form allows `base_dtype` to be interpreted as @@ -453,20 +481,22 @@ Type strings .. admonition:: Example - 32-bit integer, whose first two bytes are interpreted as an integer - via field ``real``, and the following two bytes via field ``imag``. + 32-bit integer, whose first two bytes are interpreted as an integer + via field ``real``, and the following two bytes via field ``imag``. 
+ + >>> import numpy as np - >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)})) + >>> dt = np.dtype((np.int32,{'real':(np.int16, 0),'imag':(np.int16, 2)})) - 32-bit integer, which is interpreted as consisting of a sub-array - of shape ``(4,)`` containing 8-bit integers: + 32-bit integer, which is interpreted as consisting of a sub-array + of shape ``(4,)`` containing 8-bit integers: - >>> dt = np.dtype((np.int32, (np.int8, 4))) + >>> dt = np.dtype((np.int32, (np.int8, 4))) - 32-bit integer, containing fields ``r``, ``g``, ``b``, ``a`` that - interpret the 4 bytes in the integer as four unsigned integers: + 32-bit integer, containing fields ``r``, ``g``, ``b``, ``a`` that + interpret the 4 bytes in the integer as four unsigned integers: - >>> dt = np.dtype(('i4', [('r','u1'),('g','u1'),('b','u1'),('a','u1')])) + >>> dt = np.dtype(('i4', [('r','u1'),('g','u1'),('b','u1'),('a','u1')])) Checking the data type @@ -475,11 +505,13 @@ When checking for a specific data type, use ``==`` comparison. .. admonition:: Example - >>> a = np.array([1, 2], dtype=np.float32) - >>> a.dtype == np.float32 - True + >>> import numpy as np -As opposed to python types, a comparison using ``is`` should not be used. + >>> a = np.array([1, 2], dtype=np.float32) + >>> a.dtype == np.float32 + True + +As opposed to Python types, a comparison using ``is`` should not be used. First, NumPy treats data type specifications (everything that can be passed to the :class:`dtype` constructor) as equivalent to the data type object itself. @@ -487,31 +519,35 @@ This equivalence can only be handled through ``==``, not through ``is``. .. admonition:: Example - A :class:`dtype` object is equal to all data type specifications that are - equivalent to it. 
- - >>> a = np.array([1, 2], dtype=float) - >>> a.dtype == np.dtype(np.float64) - True - >>> a.dtype == np.float64 - True - >>> a.dtype == float - True - >>> a.dtype == "float64" - True - >>> a.dtype == "d" - True + A :class:`dtype` object is equal to all data type specifications that are + equivalent to it. + + >>> import numpy as np + + >>> a = np.array([1, 2], dtype=float) + >>> a.dtype == np.dtype(np.float64) + True + >>> a.dtype == np.float64 + True + >>> a.dtype == float + True + >>> a.dtype == "float64" + True + >>> a.dtype == "d" + True Second, there is no guarantee that data type objects are singletons. .. admonition:: Example - Do not use ``is`` because data type objects may or may not be singletons. + Do not use ``is`` because data type objects may or may not be singletons. + + >>> import numpy as np - >>> np.dtype(float) is np.dtype(float) - True - >>> np.dtype([('a', float)]) is np.dtype([('a', float)]) - False + >>> np.dtype(float) is np.dtype(float) + True + >>> np.dtype([('a', float)]) is np.dtype([('a', float)]) + False :class:`dtype` ============== diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 5429a272569d..d03ebde361a2 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -32,35 +32,37 @@ objects implementing the :class:`memoryview` or :ref:`array .. admonition:: Example - A 2-dimensional array of size 2 x 3, composed of 4-byte integer - elements: + A 2-dimensional array of size 2 x 3, composed of 4-byte integer + elements: - >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) - >>> type(x) - - >>> x.shape - (2, 3) - >>> x.dtype - dtype('int32') + >>> import numpy as np - The array can be indexed using Python container-like syntax: + >>> x = np.array([[1, 2, 3], [4, 5, 6]], np.int32) + >>> type(x) + + >>> x.shape + (2, 3) + >>> x.dtype + dtype('int32') - >>> # The element of x in the *second* row, *third* column, namely, 6. 
- >>> x[1, 2] - 6 + The array can be indexed using Python container-like syntax: - For example :ref:`slicing ` can produce views of - the array: + >>> # The element of x in the *second* row, *third* column, namely, 6. + >>> x[1, 2] + 6 - >>> y = x[:,1] - >>> y - array([2, 5], dtype=int32) - >>> y[0] = 9 # this also changes the corresponding element in x - >>> y - array([9, 5], dtype=int32) - >>> x - array([[1, 9, 3], - [4, 5, 6]], dtype=int32) + For example :ref:`slicing ` can produce views of + the array: + + >>> y = x[:,1] + >>> y + array([2, 5], dtype=int32) + >>> y[0] = 9 # this also changes the corresponding element in x + >>> y + array([9, 5], dtype=int32) + >>> x + array([[1, 9, 3], + [4, 5, 6]], dtype=int32) Constructing arrays @@ -360,36 +362,38 @@ Many of these methods take an argument named *axis*. In such cases, .. admonition:: Example of the *axis* argument - A 3-dimensional array of size 3 x 3 x 3, summed over each of its - three axes - - >>> x = np.arange(27).reshape((3,3,3)) - >>> x - array([[[ 0, 1, 2], - [ 3, 4, 5], - [ 6, 7, 8]], - [[ 9, 10, 11], - [12, 13, 14], - [15, 16, 17]], - [[18, 19, 20], - [21, 22, 23], - [24, 25, 26]]]) - >>> x.sum(axis=0) - array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]) - >>> # for sum, axis is the first keyword, so we may omit it, - >>> # specifying only its value - >>> x.sum(0), x.sum(1), x.sum(2) - (array([[27, 30, 33], - [36, 39, 42], - [45, 48, 51]]), - array([[ 9, 12, 15], - [36, 39, 42], - [63, 66, 69]]), - array([[ 3, 12, 21], - [30, 39, 48], - [57, 66, 75]])) + A 3-dimensional array of size 3 x 3 x 3, summed over each of its + three axes: + + >>> import numpy as np + + >>> x = np.arange(27).reshape((3,3,3)) + >>> x + array([[[ 0, 1, 2], + [ 3, 4, 5], + [ 6, 7, 8]], + [[ 9, 10, 11], + [12, 13, 14], + [15, 16, 17]], + [[18, 19, 20], + [21, 22, 23], + [24, 25, 26]]]) + >>> x.sum(axis=0) + array([[27, 30, 33], + [36, 39, 42], + [45, 48, 51]]) + >>> # for sum, axis is the first keyword, so we may omit it, + 
>>> # specifying only its value + >>> x.sum(0), x.sum(1), x.sum(2) + (array([[27, 30, 33], + [36, 39, 42], + [45, 48, 51]]), + array([[ 9, 12, 15], + [36, 39, 42], + [63, 66, 69]]), + array([[ 3, 12, 21], + [30, 39, 48], + [57, 66, 75]])) The parameter *dtype* specifies the data type over which a reduction operation (like summing) should take place. The default reduce data diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst index d5d19d244e94..3c71a69e0fcd 100644 --- a/doc/source/reference/arrays.nditer.rst +++ b/doc/source/reference/arrays.nditer.rst @@ -32,11 +32,13 @@ using the standard Python iterator interface. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a): - ... print(x, end=' ') - ... - 0 1 2 3 4 5 + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a): + ... print(x, end=' ') + ... + 0 1 2 3 4 5 An important thing to be aware of for this iteration is that the order is chosen to match the memory layout of the array instead of using a @@ -48,16 +50,18 @@ of that transpose in C order. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a.T): - ... print(x, end=' ') - ... - 0 1 2 3 4 5 + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a.T): + ... print(x, end=' ') + ... + 0 1 2 3 4 5 - >>> for x in np.nditer(a.T.copy(order='C')): - ... print(x, end=' ') - ... - 0 3 1 4 2 5 + >>> for x in np.nditer(a.T.copy(order='C')): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 The elements of both `a` and `a.T` get traversed in the same order, namely the order they are stored in memory, whereas the elements of @@ -76,15 +80,17 @@ order='C' for C order and order='F' for Fortran order. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, order='F'): - ... print(x, end=' ') - ... - 0 3 1 4 2 5 - >>> for x in np.nditer(a.T, order='C'): - ... 
print(x, end=' ') - ... - 0 3 1 4 2 5 + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, order='F'): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 + >>> for x in np.nditer(a.T, order='C'): + ... print(x, end=' ') + ... + 0 3 1 4 2 5 .. _nditer-context-manager: @@ -111,17 +117,19 @@ context is exited. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> a - array([[0, 1, 2], - [3, 4, 5]]) - >>> with np.nditer(a, op_flags=['readwrite']) as it: - ... for x in it: - ... x[...] = 2 * x - ... - >>> a - array([[ 0, 2, 4], - [ 6, 8, 10]]) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> with np.nditer(a, op_flags=['readwrite']) as it: + ... for x in it: + ... x[...] = 2 * x + ... + >>> a + array([[ 0, 2, 4], + [ 6, 8, 10]]) If you are writing code that needs to support older versions of numpy, note that prior to 1.15, :class:`nditer` was not a context manager and @@ -150,16 +158,18 @@ elements each. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, flags=['external_loop']): - ... print(x, end=' ') - ... - [0 1 2 3 4 5] + >>> import numpy as np - >>> for x in np.nditer(a, flags=['external_loop'], order='F'): - ... print(x, end=' ') - ... - [0 3] [1 4] [2 5] + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, flags=['external_loop']): + ... print(x, end=' ') + ... + [0 1 2 3 4 5] + + >>> for x in np.nditer(a, flags=['external_loop'], order='F'): + ... print(x, end=' ') + ... + [0 3] [1 4] [2 5] Tracking an index or multi-index -------------------------------- @@ -176,26 +186,28 @@ progression of the index: .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> it = np.nditer(a, flags=['f_index']) - >>> for x in it: - ... print("%d <%d>" % (x, it.index), end=' ') - ... - 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> - - >>> it = np.nditer(a, flags=['multi_index']) - >>> for x in it: - ... 
print("%d <%s>" % (x, it.multi_index), end=' ') - ... - 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> - - >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: - ... for x in it: - ... x[...] = it.multi_index[1] - it.multi_index[0] - ... - >>> a - array([[ 0, 1, 2], - [-1, 0, 1]]) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> it = np.nditer(a, flags=['f_index']) + >>> for x in it: + ... print("%d <%d>" % (x, it.index), end=' ') + ... + 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> + + >>> it = np.nditer(a, flags=['multi_index']) + >>> for x in it: + ... print("%d <%s>" % (x, it.multi_index), end=' ') + ... + 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> + + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... for x in it: + ... x[...] = it.multi_index[1] - it.multi_index[0] + ... + >>> a + array([[ 0, 1, 2], + [-1, 0, 1]]) Tracking an index or multi-index is incompatible with using an external loop, because it requires a different index value per element. If @@ -204,11 +216,13 @@ raise an exception. .. admonition:: Example - >>> a = np.zeros((2,3)) - >>> it = np.nditer(a, flags=['c_index', 'external_loop']) - Traceback (most recent call last): - File "", line 1, in - ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked + >>> import numpy as np + + >>> a = np.zeros((2,3)) + >>> it = np.nditer(a, flags=['c_index', 'external_loop']) + Traceback (most recent call last): + File "", line 1, in + ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked Alternative looping and element access -------------------------------------- @@ -222,29 +236,31 @@ produce identical results to the ones in the previous section. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> it = np.nditer(a, flags=['f_index']) - >>> while not it.finished: - ... 
print("%d <%d>" % (it[0], it.index), end=' ') - ... is_not_finished = it.iternext() - ... - 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> - - >>> it = np.nditer(a, flags=['multi_index']) - >>> while not it.finished: - ... print("%d <%s>" % (it[0], it.multi_index), end=' ') - ... is_not_finished = it.iternext() - ... - 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> - - >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: - ... while not it.finished: - ... it[0] = it.multi_index[1] - it.multi_index[0] - ... is_not_finished = it.iternext() - ... - >>> a - array([[ 0, 1, 2], - [-1, 0, 1]]) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> it = np.nditer(a, flags=['f_index']) + >>> while not it.finished: + ... print("%d <%d>" % (it[0], it.index), end=' ') + ... is_not_finished = it.iternext() + ... + 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5> + + >>> it = np.nditer(a, flags=['multi_index']) + >>> while not it.finished: + ... print("%d <%s>" % (it[0], it.multi_index), end=' ') + ... is_not_finished = it.iternext() + ... + 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)> + + >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it: + ... while not it.finished: + ... it[0] = it.multi_index[1] - it.multi_index[0] + ... is_not_finished = it.iternext() + ... + >>> a + array([[ 0, 1, 2], + [-1, 0, 1]]) Buffering the array elements ---------------------------- @@ -263,16 +279,18 @@ is enabled. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - >>> for x in np.nditer(a, flags=['external_loop'], order='F'): - ... print(x, end=' ') - ... - [0 3] [1 4] [2 5] + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) + >>> for x in np.nditer(a, flags=['external_loop'], order='F'): + ... print(x, end=' ') + ... + [0 3] [1 4] [2 5] - >>> for x in np.nditer(a, flags=['external_loop','buffered'], order='F'): - ... print(x, end=' ') - ... 
- [0 3 1 4 2 5] + >>> for x in np.nditer(a, flags=['external_loop','buffered'], order='F'): + ... print(x, end=' ') + ... + [0 3 1 4 2 5] Iterating as a specific data type --------------------------------- @@ -305,13 +323,15 @@ data type doesn't match precisely. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - 3 - >>> for x in np.nditer(a, op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand required copying or buffering, but neither copying nor buffering was enabled + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) - 3 + >>> for x in np.nditer(a, op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand required copying or buffering, but neither copying nor buffering was enabled In copying mode, 'copy' is specified as a per-operand flag. This is done to provide control in a per-operand fashion. Buffering mode is @@ -319,17 +339,19 @@ specified as an iterator flag. .. admonition:: Example - >>> a = np.arange(6).reshape(2,3) - 3 - >>> for x in np.nditer(a, op_flags=['readonly','copy'], - ... op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) + >>> import numpy as np + + >>> a = np.arange(6).reshape(2,3) - 3 + >>> for x in np.nditer(a, op_flags=['readonly','copy'], + ... op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... + 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['complex128']): - ... print(np.sqrt(x), end=' ') - ... - 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['complex128']): + ... print(np.sqrt(x), end=' ') + ... 
+ 1.7320508075688772j 1.4142135623730951j 1j 0j (1+0j) (1.4142135623730951+0j) The iterator uses NumPy's casting rules to determine whether a specific @@ -342,26 +364,28 @@ complex to float. .. admonition:: Example - >>> a = np.arange(6.) - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32']): - ... print(x, end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('float32') according to the rule 'safe' - - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32'], - ... casting='same_kind'): - ... print(x, end=' ') - ... - 0.0 1.0 2.0 3.0 4.0 5.0 - - >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind'): - ... print(x, end=' ') - ... - Traceback (most recent call last): - File "", line 1, in - TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('int32') according to the rule 'same_kind' + >>> import numpy as np + + >>> a = np.arange(6.) + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32']): + ... print(x, end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('float32') according to the rule 'safe' + + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['float32'], + ... casting='same_kind'): + ... print(x, end=' ') + ... + 0.0 1.0 2.0 3.0 4.0 5.0 + + >>> for x in np.nditer(a, flags=['buffered'], op_dtypes=['int32'], casting='same_kind'): + ... print(x, end=' ') + ... + Traceback (most recent call last): + File "", line 1, in + TypeError: Iterator operand 0 dtype could not be cast from dtype('float64') to dtype('int32') according to the rule 'same_kind' One thing to watch out for is conversions back to the original data type when using a read-write or write-only operand. A common case is @@ -373,14 +397,16 @@ would violate the casting rule. .. 
admonition:: Example - >>> a = np.arange(6) - >>> for x in np.nditer(a, flags=['buffered'], op_flags=['readwrite'], - ... op_dtypes=['float64'], casting='same_kind'): - ... x[...] = x / 2.0 - ... - Traceback (most recent call last): - File "", line 2, in - TypeError: Iterator requested dtype could not be cast from dtype('float64') to dtype('int64'), the operand 0 dtype, according to the rule 'same_kind' + >>> import numpy as np + + >>> a = np.arange(6) + >>> for x in np.nditer(a, flags=['buffered'], op_flags=['readwrite'], + ... op_dtypes=['float64'], casting='same_kind'): + ... x[...] = x / 2.0 + ... + Traceback (most recent call last): + File "", line 2, in + TypeError: Iterator requested dtype could not be cast from dtype('float64') to dtype('int64'), the operand 0 dtype, according to the rule 'same_kind' Broadcasting array iteration ============================ @@ -396,26 +422,30 @@ a two dimensional array together. .. admonition:: Example - >>> a = np.arange(3) - >>> b = np.arange(6).reshape(2,3) - >>> for x, y in np.nditer([a,b]): - ... print("%d:%d" % (x,y), end=' ') - ... - 0:0 1:1 2:2 0:3 1:4 2:5 + >>> import numpy as np + + >>> a = np.arange(3) + >>> b = np.arange(6).reshape(2,3) + >>> for x, y in np.nditer([a,b]): + ... print("%d:%d" % (x,y), end=' ') + ... + 0:0 1:1 2:2 0:3 1:4 2:5 When a broadcasting error occurs, the iterator raises an exception which includes the input shapes to help diagnose the problem. .. admonition:: Example - >>> a = np.arange(2) - >>> b = np.arange(6).reshape(2,3) - >>> for x, y in np.nditer([a,b]): - ... print("%d:%d" % (x,y), end=' ') - ... - Traceback (most recent call last): - ... - ValueError: operands could not be broadcast together with shapes (2,) (2,3) + >>> import numpy as np + + >>> a = np.arange(2) + >>> b = np.arange(6).reshape(2,3) + >>> for x, y in np.nditer([a,b]): + ... print("%d:%d" % (x,y), end=' ') + ... + Traceback (most recent call last): + ... 
+ ValueError: operands could not be broadcast together with shapes (2,) (2,3) Iterator-allocated output arrays -------------------------------- @@ -432,14 +462,16 @@ parameter support. .. admonition:: Example - >>> def square(a): - ... with np.nditer([a, None]) as it: - ... for x, y in it: - ... y[...] = x*x - ... return it.operands[1] - ... - >>> square([1,2,3]) - array([1, 4, 9]) + >>> import numpy as np + + >>> def square(a): + ... with np.nditer([a, None]) as it: + ... for x, y in it: + ... y[...] = x*x + ... return it.operands[1] + ... + >>> square([1,2,3]) + array([1, 4, 9]) By default, the :class:`nditer` uses the flags 'allocate' and 'writeonly' for operands that are passed in as None. This means we were able to provide @@ -469,31 +501,33 @@ reasons. .. admonition:: Example - >>> def square(a, out=None): - ... it = np.nditer([a, out], - ... flags = ['external_loop', 'buffered'], - ... op_flags = [['readonly'], - ... ['writeonly', 'allocate', 'no_broadcast']]) - ... with it: - ... for x, y in it: - ... y[...] = x*x - ... return it.operands[1] - ... - - >>> square([1,2,3]) - array([1, 4, 9]) - - >>> b = np.zeros((3,)) - >>> square([1,2,3], out=b) - array([1., 4., 9.]) - >>> b - array([1., 4., 9.]) - - >>> square(np.arange(6).reshape(2,3), out=b) - Traceback (most recent call last): - ... - ValueError: non-broadcastable output operand with shape (3,) doesn't - match the broadcast shape (2,3) + >>> import numpy as np + + >>> def square(a, out=None): + ... it = np.nditer([a, out], + ... flags = ['external_loop', 'buffered'], + ... op_flags = [['readonly'], + ... ['writeonly', 'allocate', 'no_broadcast']]) + ... with it: + ... for x, y in it: + ... y[...] = x*x + ... return it.operands[1] + ... + + >>> square([1,2,3]) + array([1, 4, 9]) + + >>> b = np.zeros((3,)) + >>> square([1,2,3], out=b) + array([1., 4., 9.]) + >>> b + array([1., 4., 9.]) + + >>> square(np.arange(6).reshape(2,3), out=b) + Traceback (most recent call last): + ... 
+ ValueError: non-broadcastable output operand with shape (3,) doesn't + match the broadcast shape (2,3) Outer product iteration ----------------------- @@ -525,22 +559,24 @@ Everything to do with the outer product is handled by the iterator setup. .. admonition:: Example - >>> a = np.arange(3) - >>> b = np.arange(8).reshape(2,4) - >>> it = np.nditer([a, b, None], flags=['external_loop'], - ... op_axes=[[0, -1, -1], [-1, 0, 1], None]) - >>> with it: - ... for x, y, z in it: - ... z[...] = x*y - ... result = it.operands[2] # same as z - ... - >>> result - array([[[ 0, 0, 0, 0], - [ 0, 0, 0, 0]], - [[ 0, 1, 2, 3], - [ 4, 5, 6, 7]], - [[ 0, 2, 4, 6], - [ 8, 10, 12, 14]]]) + >>> import numpy as np + + >>> a = np.arange(3) + >>> b = np.arange(8).reshape(2,4) + >>> it = np.nditer([a, b, None], flags=['external_loop'], + ... op_axes=[[0, -1, -1], [-1, 0, 1], None]) + >>> with it: + ... for x, y, z in it: + ... z[...] = x*y + ... result = it.operands[2] # same as z + ... + >>> result + array([[[ 0, 0, 0, 0], + [ 0, 0, 0, 0]], + [[ 0, 1, 2, 3], + [ 4, 5, 6, 7]], + [[ 0, 2, 4, 6], + [ 8, 10, 12, 14]]]) Note that once the iterator is closed we can not access :func:`operands ` and must use a reference created inside the context manager. @@ -557,17 +593,19 @@ For a simple example, consider taking the sum of all elements in an array. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> b = np.array(0) - >>> with np.nditer([a, b], flags=['reduce_ok'], - ... op_flags=[['readonly'], ['readwrite']]) as it: - ... for x,y in it: - ... y[...] += x - ... - >>> b - array(276) - >>> np.sum(a) - 276 + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> b = np.array(0) + >>> with np.nditer([a, b], flags=['reduce_ok'], + ... op_flags=[['readonly'], ['readwrite']]) as it: + ... for x,y in it: + ... y[...] += x + ... + >>> b + array(276) + >>> np.sum(a) + 276 Things are a little bit more tricky when combining reduction and allocated operands. 
Before iteration is started, any reduction operand must be @@ -576,22 +614,24 @@ sums along the last axis of `a`. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> it = np.nditer([a, None], flags=['reduce_ok'], - ... op_flags=[['readonly'], ['readwrite', 'allocate']], - ... op_axes=[None, [0,1,-1]]) - >>> with it: - ... it.operands[1][...] = 0 - ... for x, y in it: - ... y[...] += x - ... result = it.operands[1] - ... - >>> result - array([[ 6, 22, 38], - [54, 70, 86]]) - >>> np.sum(a, axis=2) - array([[ 6, 22, 38], - [54, 70, 86]]) + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> it = np.nditer([a, None], flags=['reduce_ok'], + ... op_flags=[['readonly'], ['readwrite', 'allocate']], + ... op_axes=[None, [0,1,-1]]) + >>> with it: + ... it.operands[1][...] = 0 + ... for x, y in it: + ... y[...] += x + ... result = it.operands[1] + ... + >>> result + array([[ 6, 22, 38], + [54, 70, 86]]) + >>> np.sum(a, axis=2) + array([[ 6, 22, 38], + [54, 70, 86]]) To do buffered reduction requires yet another adjustment during the setup. Normally the iterator construction involves copying the first @@ -610,21 +650,23 @@ buffering. .. admonition:: Example - >>> a = np.arange(24).reshape(2,3,4) - >>> it = np.nditer([a, None], flags=['reduce_ok', - ... 'buffered', 'delay_bufalloc'], - ... op_flags=[['readonly'], ['readwrite', 'allocate']], - ... op_axes=[None, [0,1,-1]]) - >>> with it: - ... it.operands[1][...] = 0 - ... it.reset() - ... for x, y in it: - ... y[...] += x - ... result = it.operands[1] - ... - >>> result - array([[ 6, 22, 38], - [54, 70, 86]]) + >>> import numpy as np + + >>> a = np.arange(24).reshape(2,3,4) + >>> it = np.nditer([a, None], flags=['reduce_ok', + ... 'buffered', 'delay_bufalloc'], + ... op_flags=[['readonly'], ['readwrite', 'allocate']], + ... op_axes=[None, [0,1,-1]]) + >>> with it: + ... it.operands[1][...] = 0 + ... it.reset() + ... for x, y in it: + ... y[...] += x + ... result = it.operands[1] + ... 
+ >>> result + array([[ 6, 22, 38], + [54, 70, 86]]) .. for doctests Include Cython section separately. Those tests are skipped entirely via an diff --git a/doc/source/reference/arrays.promotion.rst b/doc/source/reference/arrays.promotion.rst new file mode 100644 index 000000000000..cd476815f55c --- /dev/null +++ b/doc/source/reference/arrays.promotion.rst @@ -0,0 +1,259 @@ +.. currentmodule:: numpy + +.. _arrays.promotion: + +**************************** +Data type promotion in NumPy +**************************** + +When mixing two different data types, NumPy has to determine the appropriate +dtype for the result of the operation. This step is referred to as *promotion* +or *finding the common dtype*. + +In typical cases, the user does not need to worry about the details of +promotion, since the promotion step usually ensures that the result will +either match or exceed the precision of the input. + +For example, when the inputs are of the same dtype, the dtype of the result +matches the dtype of the inputs: + + >>> np.int8(1) + np.int8(1) + np.int8(2) + +Mixing two different dtypes normally produces a result with the dtype of the +higher precision input: + + >>> np.int8(4) + np.int64(8) # 64 > 8 + np.int64(12) + >>> np.float32(3) + np.float16(3) # 32 > 16 + np.float32(6.0) + +In typical cases, this does not lead to surprises. However, if you work with +non-default dtypes like unsigned integers and low-precision floats, or if you +mix NumPy integers, NumPy floats, and Python scalars, some +details of NumPy promotion rules may be relevant. Note that these detailed +rules do not always match those of other languages [#hist-reasons]_. + +Numerical dtypes come in four "kinds" with a natural hierarchy. + +1. unsigned integers (``uint``) +2. signed integers (``int``) +3. float (``float``) +4. complex (``complex``) + +In addition to kind, NumPy numerical dtypes also have an associated precision, specified +in bits. Together, the kind and precision specify the dtype. 
For example, a +``uint8`` is an unsigned integer stored using 8 bits. + +The result of an operation will always be of an equal or higher kind of any of +the inputs. Furthermore, the result will always have a precision greater than +or equal to those of the inputs. Already, this can lead to some examples which +may be unexpected: + +1. When mixing floating point numbers and integers, the precision of the + integer may force the result to a higher precision floating point. For + example, the result of an operation involving ``int64`` and ``float16`` + is ``float64``. +2. When mixing unsigned and signed integers with the same precision, the + result will have *higher* precision than either input. Additionally, + if one of them has 64bit precision already, no higher precision integer + is available and for example an operation involving ``int64`` and ``uint64`` + gives ``float64``. + +Please see the `Numerical promotion` section and image below for details +on both. + +Detailed behavior of Python scalars +----------------------------------- +Since NumPy 2.0 [#NEP50]_, an important point in our promotion rules is +that although operations involving two NumPy dtypes never lose precision, +operations involving a NumPy dtype and a Python scalar (``int``, ``float``, +or ``complex``) *can* lose precision. For instance, it is probably intuitive +that the result of an operation between a Python integer and a NumPy integer +should be a NumPy integer. However, Python integers have arbitrary precision +whereas all NumPy dtypes have fixed precision, so the arbitrary precision +of Python integers cannot be preserved. + +More generally, NumPy considers the "kind" of Python scalars, but ignores +their precision when determining the result dtype. This is often convenient. +For instance, when working with arrays of a low precision dtype, it is usually +desirable for simple operations with Python scalars to preserve the dtype. 
+ + >>> arr_float32 = np.array([1, 2.5, 2.1], dtype="float32") + >>> arr_float32 + 10.0 # undesirable to promote to float64 + array([11. , 12.5, 12.1], dtype=float32) + >>> arr_int16 = np.array([3, 5, 7], dtype="int16") + >>> arr_int16 + 10 # undesirable to promote to int64 + array([13, 15, 17], dtype=int16) + +In both cases, the result precision is dictated by the NumPy dtype. +Because of this, ``arr_float32 + 3.0`` behaves the same as +``arr_float32 + np.float32(3.0)``, and ``arr_int16 + 10`` behaves as +``arr_int16 + np.int16(10.)``. + +As another example, when mixing NumPy integers with a Python ``float`` +or ``complex``, the result always has type ``float64`` or ``complex128``: + + >>> np.int16(1) + 1.0 + np.float64(2.0) + +However, these rules can also lead to surprising behavior when working with +low precision dtypes. + +First, since the Python value is converted to a NumPy one before the operation +can be performed, operations can fail with an error when the result seems +obvious. For instance, ``np.int8(1) + 1000`` cannot continue because ``1000`` +exceeds the maximum value of an ``int8``. When the Python scalar +cannot be coerced to the NumPy dtype, an error is raised: + + >>> np.int8(1) + 1000 + Traceback (most recent call last): + ... + OverflowError: Python integer 1000 out of bounds for int8 + >>> np.int64(1) * 10**100 + Traceback (most recent call last): + ... + OverflowError: Python int too large to convert to C long + >>> np.float32(1) + 1e300 + np.float32(inf) + ... RuntimeWarning: overflow encountered in cast + +Second, since the Python float or integer precision is always ignored, a low +precision NumPy scalar will keep using its lower precision unless explicitly +converted to a higher precision NumPy dtype or Python scalar (e.g. via ``int()``, +``float()``, or ``scalar.item()``). 
This lower precision may be detrimental to +some calculations or lead to incorrect results, especially in the case of integer +overflows: + + >>> np.int8(100) + 100 # the result exceeds the capacity of int8 + np.int8(-56) + ... RuntimeWarning: overflow encountered in scalar add + +Note that NumPy warns when overflows occur for scalars, but not for arrays; +e.g., ``np.array(100, dtype="uint8") + 100`` will *not* warn. + +Numerical promotion +------------------- + +The following image shows the numerical promotion rules with the kinds +on the vertical axis and the precision on the horizontal axis. + +.. figure:: figures/nep-0050-promotion-no-fonts.svg + :figclass: align-center + +The input dtype with the higher kind determines the kind of the result dtype. +The result dtype has a precision as low as possible without appearing to the +left of either input dtype in the diagram. + +Note the following specific rules and observations: + +1. When a Python ``float`` or ``complex`` interacts with a NumPy integer + the result will be ``float64`` or ``complex128`` (yellow border). + NumPy booleans will also be cast to the default integer [#default-int]_. + This is not relevant when additionally NumPy floating point values are + involved. +2. The precision is drawn such that ``float16 < int16 < uint16`` because + large ``uint16`` do not fit ``int16`` and large ``int16`` will lose precision + when stored in a ``float16``. + This pattern however is broken since NumPy always considers ``float64`` + and ``complex128`` to be acceptable promotion results for any integer + value. +3. A special case is that NumPy promotes many combinations of signed and + unsigned integers to ``float64``. A higher kind is used here because no + signed integer dtype is sufficiently precise to hold a ``uint64``.
+ + +Exceptions to the general promotion rules +----------------------------------------- + +In NumPy promotion refers to what specific functions do with the result and +in some cases, this means that NumPy may deviate from what the `np.result_type` +would give. + +Behavior of ``sum`` and ``prod`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +**``np.sum`` and ``np.prod``:** Will always return the default integer type +when summing over integer values (or booleans). This is usually an ``int64``. +The reason for this is that integer summations are otherwise very likely +to overflow and give confusing results. +This rule also applies to the underlying ``np.add.reduce`` and +``np.multiply.reduce``. + +Notable behavior with NumPy or Python integer scalars +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +NumPy promotion refers to the result dtype and operation precision, +but the operation will sometimes dictate that result. +Division always returns floating point values and comparison always booleans. + +This leads to what may appear as "exceptions" to the rules: + +* NumPy comparisons with Python integers or mixed precision integers always + return the correct result. The inputs will never be cast in a way which + loses precision. +* Equality comparisons between types which cannot be promoted will be + considered all ``False`` (equality) or all ``True`` (not-equal). +* Unary math functions like ``np.sin`` that always return floating point + values, accept any Python integer input by converting it to ``float64``. +* Division always returns floating point values and thus also allows divisions + between any NumPy integer with any Python integer value by casting both + to ``float64``. + +In principle, some of these exceptions may make sense for other functions. +Please raise an issue if you feel this is the case.
+ +Promotion of non-numerical datatypes +------------------------------------ + +NumPy extends the promotion to non-numerical types, although in many cases +promotion is not well defined and simply rejected. + +The following rules apply: + +* NumPy byte strings (``np.bytes_``) can be promoted to unicode strings + (``np.str_``). However, casting the bytes to unicode will fail for + non-ascii characters. +* For some purposes NumPy will promote almost any other datatype to strings. + This applies to array creation or concatenation. +* The array constructors like ``np.array()`` will use ``object`` dtype when + there is no viable promotion. +* Structured dtypes can promote when their field names and order match. + In that case all fields are promoted individually. +* NumPy ``timedelta`` can in some cases promote with integers. + +.. note:: + Some of these rules are somewhat surprising, and are being considered for + change in the future. However, any backward-incompatible changes have to + be weighed against the risks of breaking existing code. Please raise an + issue if you have particular ideas about how promotion should work. + +Details of promoted ``dtype`` instances +--------------------------------------- +The above discussion has mainly dealt with the behavior when mixing different +DType classes. +A ``dtype`` instance attached to an array can carry additional information +such as byte-order, metadata, string length, or exact structured dtype layout. + +While the string length or field names of a structured dtype are important, +NumPy considers byte-order, metadata, and the exact layout of a structured +dtype as storage details. +During promotion NumPy does *not* take these storage details into account: + +* Byte-order is converted to native byte-order. +* Metadata attached to the dtype may or may not be preserved. +* Resulting structured dtypes will be packed (but aligned if inputs were).
+ +This behavior is the best behavior for most programs where storage details +are not relevant to the final results and where the use of incorrect byte-order +could drastically slow down evaluation. + + +.. [#hist-reasons] To a large degree, this may just be for choices made early + on in NumPy's predecessors. For more details, see :ref:`NEP 50 <NEP50>`. + +.. [#NEP50] See also :ref:`NEP 50 <NEP50>` which changed the rules for + NumPy 2.0. Previous versions of NumPy would sometimes return higher + precision results based on the input value of Python scalars. + Further, previous versions of NumPy would typically ignore the higher + precision of NumPy scalars or 0-D arrays for promotion purposes. + +.. [#default-int] The default integer is marked as ``int64`` in the schema + but is ``int32`` on 32bit platforms. However, normal PCs are 64bit. diff --git a/doc/source/reference/arrays.rst b/doc/source/reference/arrays.rst index e8c9bc348f31..2e5869dc5379 100644 --- a/doc/source/reference/arrays.rst +++ b/doc/source/reference/arrays.rst @@ -41,6 +41,7 @@ of also more complicated arrangements of data. arrays.ndarray arrays.scalars arrays.dtypes + arrays.promotion arrays.nditer arrays.classes maskedarray diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 7789a47221b5..c80e3f932377 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -37,9 +37,8 @@ of the flexible itemsize array types (:class:`str_`, **Figure:** Hierarchy of type objects representing the array data types. Not shown are the two integer types :class:`intp` and - :class:`uintp` which just point to the integer type that holds a - pointer for the platform. All the number types can be obtained - using bit-width names as well. + :class:`uintp` which are used for indexing (the same as the + default integer since NumPy 2). ..
TODO - use something like this instead of the diagram above, as it generates @@ -190,31 +189,33 @@ Inexact types `format_float_positional` and `format_float_scientific`. This means that variables with equal binary values but whose datatypes are of - different precisions may display differently:: - - >>> f16 = np.float16("0.1") - >>> f32 = np.float32(f16) - >>> f64 = np.float64(f32) - >>> f16 == f32 == f64 - True - >>> f16, f32, f64 - (0.1, 0.099975586, 0.0999755859375) - - Note that none of these floats hold the exact value :math:`\frac{1}{10}`; - ``f16`` prints as ``0.1`` because it is as close to that value as possible, - whereas the other types do not as they have more precision and therefore have - closer values. - - Conversely, floating-point scalars of different precisions which approximate - the same decimal value may compare unequal despite printing identically: - - >>> f16 = np.float16("0.1") - >>> f32 = np.float32("0.1") - >>> f64 = np.float64("0.1") - >>> f16 == f32 == f64 - False - >>> f16, f32, f64 - (0.1, 0.1, 0.1) + different precisions may display differently: + + >>> import numpy as np + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32(f16) + >>> f64 = np.float64(f32) + >>> f16 == f32 == f64 + True + >>> f16, f32, f64 + (0.1, 0.099975586, 0.0999755859375) + + Note that none of these floats hold the exact value :math:`\frac{1}{10}`; + ``f16`` prints as ``0.1`` because it is as close to that value as possible, + whereas the other types do not as they have more precision and therefore have + closer values. + + Conversely, floating-point scalars of different precisions which approximate + the same decimal value may compare unequal despite printing identically: + + >>> f16 = np.float16("0.1") + >>> f32 = np.float32("0.1") + >>> f64 = np.float64("0.1") + >>> f16 == f32 == f64 + False + >>> f16, f32, f64 + (0.1, 0.1, 0.1) Floating-point types ~~~~~~~~~~~~~~~~~~~~ @@ -377,21 +378,29 @@ are also provided. 
Alias for the signed integer type (one of `numpy.byte`, `numpy.short`, `numpy.intc`, `numpy.int_`, `numpy.long` and `numpy.longlong`) - that is the same size as a pointer. + that is used as a default integer and for indexing. - Compatible with the C ``intptr_t``. + Compatible with the C ``Py_ssize_t``. - :Character code: ``'p'`` + :Character code: ``'n'`` + + .. versionchanged:: 2.0 + Before NumPy 2, this had the same size as a pointer. In practice this + is almost always identical, but the character code ``'p'`` maps to the C + ``intptr_t``. The character code ``'n'`` was added in NumPy 2.0. .. attribute:: uintp - Alias for the unsigned integer type (one of `numpy.ubyte`, `numpy.ushort`, - `numpy.uintc`, `numpy.uint`, `numpy.ulong` and `numpy.ulonglong`) - that is the same size as a pointer. + Alias for the unsigned integer type that is the same size as ``intp``. + + Compatible with the C ``size_t``. - Compatible with the C ``uintptr_t``. + :Character code: ``'N'`` - :Character code: ``'P'`` + .. versionchanged:: 2.0 + Before NumPy 2, this had the same size as a pointer. In practice this + is almost always identical, but the character code ``'P'`` maps to the C + ``uintptr_t``. The character code ``'N'`` was added in NumPy 2.0. .. autoclass:: numpy.float16 diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst index 84549012e95b..e6f26f92cdf5 100644 --- a/doc/source/reference/c-api/array.rst +++ b/doc/source/reference/c-api/array.rst @@ -823,7 +823,7 @@ cannot not be accessed directly. .. c:function:: PyArray_ArrayDescr *PyDataType_SUBARRAY(PyArray_Descr *descr) - Information about a subarray dtype eqivalent to the Python `np.dtype.base` + Information about a subarray dtype equivalent to the Python `np.dtype.base` and `np.dtype.shape`. If this is non- ``NULL``, then this data-type descriptor is a @@ -1240,6 +1240,11 @@ User-defined data types With these two changes, the code should compile and work on both 1.x and 2.x or later. 
+ In the unlikely case that you are heap allocating the dtype struct you + should free it again on NumPy 2, since a copy is made. + The struct is not a valid Python object, so do not use ``Py_DECREF`` + on it. + Register a data-type as a new user-defined data type for arrays. The type must have most of its entries filled in. This is not always checked and errors can produce segfaults. In @@ -1259,6 +1264,13 @@ User-defined data types registered (checked only by the address of the pointer), then return the previously-assigned type-number. + The number of user DTypes known to numpy is stored in + ``NPY_NUMUSERTYPES``, a static global variable that is public in the + C API. Accessing this symbol is inherently *not* thread-safe. If + for some reason you need to use this API in a multithreaded context, + you will need to add your own locking, NumPy does not ensure new + data types can be added in a thread-safe manner. + .. c:function:: int PyArray_RegisterCastFunc( \ PyArray_Descr* descr, int totype, PyArray_VectorUnaryFunc* castfunc) @@ -1682,8 +1694,8 @@ the functions that must be implemented for each slot. .. c:type:: NPY_CASTING (PyArrayMethod_ResolveDescriptors)( \ struct PyArrayMethodObject_tag *method, \ - PyArray_DTypeMeta **dtypes, \ - PyArray_Descr **given_descrs, \ + PyArray_DTypeMeta *const *dtypes, \ + PyArray_Descr *const *given_descrs, \ PyArray_Descr **loop_descrs, \ npy_intp *view_offset) @@ -1802,14 +1814,14 @@ the functions that must be implemented for each slot. "default" value that may differ from the "identity" value normally used. For example: - - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct - identity otherwise as it preserves the sign for ``sum([-0.0])``. - - We use no identity for object, but return the default of ``0`` and - ``1`` for the empty ``sum([], dtype=object)`` and - ``prod([], dtype=object)``. - This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work. 
- - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least - ``INT_MIN`` not a good *default* when there are no items. + - ``0.0`` is the default for ``sum([])``. But ``-0.0`` is the correct + identity otherwise as it preserves the sign for ``sum([-0.0])``. + - We use no identity for object, but return the default of ``0`` and + ``1`` for the empty ``sum([], dtype=object)`` and + ``prod([], dtype=object)``. + This allows ``np.sum(np.array(["a", "b"], dtype=object))`` to work. + - ``-inf`` or ``INT_MIN`` for ``max`` is an identity, but at least + ``INT_MIN`` not a good *default* when there are no items. *initial* is a pointer to the data for the initial value, which should be filled in. Returns -1, 0, or 1 indicating error, no initial value, and the @@ -1857,7 +1869,7 @@ Typedefs for functions that users of the ArrayMethod API can implement are described below. .. c:type:: int (PyArrayMethod_TraverseLoop)( \ - void *traverse_context, PyArray_Descr *descr, char *data, \ + void *traverse_context, const PyArray_Descr *descr, char *data, \ npy_intp size, npy_intp stride, NpyAuxData *auxdata) A traverse loop working on a single array. This is similar to the general @@ -1880,7 +1892,7 @@ described below. passed through in the future (for structured dtypes). .. c:type:: int (PyArrayMethod_GetTraverseLoop)( \ - void *traverse_context, PyArray_Descr *descr, \ + void *traverse_context, const PyArray_Descr *descr, \ int aligned, npy_intp fixed_stride, \ PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, \ NPY_ARRAYMETHOD_FLAGS *flags) @@ -1920,7 +1932,8 @@ with the rest of the ArrayMethod API. attempt a new search for a matching loop/promoter. .. 
c:type:: int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, \ - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], \ + PyArray_DTypeMeta *const op_dtypes[], \ + PyArray_DTypeMeta *const signature[], \ PyArray_DTypeMeta *new_op_dtypes[]) Type of the promoter function, which must be wrapped into a @@ -3370,13 +3383,13 @@ Data-type descriptors can also be used with the "O&" character in PyArg_ParseTuple processing. -.. c:function:: int Pyarray_DescrAlignConverter( \ +.. c:function:: int PyArray_DescrAlignConverter( \ PyObject* obj, PyArray_Descr** dtype) Like :c:func:`PyArray_DescrConverter` except it aligns C-struct-like objects on word-boundaries as the compiler would. -.. c:function:: int Pyarray_DescrAlignConverter2( \ +.. c:function:: int PyArray_DescrAlignConverter2( \ PyObject* obj, PyArray_Descr** dtype) Like :c:func:`PyArray_DescrConverter2` except it aligns C-struct-like @@ -3386,7 +3399,7 @@ Data Type Promotion and Inspection ---------------------------------- .. c:function:: PyArray_DTypeMeta *PyArray_CommonDType( \ - PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) + const PyArray_DTypeMeta *dtype1, const PyArray_DTypeMeta *dtype2) This function defines the common DType operator. Note that the common DType will not be ``object`` (unless one of the DTypes is ``object``). Similar to @@ -3413,7 +3426,7 @@ Data Type Promotion and Inspection For example promoting ``float16`` with any other float, integer, or unsigned integer again gives a floating point number. -.. c:function:: PyArray_Descr *PyArray_GetDefaultDescr(PyArray_DTypeMeta *DType) +.. c:function:: PyArray_Descr *PyArray_GetDefaultDescr(const PyArray_DTypeMeta *DType) Given a DType class, returns the default instance (descriptor). This checks for a ``singleton`` first and only calls the ``default_descr`` function if @@ -3814,13 +3827,118 @@ Other conversions in the *vals* array. The sequence can be smaller then *maxvals* as the number of converted objects is returned. +.. 
_including-the-c-api: -Miscellaneous ------------- +Including and importing the C API +--------------------------------- +To use the NumPy C-API you typically need to include the +``numpy/ndarrayobject.h`` header and ``numpy/ufuncobject.h`` for some ufunc +related functionality (``arrayobject.h`` is an alias for ``ndarrayobject.h``). -Importing the API +~~~~~~~~~~~~~~~~~ +These two headers export most relevant functionality. In general any project +which uses the NumPy API must import NumPy using one of the functions +``PyArray_ImportNumPyAPI()`` or ``import_array()``. +In some places, functionality which requires ``import_array()`` is not +needed, because you only need type definitions. In this case, it is +sufficient to include ``numpy/ndarraytypes.h``. + +For the typical Python project, multiple C or C++ files will be compiled into +a single shared object (the Python C-module) and ``PyArray_ImportNumPyAPI()`` +should be called inside its module initialization. + +When you have a single C-file, this will consist of: + +.. code-block:: c + + #include "numpy/ndarrayobject.h" + + PyMODINIT_FUNC PyInit_my_module(void) + { + if (PyArray_ImportNumPyAPI() < 0) { + return NULL; + } + /* Other initialization code. */ + } + +However, most projects will have additional C files which are all +linked together into a single Python module. +In this case, the helper C files typically do not have a canonical place +where ``PyArray_ImportNumPyAPI`` should be called (although it is OK and +fast to call it often). + +To solve this, NumPy provides the following pattern that the main +file is modified to define ``PY_ARRAY_UNIQUE_SYMBOL`` before the include: + +.. code-block:: c + + /* Main module file */ + #define PY_ARRAY_UNIQUE_SYMBOL MyModule + #include "numpy/ndarrayobject.h" + + PyMODINIT_FUNC PyInit_my_module(void) + { + if (PyArray_ImportNumPyAPI() < 0) { + return NULL; + } + /* Other initialization code. */ + } + +while the other files use: + +..
code-block:: C + + /* Second file without any import */ + #define NO_IMPORT_ARRAY + #define PY_ARRAY_UNIQUE_SYMBOL MyModule + #include "numpy/ndarrayobject.h" + +You can of course add the defines to a local header used throughout. +You just have to make sure that the main file does _not_ define +``NO_IMPORT_ARRAY``. + +For ``numpy/ufuncobject.h`` the same logic applies, but the unique symbol +mechanism is ``#define PY_UFUNC_UNIQUE_SYMBOL`` (both can match). + +Additionally, you will probably wish to add a +``#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION`` +to avoid warnings about possible use of old API. + +.. note:: + If you are experiencing access violations make sure that the NumPy API + was properly imported and the symbol ``PyArray_API`` is not ``NULL``. + When in a debugger, this symbol's actual name will be + ``PY_ARRAY_UNIQUE_SYMBOL``+``PyArray_API``, so for example + ``MyModulePyArray_API`` in the above. + (E.g. even a ``printf("%p\n", PyArray_API);`` just before the crash.) + + +Mechanism details and dynamic linking +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The main part of the mechanism is that NumPy needs to define +a ``void **PyArray_API`` table for you to look up all functions. +Depending on your macro setup, this takes different routes depending on +whether :c:macro:`NO_IMPORT_ARRAY` and :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` +are defined: + +* If neither is defined, the C-API is declared to + ``static void **PyArray_API``, so it is only visible within the + compilation unit/file using ``#include <numpy/ndarrayobject.h>``. +* If only ``PY_ARRAY_UNIQUE_SYMBOL`` is defined (it could be empty) then + it is declared to a non-static ``void **`` allowing it to be used + by other files which are linked. +* If ``NO_IMPORT_ARRAY`` is defined, the table is declared as + ``extern void **``, meaning that it must be linked to a file which does not + use ``NO_IMPORT_ARRAY``. + +The ``PY_ARRAY_UNIQUE_SYMBOL`` mechanism additionally mangles the names to +avoid conflicts. + +..
versionchanged:: + NumPy 2.1 changed the headers to avoid sharing the table outside of a + single shared object/dll (this was always the case on Windows). + Please see :c:macro:`NPY_API_SYMBOL_ATTRIBUTE` for details. In order to make use of the C-API from another extension module, the :c:func:`import_array` function must be called. If the extension module is @@ -3844,31 +3962,46 @@ the C-API is needed then some additional steps must be taken. module that will make use of the C-API. It imports the module where the function-pointer table is stored and points the correct variable to it. + This macro includes a ``return NULL;`` on error, so that + ``PyArray_ImportNumPyAPI()`` is preferable for custom error checking. + You may also see use of ``_import_array()`` (a function, not + a macro, but you may want to raise a better error if it fails) and + the variations ``import_array1(ret)`` which customizes the return value. .. c:macro:: PY_ARRAY_UNIQUE_SYMBOL +.. c:macro:: NPY_API_SYMBOL_ATTRIBUTE + + .. versionadded:: 2.1 + + An additional symbol which can be used to share e.g. visibility beyond + shared object boundaries. + By default, NumPy adds the C visibility hidden attribute (if available): + ``void __attribute__((visibility("hidden"))) **PyArray_API;``. + You can change this by defining ``NPY_API_SYMBOL_ATTRIBUTE``, which will + make this: + ``void NPY_API_SYMBOL_ATTRIBUTE **PyArray_API;`` (with additional + name mangling via the unique symbol). + + Adding an empty ``#define NPY_API_SYMBOL_ATTRIBUTE`` will have the same + behavior as NumPy 1.x. + + .. note:: + Windows never had shared visibility although you can use this macro + to achieve it. We generally discourage sharing beyond shared boundary + lines since importing the array API includes NumPy version checks. + .. c:macro:: NO_IMPORT_ARRAY - Using these #defines you can use the C-API in multiple files for a - single extension module. 
In each file you must define - :c:macro:`PY_ARRAY_UNIQUE_SYMBOL` to some name that will hold the - C-API (*e.g.* myextension_ARRAY_API). This must be done **before** - including the numpy/arrayobject.h file. In the module - initialization routine you call :c:func:`import_array`. In addition, - in the files that do not have the module initialization - sub_routine define :c:macro:`NO_IMPORT_ARRAY` prior to including - numpy/arrayobject.h. - - Suppose I have two files coolmodule.c and coolhelper.c which need - to be compiled and linked into a single extension module. Suppose - coolmodule.c contains the required initcool module initialization - function (with the import_array() function called). Then, - coolmodule.c would have at the top: + Defining ``NO_IMPORT_ARRAY`` before the ``ndarrayobject.h`` include + indicates that the NumPy C API import is handled in a different file + and the include mechanism will not be added here. + You must have one file without ``NO_IMPORT_ARRAY`` defined. .. code-block:: c #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API - #include numpy/arrayobject.h + #include On the other hand, coolhelper.c would contain at the top: @@ -3876,7 +4009,7 @@ the C-API is needed then some additional steps must be taken. #define NO_IMPORT_ARRAY #define PY_ARRAY_UNIQUE_SYMBOL cool_ARRAY_API - #include numpy/arrayobject.h + #include You can also put the common two last lines into an extension-local header file as long as you make sure that NO_IMPORT_ARRAY is @@ -3900,6 +4033,7 @@ the C-API is needed then some additional steps must be taken. defaults to ``PyArray_API``, to whatever the macro is #defined to. + Checking the API Version ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -3955,21 +4089,6 @@ extension with the lowest :c:data:`NPY_FEATURE_VERSION` as possible. :c:data:`NPY_FEATURE_VERSION` changes whenever the API changes (e.g. a function is added). A changed value does not always require a recompile. -Internal Flexibility -~~~~~~~~~~~~~~~~~~~~ - -.. 
c:function:: void PyArray_SetStringFunction(PyObject* op, int repr) - - This function allows you to alter the tp_str and tp_repr methods - of the array object to any Python function. Thus you can alter - what happens for all arrays when str(arr) or repr(arr) is called - from Python. The function to be called is passed in as *op*. If - *repr* is non-zero, then this function will be called in response - to repr(arr), otherwise the function will be called in response to - str(arr). No check on whether or not *op* is callable is - performed. The callable passed in to *op* should expect an array - argument and should return a string to be printed. - Memory management ~~~~~~~~~~~~~~~~~ @@ -3980,8 +4099,8 @@ Memory management .. c:function:: char* PyDataMem_RENEW(void * ptr, size_t newbytes) - Macros to allocate, free, and reallocate memory. These macros are used - internally to create arrays. + Functions to allocate, free, and reallocate memory. These are used + internally to manage array data memory unless overridden. .. c:function:: npy_intp* PyDimMem_NEW(int nd) @@ -4032,15 +4151,12 @@ variables), the GIL should be released so that other Python threads can run while the time-consuming calculations are performed. This can be accomplished using two groups of macros. Typically, if one macro in a group is used in a code block, all of them must be used in the same -code block. Currently, :c:data:`NPY_ALLOW_THREADS` is defined to the -python-defined :c:data:`WITH_THREADS` constant unless the environment -variable ``NPY_NOSMP`` is set in which case -:c:data:`NPY_ALLOW_THREADS` is defined to be 0. +code block. :c:data:`NPY_ALLOW_THREADS` is true (defined as ``1``) unless the +build option ``-Ddisable-threading`` is set to ``true`` - in which case +:c:data:`NPY_ALLOW_THREADS` is false (``0``). .. c:macro:: NPY_ALLOW_THREADS -.. 
c:macro:: WITH_THREADS - Group 1 ^^^^^^^ diff --git a/doc/source/reference/c-api/coremath.rst b/doc/source/reference/c-api/coremath.rst index ef91ab28e6aa..f8e0efb34d24 100644 --- a/doc/source/reference/c-api/coremath.rst +++ b/doc/source/reference/c-api/coremath.rst @@ -1,7 +1,7 @@ NumPy core math library ======================= -The numpy core math library ('npymath') is a first step in this direction. This +The numpy core math library (``npymath``) is a first step in this direction. This library contains most math-related C99 functionality, which can be used on platforms where C99 is not well supported. The core math functions have the same API as the C99 ones, except for the ``npy_*`` prefix. @@ -304,7 +304,7 @@ Linking against the core math library in an extension ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To use the core math library that NumPy ships as a static library in your own -Python extension, you need to add the npymath compile and link options to your +Python extension, you need to add the ``npymath`` compile and link options to your extension. The exact steps to take will depend on the build system you are using. The generic steps to take are: diff --git a/doc/source/reference/c-api/dtype.rst b/doc/source/reference/c-api/dtype.rst index 1d521e39a832..ce23c51aa9ea 100644 --- a/doc/source/reference/c-api/dtype.rst +++ b/doc/source/reference/c-api/dtype.rst @@ -170,14 +170,26 @@ Enumerated types .. c:enumerator:: NPY_INTP - The enumeration value for a signed integer type which is the same - size as a (void \*) pointer. This is the type used by all + The enumeration value for a signed integer of type ``Py_ssize_t`` + (same as ``ssize_t`` if defined). This is the type used by all arrays of indices. + .. versionchanged:: 2.0 + Previously, this was the same as ``intptr_t`` (same size as a + pointer). In practice, this is identical except on very niche + platforms. + You can use the ``'p'`` character code for the pointer meaning. + .. 
c:enumerator:: NPY_UINTP - The enumeration value for an unsigned integer type which is the - same size as a (void \*) pointer. + The enumeration value for an unsigned integer type that is identical + to a ``size_t``. + + .. versionchanged:: 2.0 + Previously, this was the same as ``uintptr_t`` (same size as a + pointer). In practice, this is identical except on very niche + platforms. + You can use the ``'P'`` character code for the pointer meaning. .. c:enumerator:: NPY_MASK @@ -287,14 +299,20 @@ all platforms for all the kinds of numeric types. Commonly 8-, 16-, types are available. -Integer that can hold a pointer -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Further integer aliases +~~~~~~~~~~~~~~~~~~~~~~~ -The constants **NPY_INTP** and **NPY_UINTP** refer to an -enumerated integer type that is large enough to hold a pointer on the -platform. Index arrays should always be converted to **NPY_INTP** -, because the dimension of the array is of type npy_intp. +The constants **NPY_INTP** and **NPY_UINTP** refer to an ``Py_ssize_t`` +and ``size_t``. +Although in practice normally true, these types are strictly speaking not +pointer sized and the character codes ``'p'`` and ``'P'`` can be used for +pointer sized integers. +(Before NumPy 2, ``intp`` was pointer size, but this almost never matched +the actual use, which is the reason for the name.) +Since NumPy 2, **NPY_DEFAULT_INT** is additionally defined. +The value of the macro is runtime dependent: Since NumPy 2, it maps to +``NPY_INTP`` while on earlier versions it maps to ``NPY_LONG``. C-type names ------------ @@ -390,7 +408,7 @@ to the front of the integer name. This is the correct integer for lengths or indexing. In practice this is normally the size of a pointer, but this is not guaranteed. - ..note:: + .. note:: Before NumPy 2.0, this was the same as ``Py_intptr_t``. While a better match, this did not match actual usage in practice. 
On the Python side, we still support ``np.dtype('p')`` to fetch a dtype diff --git a/doc/source/reference/c-api/generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst index 44b16f90eed4..b4750688b5e6 100644 --- a/doc/source/reference/c-api/generalized-ufuncs.rst +++ b/doc/source/reference/c-api/generalized-ufuncs.rst @@ -17,7 +17,7 @@ what the "core" dimensionality of the inputs is, as well as the corresponding dimensionality of the outputs (the element-wise ufuncs have zero core dimensions). The list of the core dimensions for all arguments is called the "signature" of a ufunc. For example, the -ufunc numpy.add has signature ``(),()->()`` defining two scalar inputs +ufunc ``numpy.add`` has signature ``(),()->()`` defining two scalar inputs and one scalar output. Another example is the function ``inner1d(a, b)`` with a signature of @@ -57,10 +57,12 @@ taken when calling such a function. An example would be the function ``euclidean_pdist(a)``, with signature ``(n,d)->(p)``, that given an array of ``n`` ``d``-dimensional vectors, computes all unique pairwise Euclidean distances among them. The output dimension ``p`` must therefore be equal to -``n * (n - 1) / 2``, but it is the caller's responsibility to pass in an -output array of the right size. If the size of a core dimension of an output +``n * (n - 1) / 2``, but by default, it is the caller's responsibility to pass +in an output array of the right size. If the size of a core dimension of an output cannot be determined from a passed in input or output array, an error will be -raised. +raised. This can be changed by defining a ``PyUFunc_ProcessCoreDimsFunc`` function +and assigning it to the ``process_core_dims_func`` field of the ``PyUFuncObject`` +structure. See below for more details. Note: Prior to NumPy 1.10.0, less strict checks were in place: missing core dimensions were created by prepending 1's to the shape as necessary, core @@ -77,7 +79,7 @@ Elementary Function (e.g.
adding two numbers is the most basic operation in adding two arrays). The ufunc applies the elementary function multiple times on different parts of the arrays. The input/output of elementary - functions can be vectors; e.g., the elementary function of inner1d + functions can be vectors; e.g., the elementary function of ``inner1d`` takes two vectors as input. Signature @@ -214,3 +216,116 @@ input/output arrays ``a``, ``b``, ``c``. Furthermore, ``dimensions`` will be ``[N, I, J]`` to define the size of ``N`` of the loop and the sizes ``I`` and ``J`` for the core dimensions ``i`` and ``j``. Finally, ``steps`` will be ``[a_N, b_N, c_N, a_i, a_j, b_i]``, containing all necessary strides. + +Customizing core dimension size processing +------------------------------------------ + +The optional function of type ``PyUFunc_ProcessCoreDimsFunc``, stored +on the ``process_core_dims_func`` attribute of the ufunc, provides the +author of the ufunc a "hook" into the processing of the core dimensions +of the arrays that were passed to the ufunc. The two primary uses of +this "hook" are: + +* Check that constraints on the core dimensions required + by the ufunc are satisfied (and set an exception if they are not). +* Compute output shapes for any output core dimensions that were not + determined by the input arrays. + +As an example of the first use, consider the generalized ufunc ``minmax`` +with signature ``(n)->(2)`` that simultaneously computes the minimum and +maximum of a sequence. It should require that ``n > 0``, because +the minimum and maximum of a sequence with length 0 is not meaningful. +In this case, the ufunc author might define the function like this: + + .. 
code-block:: c
+
+        int minmax_process_core_dims(PyUFuncObject *ufunc,
+                                     npy_intp *core_dim_sizes)
+        {
+            npy_intp n = core_dim_sizes[0];
+            if (n == 0) {
+                PyErr_SetString(PyExc_ValueError,
+                                "minmax requires the core dimension "
+                                "to be at least 1.");
+                return -1;
+            }
+            return 0;
+        }
+
+In this case, the length of the array ``core_dim_sizes`` will be 2.
+The second value in the array will always be 2, so there is no need
+for the function to inspect it. The core dimension ``n`` is stored
+in the first element. The function sets an exception and returns -1
+if it finds that ``n`` is 0.
+
+The second use for the "hook" is to compute the size of output arrays
+when the output arrays are not provided by the caller and one or more
+core dimension of the output is not also an input core dimension.
+If the ufunc does not have a function defined on the
+``process_core_dims_func`` attribute, an unspecified output core
+dimension size will result in an exception being raised. With the
+"hook" provided by ``process_core_dims_func``, the author of the ufunc
+can set the output size to whatever is appropriate for the ufunc.
+
+In the array passed to the "hook" function, core dimensions that
+were not determined by the input are indicated by having the value -1
+in the ``core_dim_sizes`` array. The function can replace the -1 with
+whatever value is appropriate for the ufunc, based on the core dimensions
+that occurred in the input arrays.
+
+.. warning::
+    The function must never change a value in ``core_dim_sizes`` that
+    is not -1 on input. Changing a value that was not -1 will generally
+    result in incorrect output from the ufunc, and could result in the
+    Python interpreter crashing.
+
+For example, consider the generalized ufunc ``conv1d`` for which
+the elementary function computes the "full" convolution of two
+one-dimensional arrays ``x`` and ``y`` with lengths ``m`` and ``n``,
+respectively. The output of this convolution has length ``m + n - 1``.
+To implement this as a generalized ufunc, the signature is set to +``(m),(n)->(p)``, and in the "hook" function, if the core dimension +``p`` is found to be -1, it is replaced with ``m + n - 1``. If ``p`` +is *not* -1, it must be verified that the given value equals ``m + n - 1``. +If it does not, the function must set an exception and return -1. +For a meaningful result, the operation also requires that ``m + n`` +is at least 1, i.e. both inputs can't have length 0. + +Here's how that might look in code: + + .. code-block:: c + + int conv1d_process_core_dims(PyUFuncObject *ufunc, + npy_intp *core_dim_sizes) + { + // core_dim_sizes will hold the core dimensions [m, n, p]. + // p will be -1 if the caller did not provide the out argument. + npy_intp m = core_dim_sizes[0]; + npy_intp n = core_dim_sizes[1]; + npy_intp p = core_dim_sizes[2]; + npy_intp required_p = m + n - 1; + + if (m == 0 && n == 0) { + // Disallow both inputs having length 0. + PyErr_SetString(PyExc_ValueError, + "conv1d: both inputs have core dimension 0; the function " + "requires that at least one input has size greater than 0."); + return -1; + } + if (p == -1) { + // Output array was not given in the call of the ufunc. + // Set the correct output size here. + core_dim_sizes[2] = required_p; + return 0; + } + // An output array *was* given. Validate its core dimension. + if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "conv1d: the core dimension p of the out parameter " + "does not equal m + n - 1, where m and n are the " + "core dimensions of the inputs x and y; got m=%zd " + "and n=%zd so p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; + } diff --git a/doc/source/reference/c-api/index.rst b/doc/source/reference/c-api/index.rst index e7f86d3ff7a8..2a7a627fde3e 100644 --- a/doc/source/reference/c-api/index.rst +++ b/doc/source/reference/c-api/index.rst @@ -47,6 +47,7 @@ code. 
iterator ufunc generalized-ufuncs + strings coremath datetimes deprecations diff --git a/doc/source/reference/c-api/iterator.rst b/doc/source/reference/c-api/iterator.rst index 71bf44f4b239..50fbec96392a 100644 --- a/doc/source/reference/c-api/iterator.rst +++ b/doc/source/reference/c-api/iterator.rst @@ -715,7 +715,7 @@ Construction and destruction may not be repeated. The following example is how normal broadcasting applies to a 3-D array, a 2-D array, a 1-D array and a scalar. - **Note**: Before NumPy 1.8 ``oa_ndim == 0` was used for signalling + **Note**: Before NumPy 1.8 ``oa_ndim == 0`` was used for signalling that ``op_axes`` and ``itershape`` are unused. This is deprecated and should be replaced with -1. Better backward compatibility may be achieved by using :c:func:`NpyIter_MultiNew` for this case. diff --git a/doc/source/reference/c-api/strings.rst b/doc/source/reference/c-api/strings.rst new file mode 100644 index 000000000000..2e7dc34a337f --- /dev/null +++ b/doc/source/reference/c-api/strings.rst @@ -0,0 +1,268 @@ +NpyString API +============= + +.. sectionauthor:: Nathan Goldbaum + +.. versionadded:: 2.0 + +This API allows access to the UTF-8 string data stored in NumPy StringDType +arrays. See :ref:`NEP-55 ` for +more in-depth details into the design of StringDType. + +Examples +-------- + +Loading a String +^^^^^^^^^^^^^^^^ + +Say we are writing a ufunc implementation for ``StringDType``. If we are given +``const char *buf`` pointer to the beginning of a ``StringDType`` array entry, and a +``PyArray_Descr *`` pointer to the array descriptor, one can +access the underlying string data like so: + +.. 
code-block:: C + + npy_string_allocator *allocator = NpyString_acquire_allocator( + (PyArray_StringDTypeObject *)descr); + + npy_static_string sdata = {0, NULL}; + npy_packed_static_string *packed_string = (npy_packed_static_string *)buf; + int is_null = 0; + + is_null = NpyString_load(allocator, packed_string, &sdata); + + if (is_null == -1) { + // failed to load string, set error + return -1; + } + else if (is_null) { + // handle missing string + // sdata->buf is NULL + // sdata->size is 0 + } + else { + // sdata->buf is a pointer to the beginning of a string + // sdata->size is the size of the string + } + NpyString_release_allocator(allocator); + +Packing a String +^^^^^^^^^^^^^^^^ + +This example shows how to pack a new string entry into an array: + +.. code-block:: C + + char *str = "Hello world"; + size_t size = 11; + npy_packed_static_string *packed_string = (npy_packed_static_string *)buf; + + npy_string_allocator *allocator = NpyString_acquire_allocator( + (PyArray_StringDTypeObject *)descr); + + // copy contents of str into packed_string + if (NpyString_pack(allocator, packed_string, str, size) == -1) { + // string packing failed, set error + return -1; + } + + // packed_string contains a copy of "Hello world" + + NpyString_release_allocator(allocator); + +Types +----- + +.. c:type:: npy_packed_static_string + + An opaque struct that represents "packed" encoded strings. Individual + entries in array buffers are instances of this struct. Direct access + to the data in the struct is undefined and future version of the library may + change the packed representation of strings. + +.. c:type:: npy_static_string + + An unpacked string allowing access to the UTF-8 string data. + + .. code-block:: c + + typedef struct npy_unpacked_static_string { + size_t size; + const char *buf; + } npy_static_string; + + .. c:member:: size_t size + + The size of the string, in bytes. + + .. c:member:: const char *buf + + The string buffer. Holds UTF-8-encoded bytes. 
Does not currently end in + a null string but we may decide to add null termination in the + future, so do not rely on the presence or absence of null-termination. + + Note that this is a ``const`` buffer. If you want to alter an + entry in an array, you should create a new string and pack it + into the array entry. + +.. c:type:: npy_string_allocator + + An opaque pointer to an object that handles string allocation. + Before using the allocator, you must acquire the allocator lock and release + the lock after you are done interacting with strings managed by the + allocator. + +.. c:type:: PyArray_StringDTypeObject + + The C struct backing instances of StringDType in Python. Attributes store + the settings the object was created with, an instance of + ``npy_string_allocator`` that manages string allocations for arrays + associated with the DType instance, and several attributes caching + information about the missing string object that is commonly needed in cast + and ufunc loop implementations. + + .. code-block:: c + + typedef struct { + PyArray_Descr base; + PyObject *na_object; + char coerce; + char has_nan_na; + char has_string_na; + char array_owned; + npy_static_string default_string; + npy_static_string na_name; + npy_string_allocator *allocator; + } PyArray_StringDTypeObject; + + .. c:member:: PyArray_Descr base + + The base object. Use this member to access fields common to all + descriptor objects. + + .. c:member:: PyObject *na_object + + A reference to the object representing the null value. If there is no + null value (the default) this will be NULL. + + .. c:member:: char coerce + + 1 if string coercion is enabled, 0 otherwise. + + .. c:member:: char has_nan_na + + 1 if the missing string object (if any) is NaN-like, 0 otherwise. + + .. c:member:: char has_string_na + + 1 if the missing string object (if any) is a string, 0 otherwise. + + .. c:member:: char array_owned + + 1 if an array owns the StringDType instance, 0 otherwise. + + .. 
c:member:: npy_static_string default_string
+
+        The default string to use in operations. If the missing string object
+        is a string, this will contain the string data for the missing string.
+
+    .. c:member:: npy_static_string na_name
+
+        The name of the missing string object, if any. An empty string
+        otherwise.
+
+    .. c:member:: npy_string_allocator *allocator
+
+        The allocator instance associated with the array that owns this
+        descriptor instance. The allocator should only be directly accessed
+        after acquiring the allocator_lock and the lock should be released
+        immediately after the allocator is no longer needed.
+
+
+Functions
+---------
+
+.. c:function:: npy_string_allocator *NpyString_acquire_allocator( \
+        const PyArray_StringDTypeObject *descr)
+
+    Acquire the mutex locking the allocator attached to
+    ``descr``. ``NpyString_release_allocator`` must be called on the allocator
+    returned by this function exactly once. Note that functions requiring the
+    GIL should not be called while the allocator mutex is held, as doing so may
+    cause deadlocks.
+
+.. c:function:: void NpyString_acquire_allocators( \
+        size_t n_descriptors, PyArray_Descr *const descrs[], \
+        npy_string_allocator *allocators[])
+
+    Simultaneously acquire the mutexes locking the allocators attached to
+    multiple descriptors. Writes a pointer to the associated allocator in the
+    allocators array for each StringDType descriptor in the array. If any of
+    the descriptors are not StringDType instances, write NULL to the allocators
+    array for that entry.
+
+    ``n_descriptors`` is the number of descriptors in the descrs array that
+    should be examined. Any descriptor after ``n_descriptors`` elements is
+    ignored. A buffer overflow will happen if the ``descrs`` array does not
+    contain ``n_descriptors`` elements.
+
+    If pointers to the same descriptor are passed multiple times, only acquires
+    the allocator mutex once but sets identical allocator pointers appropriately.
+ The allocator mutexes must be released after this function returns, see + ``NpyString_release_allocators``. + + Note that functions requiring the GIL should not be called while the + allocator mutex is held, as doing so may cause deadlocks. + +.. c:function:: void NpyString_release_allocator( \ + npy_string_allocator *allocator) + + Release the mutex locking an allocator. This must be called exactly once + after acquiring the allocator mutex and all operations requiring the + allocator are done. + + If you need to release multiple allocators, see + NpyString_release_allocators, which can correctly handle releasing the + allocator once when given several references to the same allocator. + +.. c:function:: void NpyString_release_allocators( \ + size_t length, npy_string_allocator *allocators[]) + + Release the mutexes locking N allocators. ``length`` is the length of the + allocators array. NULL entries are ignored. + + If pointers to the same allocator are passed multiple times, only releases + the allocator mutex once. + +.. c:function:: int NpyString_load(npy_string_allocator *allocator, \ + const npy_packed_static_string *packed_string, \ + npy_static_string *unpacked_string) + + Extract the packed contents of ``packed_string`` into ``unpacked_string``. + + The ``unpacked_string`` is a read-only view onto the ``packed_string`` data + and should not be used to modify the string data. If ``packed_string`` is + the null string, sets ``unpacked_string.buf`` to the NULL + pointer. Returns -1 if unpacking the string fails, returns 1 if + ``packed_string`` is the null string, and returns 0 otherwise. + + A useful pattern is to define a stack-allocated npy_static_string instance + initialized to ``{0, NULL}`` and pass a pointer to the stack-allocated + unpacked string to this function. This function can be used to + simultaneously unpack a string and determine if it is a null string. + +.. 
c:function:: int NpyString_pack_null( \ + npy_string_allocator *allocator, \ + npy_packed_static_string *packed_string) + + Pack the null string into ``packed_string``. Returns 0 on success and -1 on + failure. + +.. c:function:: int NpyString_pack( \ + npy_string_allocator *allocator, \ + npy_packed_static_string *packed_string, \ + const char *buf, \ + size_t size) + + Copy and pack the first ``size`` entries of the buffer pointed to by ``buf`` + into the ``packed_string``. Returns 0 on success and -1 on failure. diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst index df32b3dfcd60..4565e602193f 100644 --- a/doc/source/reference/c-api/types-and-structures.rst +++ b/doc/source/reference/c-api/types-and-structures.rst @@ -215,12 +215,11 @@ The :c:data:`PyArray_Type` can also be sub-typed. .. tip:: - The ``tp_as_number`` methods use a generic approach to call whatever - function has been registered for handling the operation. When the - ``_multiarray_umath module`` is imported, it sets the numeric operations - for all arrays to the corresponding ufuncs. This choice can be changed with - :c:func:`PyUFunc_ReplaceLoopBySignature` The ``tp_str`` and ``tp_repr`` - methods can also be altered using :c:func:`PyArray_SetStringFunction`. + The :c:member:`tp_as_number ` methods use + a generic approach to call whatever function has been registered for + handling the operation. When the ``_multiarray_umath`` module is imported, + it sets the numeric operations for all arrays to the corresponding ufuncs. + This choice can be changed with :c:func:`PyUFunc_ReplaceLoopBySignature`. PyGenericArrType_Type --------------------- @@ -728,7 +727,7 @@ PyArrayMethod_Context and PyArrayMethod_Spec typedef struct { PyObject *caller; struct PyArrayMethodObject_tag *method; - PyArray_Descr **descriptors; + PyArray_Descr *const *descriptors; } PyArrayMethod_Context .. 
c:member:: PyObject *caller @@ -906,6 +905,30 @@ PyArray_DTypeMeta and PyArrayDTypeMeta_Spec of functions in the DType API. Slot IDs must be one of the DType slot IDs enumerated in :ref:`dtype-slots`. +Exposed DTypes classes (``PyArray_DTypeMeta`` objects) +------------------------------------------------------ + +For use with promoters, NumPy exposes a number of Dtypes following the +pattern ``PyArray_DType`` corresponding to those found in `np.dtypes`. + +Additionally, the three DTypes, ``PyArray_PyLongDType``, +``PyArray_PyFloatDType``, ``PyArray_PyComplexDType`` correspond to the +Python scalar values. These cannot be used in all places, but do allow +for example the common dtype operation and implementing promotion with them +may be necessary. + +Further, the following abstract DTypes are defined which cover both the +builtin NumPy ones and the python ones, and users can in principle subclass +from them (this does not inherit any DType specific functionality): +* ``PyArray_IntAbstractDType`` +* ``PyArray_FloatAbstractDType`` +* ``PyArray_ComplexAbstractDType`` + +.. warning:: + As of NumPy 2.0, the *only* valid use for these DTypes is registering a + promoter conveniently to e.g. match "any integers" (and subclass checks). + Because of this, they are not exposed to Python. + PyUFunc_Type and PyUFuncObject ------------------------------ @@ -1286,7 +1309,7 @@ PyArrayMultiIter_Type and PyArrayMultiIterObject npy_intp index; int nd; npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; - PyArrayIterObject *iters[NPY_MAXDIMS_LEGACY_ITERS]; + PyArrayIterObject *iters[]; } PyArrayMultiIterObject; .. c:macro: PyObject_HEAD @@ -1588,3 +1611,29 @@ for completeness and assistance in understanding the code. ``arrayobject.h`` header. This type is not exposed to Python and could be replaced with a C-structure. As a Python type it takes advantage of reference- counted memory management. 
+
+
+NumPy C-API and C complex
+=========================
+When you use the NumPy C-API, you will have access to complex real declarations
+``npy_cdouble`` and ``npy_cfloat``, which are declared in terms of the C
+standard types from ``complex.h``. Unfortunately, ``complex.h`` contains
+``#define I ...`` (where the actual definition depends on the compiler), which
+means that any downstream user that does ``#include <complex.h>``
+could get ``I`` defined, and using something like declaring ``double I;`` in
+their code will result in an obscure compiler error like
+
+.. code-block:: C
+
+    error: expected ‘)’ before ‘__extension__’
+    double I,
+
+This error can be avoided by adding::
+
+    #undef I
+
+to your code.
+
+.. versionchanged:: 2.0
+    The inclusion of ``complex.h`` was new in NumPy 2, so that code defining
+    a different ``I`` may not have required the ``#undef I`` on older versions.
+    NumPy 2.0.1 briefly included the ``#undef I``
\ No newline at end of file
diff --git a/doc/source/reference/constants.rst b/doc/source/reference/constants.rst
index 9db0da787712..71ce0051bf13 100644
--- a/doc/source/reference/constants.rst
+++ b/doc/source/reference/constants.rst
@@ -28,7 +28,7 @@ NumPy includes several constants:
 
     .. rubric:: References
 
-    https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
+    https://en.wikipedia.org/wiki/Euler%27s_constant
 
 .. data:: inf
 
@@ -62,6 +62,7 @@ NumPy includes several constants:
 
     .. rubric:: Examples
 
+    >>> import numpy as np
     >>> np.inf
     inf
     >>> np.array([1]) / 0.
@@ -88,10 +89,9 @@ NumPy includes several constants:
     NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
     (IEEE 754). This means that Not a Number is not equivalent to infinity.
 
-    `NaN` and `NAN` are aliases of `nan`.
-
     .. rubric:: Examples
 
+    >>> import numpy as np
     >>> np.nan
     nan
     >>> np.log(-1)
@@ -106,6 +106,7 @@
 
     .. 
rubric:: Examples + >>> import numpy as np >>> np.newaxis is None True >>> x = np.arange(3) @@ -121,16 +122,16 @@ NumPy includes several constants: [[2]]]) >>> x[:, np.newaxis] * x array([[0, 0, 0], - [0, 1, 2], - [0, 2, 4]]) + [0, 1, 2], + [0, 2, 4]]) Outer product, same as ``outer(x, y)``: >>> y = np.arange(3, 6) >>> x[:, np.newaxis] * y array([[ 0, 0, 0], - [ 3, 4, 5], - [ 6, 8, 10]]) + [ 3, 4, 5], + [ 6, 8, 10]]) ``x[np.newaxis, :]`` is equivalent to ``x[np.newaxis]`` and ``x[None]``: diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst index d4640e65456f..72b61e3a94db 100644 --- a/doc/source/reference/distutils.rst +++ b/doc/source/reference/distutils.rst @@ -14,7 +14,7 @@ Packaging (:mod:`numpy.distutils`) .. warning:: Note that ``setuptools`` does major releases often and those may contain - changes that break ``numpy.distutils``, which will *not* be updated anymore + changes that break :mod:`numpy.distutils`, which will *not* be updated anymore for new ``setuptools`` versions. It is therefore recommended to set an upper version bound in your build configuration for the last known version of ``setuptools`` that works with your build. 
diff --git a/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg b/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg new file mode 100644 index 000000000000..579480132b3d --- /dev/null +++ b/doc/source/reference/figures/nep-0050-promotion-no-fonts.svg @@ -0,0 +1,1471 @@ + + + + + + image/svg+xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/doc/source/reference/global_state.rst b/doc/source/reference/global_state.rst index e0ab1bb2a7ba..e66c86faf1b3 100644 --- a/doc/source/reference/global_state.rst +++ b/doc/source/reference/global_state.rst @@ -1,14 +1,13 @@ .. _global_state: -************ -Global state -************ - -NumPy has a few import-time, compile-time, or runtime options -which change the global behaviour. -Most of these are related to performance or for debugging -purposes and will not be interesting to the vast majority -of users. +**************************** +Global Configuration Options +**************************** + +NumPy has a few import-time, compile-time, or runtime configuration +options which change the global behaviour. Most of these are related to +performance or for debugging purposes and will not be interesting to the +vast majority of users. 
Performance-related options diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst index ed9641409014..02e3248953fb 100644 --- a/doc/source/reference/index.rst +++ b/doc/source/reference/index.rst @@ -58,6 +58,7 @@ Other topics array_api simd/index + thread_safety global_state security distutils_status_migration diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst index 7121914b93e2..01ac67f42704 100644 --- a/doc/source/reference/maskedarray.baseclass.rst +++ b/doc/source/reference/maskedarray.baseclass.rst @@ -18,6 +18,8 @@ defines several constants. specific entry of a masked array is masked, or to mask one or several entries of a masked array:: + >>> import numpy as np + >>> x = ma.array([1, 2, 3], mask=[0, 1, 0]) >>> x[1] is ma.masked True diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst index 161ce14b76d2..3324269ee7aa 100644 --- a/doc/source/reference/maskedarray.generic.rst +++ b/doc/source/reference/maskedarray.generic.rst @@ -35,19 +35,19 @@ masked (invalid). The package ensures that masked entries are not used in computations. -As an illustration, let's consider the following dataset:: + As an illustration, let's consider the following dataset: >>> import numpy as np >>> import numpy.ma as ma >>> x = np.array([1, 2, 3, -1, 5]) -We wish to mark the fourth entry as invalid. The easiest is to create a masked -array:: + We wish to mark the fourth entry as invalid. The easiest is to create a masked + array:: >>> mx = ma.masked_array(x, mask=[0, 0, 0, 1, 0]) -We can now compute the mean of the dataset, without taking the invalid data -into account:: + We can now compute the mean of the dataset, without taking the invalid data + into account: >>> mx.mean() 2.75 @@ -62,17 +62,17 @@ class, which is a subclass of :class:`numpy.ndarray`. 
The class, its attributes and methods are described in more details in the :ref:`MaskedArray class ` section. -The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: :: +The :mod:`numpy.ma` module can be used as an addition to :mod:`numpy`: >>> import numpy as np >>> import numpy.ma as ma -To create an array with the second element invalid, we would do:: + To create an array with the second element invalid, we would do:: >>> y = ma.array([1, 2, 3], mask = [0, 1, 0]) -To create a masked array where all values close to 1.e20 are invalid, we would -do:: + To create a masked array where all values close to 1.e20 are invalid, we would + do: >>> z = ma.masked_values([1.0, 1.e20, 3.0, 4.0], 1.e20) @@ -108,17 +108,18 @@ There are several ways to construct a masked array. mask of the view is set to :attr:`nomask` if the array has no named fields, or an array of boolean with the same structure as the array otherwise. - >>> x = np.array([1, 2, 3]) - >>> x.view(ma.MaskedArray) - masked_array(data=[1, 2, 3], - mask=False, - fill_value=999999) - >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) - >>> x.view(ma.MaskedArray) - masked_array(data=[(1, 1.0), (2, 2.0)], - mask=[(False, False), (False, False)], - fill_value=(999999, 1e+20), - dtype=[('a', '>> import numpy as np + >>> x = np.array([1, 2, 3]) + >>> x.view(ma.MaskedArray) + masked_array(data=[1, 2, 3], + mask=False, + fill_value=999999) + >>> x = np.array([(1, 1.), (2, 2.)], dtype=[('a',int), ('b', float)]) + >>> x.view(ma.MaskedArray) + masked_array(data=[(1, 1.0), (2, 2.0)], + mask=[(False, False), (False, False)], + fill_value=(999999, 1e+20), + dtype=[('a', '>> import numpy as np >>> x = ma.array([[1, 2], [3, 4]], mask=[[0, 1], [1, 0]]) >>> x[~x.mask] masked_array(data=[1, 4], - mask=[False, False], - fill_value=999999) + mask=[False, False], + fill_value=999999) -Another way to retrieve the valid data is to use the :meth:`compressed` -method, which returns a one-dimensional 
:class:`~numpy.ndarray` (or one of its -subclasses, depending on the value of the :attr:`~MaskedArray.baseclass` -attribute):: + Another way to retrieve the valid data is to use the :meth:`compressed` + method, which returns a one-dimensional :class:`~numpy.ndarray` (or one of its + subclasses, depending on the value of the :attr:`~MaskedArray.baseclass` + attribute): >>> x.compressed() array([1, 4]) -Note that the output of :meth:`compressed` is always 1D. + Note that the output of :meth:`compressed` is always 1D. @@ -218,7 +220,7 @@ Masking an entry ~~~~~~~~~~~~~~~~ The recommended way to mark one or several specific entries of a masked array -as invalid is to assign the special value :attr:`masked` to them:: +as invalid is to assign the special value :attr:`masked` to them: >>> x = ma.array([1, 2, 3]) >>> x[0] = ma.masked @@ -257,8 +259,9 @@ but this usage is discouraged. All the entries of an array can be masked at once by assigning ``True`` to the -mask:: +mask: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x.mask = True >>> x @@ -267,8 +270,8 @@ mask:: fill_value=999999, dtype=int64) -Finally, specific entries can be masked and/or unmasked by assigning to the -mask a sequence of booleans:: + Finally, specific entries can be masked and/or unmasked by assigning to the + mask a sequence of booleans: >>> x = ma.array([1, 2, 3]) >>> x.mask = [0, 1, 0] @@ -281,8 +284,9 @@ Unmasking an entry ~~~~~~~~~~~~~~~~~~ To unmask one or several specific entries, we can just assign one or several -new valid values to them:: +new valid values to them: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x masked_array(data=[1, 2, --], @@ -300,37 +304,40 @@ new valid values to them:: attribute. This feature was introduced to prevent overwriting the mask. To force the unmasking of an entry where the array has a hard mask, the mask must first to be softened using the :meth:`soften_mask` method - before the allocation. 
It can be re-hardened with :meth:`harden_mask`:: - - >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) - >>> x - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x[-1] = 5 - >>> x - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x.soften_mask() - masked_array(data=[1, 2, --], - mask=[False, False, True], - fill_value=999999) - >>> x[-1] = 5 - >>> x - masked_array(data=[1, 2, 5], - mask=[False, False, False], - fill_value=999999) - >>> x.harden_mask() - masked_array(data=[1, 2, 5], - mask=[False, False, False], - fill_value=999999) + before the allocation. It can be re-hardened with :meth:`harden_mask` as + follows: + + >>> import numpy.ma as ma + >>> x = ma.array([1, 2, 3], mask=[0, 0, 1], hard_mask=True) + >>> x + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x[-1] = 5 + >>> x + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x.soften_mask() + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) + >>> x[-1] = 5 + >>> x + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) + >>> x.harden_mask() + masked_array(data=[1, 2, 5], + mask=[False, False, False], + fill_value=999999) To unmask all masked entries of a masked array (provided the mask isn't a hard mask), the simplest solution is to assign the constant :attr:`nomask` to the -mask:: +mask: + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x masked_array(data=[1, 2, --], @@ -352,8 +359,9 @@ its mechanisms for indexing and slicing. 
When accessing a single entry of a masked array with no named fields, the output is either a scalar (if the corresponding entry of the mask is ``False``) or the special value :attr:`masked` (if the corresponding entry of -the mask is ``True``):: +the mask is ``True``): + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3], mask=[0, 0, 1]) >>> x[0] 1 @@ -367,6 +375,7 @@ If the masked array has named fields, accessing a single entry returns a array with the same dtype as the initial array if at least one of the fields is masked. + >>> import numpy.ma as ma >>> y = ma.masked_array([(1,2), (3, 4)], ... mask=[(0, 0), (0, 1)], ... dtype=[('a', int), ('b', int)]) @@ -382,6 +391,7 @@ mask is either :attr:`nomask` (if there was no invalid entries in the original array) or a view of the corresponding slice of the original mask. The view is required to ensure propagation of any modification of the mask to the original. + >>> import numpy.ma as ma >>> x = ma.array([1, 2, 3, 4, 5], mask=[0, 1, 0, 0, 1]) >>> mx = x[:3] >>> mx @@ -398,6 +408,7 @@ required to ensure propagation of any modification of the mask to the original. >>> x.data array([ 1, -1, 3, 4, 5]) + Accessing a field of a masked array with structured datatype returns a :class:`MaskedArray`. @@ -417,8 +428,9 @@ meaning that the corresponding :attr:`~MaskedArray.data` entries The :mod:`numpy.ma` module comes with a specific implementation of most ufuncs. Unary and binary functions that have a validity domain (such as :func:`~numpy.log` or :func:`~numpy.divide`) return the :data:`masked` -constant whenever the input is masked or falls outside the validity domain:: +constant whenever the input is masked or falls outside the validity domain: + >>> import numpy.ma as ma >>> ma.log([-1, 0, 1, 2]) masked_array(data=[--, --, 0.0, 0.6931471805599453], mask=[ True, True, False, False], @@ -430,8 +442,9 @@ result of a binary ufunc is masked wherever any of the input is masked. 
If the ufunc also returns the optional context output (a 3-element tuple containing the name of the ufunc, its arguments and its domain), the context is processed and entries of the output masked array are masked wherever the corresponding -input fall outside the validity domain:: +input fall outside the validity domain: + >>> import numpy.ma as ma >>> x = ma.array([-1, 1, 0, 2, 3], mask=[0, 0, 0, 0, 1]) >>> np.log(x) masked_array(data=[--, 0.0, --, 0.6931471805599453, --], @@ -447,7 +460,7 @@ Data with a given value representing missing data Let's consider a list of elements, ``x``, where values of -9999. represent missing data. We wish to compute the average value of the data and the vector -of anomalies (deviations from the average):: +of anomalies (deviations from the average): >>> import numpy.ma as ma >>> x = [0.,1.,-9999.,3.,4.] @@ -466,6 +479,8 @@ Filling in the missing data Suppose now that we wish to print that same data, but with the missing values replaced by the average value. + >>> import numpy.ma as ma + >>> mx = ma.masked_values (x, -9999.) >>> print(mx.filled(mx.mean())) [0. 1. 2. 3. 4.] @@ -492,8 +507,10 @@ Ignoring extreme values Let's consider an array ``d`` of floats between 0 and 1. 
We wish to compute the average of the values of ``d`` while ignoring any data outside -the range ``[0.2, 0.9]``:: +the range ``[0.2, 0.9]``: + >>> import numpy as np + >>> import numpy.ma as ma >>> d = np.linspace(0, 1, 20) >>> print(d.mean() - ma.masked_outside(d, 0.2, 0.9).mean()) -0.05263157894736836 diff --git a/doc/source/reference/module_structure.rst b/doc/source/reference/module_structure.rst index 2db9de7f03a8..01a5bcff7fbc 100644 --- a/doc/source/reference/module_structure.rst +++ b/doc/source/reference/module_structure.rst @@ -35,6 +35,7 @@ Special-purpose namespaces - :ref:`numpy.emath ` - mathematical functions with automatic domain - :ref:`numpy.lib ` - utilities & functionality which do not fit the main namespace - :ref:`numpy.rec ` - record arrays (largely superseded by dataframe libraries) +- :ref:`numpy.version ` - small module with more detailed version info Legacy namespaces ================= @@ -67,6 +68,7 @@ and/or this code is deprecated or isn't reliable. numpy.emath numpy.lib numpy.rec + numpy.version numpy.char numpy.distutils numpy.f2py <../f2py/index> diff --git a/doc/source/reference/random/compatibility.rst b/doc/source/reference/random/compatibility.rst index b45e195fbd71..455a2485ea4a 100644 --- a/doc/source/reference/random/compatibility.rst +++ b/doc/source/reference/random/compatibility.rst @@ -22,7 +22,7 @@ outside of NumPy's control that limit our ability to guarantee much more than this. For example, different CPUs implement floating point arithmetic differently, and this can cause differences in certain edge cases that cascade to the rest of the stream. `Generator.multivariate_normal`, for another -example, uses a matrix decomposition from ``numpy.linalg``. Even on the same +example, uses a matrix decomposition from `numpy.linalg`. 
Even on the same platform, a different build of ``numpy`` may use a different version of this matrix decomposition algorithm from the LAPACK that it links to, causing `Generator.multivariate_normal` to return completely different (but equally diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst index 26407bb2a3fa..9c7dc86b2825 100644 --- a/doc/source/reference/random/extending.rst +++ b/doc/source/reference/random/extending.rst @@ -4,15 +4,15 @@ Extending ========= -The BitGenerators have been designed to be extendable using standard tools for -high-performance Python -- numba and Cython. The `~Generator` object can also -be used with user-provided BitGenerators as long as these export a small set of -required functions. +The `BitGenerator`\ s have been designed to be extendable using standard tools +for high-performance Python -- numba and Cython. The `Generator` object can +also be used with user-provided `BitGenerator`\ s as long as these export a +small set of required functions. Numba ----- Numba can be used with either CTypes or CFFI. The current iteration of the -BitGenerators all export a small set of functions through both interfaces. +`BitGenerator`\ s all export a small set of functions through both interfaces. This example shows how numba can be used to produce gaussian samples using a pure Python implementation which is then compiled. The random numbers are @@ -32,7 +32,7 @@ the `Examples`_ section below. Cython ------ -Cython can be used to unpack the ``PyCapsule`` provided by a BitGenerator. +Cython can be used to unpack the ``PyCapsule`` provided by a `BitGenerator`. This example uses `PCG64` and the example from above. The usual caveats for writing high-performance code using Cython -- removing bounds checks and wrap around, providing array alignment information -- still apply. @@ -41,7 +41,7 @@ wrap around, providing array alignment information -- still apply. 
:language: cython :end-before: example 2 -The BitGenerator can also be directly accessed using the members of the ``bitgen_t`` +The `BitGenerator` can also be directly accessed using the members of the ``bitgen_t`` struct. .. literalinclude:: ../../../../numpy/random/_examples/cython/extending_distributions.pyx @@ -81,9 +81,9 @@ directly from the ``_generator`` shared object, using the `BitGenerator.cffi` in New BitGenerators ----------------- -`~Generator` can be used with user-provided `~BitGenerator`\ s. The simplest -way to write a new BitGenerator is to examine the pyx file of one of the -existing BitGenerators. The key structure that must be provided is the +`Generator` can be used with user-provided `BitGenerator`\ s. The simplest +way to write a new `BitGenerator` is to examine the pyx file of one of the +existing `BitGenerator`\ s. The key structure that must be provided is the ``capsule`` which contains a ``PyCapsule`` to a struct pointer of type ``bitgen_t``, @@ -98,11 +98,11 @@ existing BitGenerators. The key structure that must be provided is the } bitgen_t; which provides 5 pointers. The first is an opaque pointer to the data structure -used by the BitGenerators. The next three are function pointers which return -the next 64- and 32-bit unsigned integers, the next random double and the next -raw value. This final function is used for testing and so can be set to -the next 64-bit unsigned integer function if not needed. Functions inside -``Generator`` use this structure as in +used by the `BitGenerator`\ s. The next three are function pointers which +return the next 64- and 32-bit unsigned integers, the next random double and +the next raw value. This final function is used for testing and so can be set +to the next 64-bit unsigned integer function if not needed. Functions inside +`Generator` use this structure as in .. 
code-block:: c diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst index c8662c56a788..088d159c74f5 100644 --- a/doc/source/reference/random/generator.rst +++ b/doc/source/reference/random/generator.rst @@ -2,14 +2,14 @@ Random ``Generator`` ==================== -The `~Generator` provides access to +The `Generator` provides access to a wide range of distributions, and served as a replacement for :class:`~numpy.random.RandomState`. The main difference between -the two is that ``Generator`` relies on an additional BitGenerator to +the two is that `Generator` relies on an additional BitGenerator to manage state and generate the random bits, which are then transformed into random values from useful distributions. The default BitGenerator used by -``Generator`` is `~PCG64`. The BitGenerator -can be changed by passing an instantized BitGenerator to ``Generator``. +`Generator` is `PCG64`. The BitGenerator +can be changed by passing an instantized BitGenerator to `Generator`. .. autofunction:: default_rng @@ -72,6 +72,7 @@ By default, `Generator.permuted` returns a copy. To operate in-place with `Generator.permuted`, pass the same array as the first argument *and* as the value of the ``out`` parameter. For example, + >>> import numpy as np >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x #doctest: +SKIP @@ -84,12 +85,12 @@ the value of the ``out`` parameter. For example, [ 6, 7, 8, 9, 5], [10, 14, 11, 13, 12]]) -Note that when ``out`` is given, the return value is ``out``: + Note that when ``out`` is given, the return value is ``out``: >>> y is x True -.. _generator-handling-axis-parameter: +.. _generator-handling-axis-parameter: Handling the ``axis`` parameter ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -100,6 +101,7 @@ which dimension of the input array to use as the sequence. 
In the case of a two-dimensional array, ``axis=0`` will, in effect, rearrange the rows of the array, and ``axis=1`` will rearrange the columns. For example + >>> import numpy as np >>> rng = np.random.default_rng() >>> x = np.arange(0, 15).reshape(3, 5) >>> x @@ -119,6 +121,8 @@ how `numpy.sort` treats it. Each slice along the given axis is shuffled independently of the others. Compare the following example of the use of `Generator.permuted` to the above example of `Generator.permutation`: + >>> import numpy as np + >>> rng = np.random.default_rng() >>> rng.permuted(x, axis=1) #doctest: +SKIP array([[ 1, 0, 2, 4, 3], # random [ 5, 7, 6, 9, 8], @@ -132,8 +136,8 @@ Shuffling non-NumPy sequences ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ `Generator.shuffle` works on non-NumPy sequences. That is, if it is given a sequence that is not a NumPy array, it shuffles that sequence in-place. -For example, + >>> import numpy as np >>> rng = np.random.default_rng() >>> a = ['A', 'B', 'C', 'D', 'E'] >>> rng.shuffle(a) # shuffle the list in-place diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst index 682d02c31cd2..976a03a9a449 100644 --- a/doc/source/reference/random/index.rst +++ b/doc/source/reference/random/index.rst @@ -18,18 +18,16 @@ probability distributions. In general, users will create a `Generator` instance with `default_rng` and call the various methods on it to obtain samples from different distributions. -:: - >>> import numpy as np >>> rng = np.random.default_rng() # Generate one random float uniformly distributed over the range [0, 1) >>> rng.random() #doctest: +SKIP 0.06369197489564249 # may vary - # Generate an array of 10 numbers according to a unit Gaussian distribution. 
+ # Generate an array of 10 numbers according to a unit Gaussian distribution >>> rng.standard_normal(10) #doctest: +SKIP array([-0.31018314, -1.8922078 , -0.3628523 , -0.63526532, 0.43181166, # may vary 0.51640373, 1.25693945, 0.07779185, 0.84090247, -2.13406828]) - # Generate an array of 5 integers uniformly over the range [0, 10). + # Generate an array of 5 integers uniformly over the range [0, 10) >>> rng.integers(low=0, high=10, size=5) #doctest: +SKIP array([8, 7, 6, 2, 0]) # may vary @@ -40,14 +38,13 @@ generate different numbers each time. The pseudo-random sequences will be independent for all practical purposes, at least those purposes for which our pseudo-randomness was good for in the first place. -:: - - >>> rng1 = np.random.default_rng() - >>> rng1.random() #doctest: +SKIP - 0.6596288841243357 # may vary - >>> rng2 = np.random.default_rng() - >>> rng2.random() #doctest: +SKIP - 0.11885628817151628 # may vary + >>> import numpy as np + >>> rng1 = np.random.default_rng() + >>> rng1.random() #doctest: +SKIP + 0.6596288841243357 # may vary + >>> rng2 = np.random.default_rng() + >>> rng2.random() #doctest: +SKIP + 0.11885628817151628 # may vary .. warning:: @@ -56,6 +53,8 @@ pseudo-randomness was good for in the first place. or cryptographic purposes. See the :py:mod:`secrets` module from the standard library for such use cases. +.. _recommend-secrets-randbits: + Seeds should be large positive integers. `default_rng` can take positive integers of any size. We recommend using very large, unique numbers to ensure that your seed is different from anyone else's. This is good practice to ensure @@ -64,18 +63,17 @@ intentionally *trying* to reproduce their result. A convenient way to get such a seed number is to use :py:func:`secrets.randbits` to get an arbitrary 128-bit integer. 
-:: - - >>> import secrets - >>> import numpy as np - >>> secrets.randbits(128) #doctest: +SKIP - 122807528840384100672342137672332424406 # may vary - >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) - >>> rng1.random() - 0.5363922081269535 - >>> rng2 = np.random.default_rng(122807528840384100672342137672332424406) - >>> rng2.random() - 0.5363922081269535 + >>> import numpy as np + >>> import secrets + >>> import numpy as np + >>> secrets.randbits(128) #doctest: +SKIP + 122807528840384100672342137672332424406 # may vary + >>> rng1 = np.random.default_rng(122807528840384100672342137672332424406) + >>> rng1.random() + 0.5363922081269535 + >>> rng2 = np.random.default_rng(122807528840384100672342137672332424406) + >>> rng2.random() + 0.5363922081269535 See the documentation on `default_rng` and `SeedSequence` for more advanced options for controlling the seed in specialized scenarios. diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst index 09a048561e25..99b7ec781b55 100644 --- a/doc/source/reference/random/multithreading.rst +++ b/doc/source/reference/random/multithreading.rst @@ -104,8 +104,8 @@ that does not use an existing array due to array creation overhead. Out[6]: 125 ms ± 309 µs per loop (mean ± std. dev. of 7 runs, 10 loops each) -Note that if `threads` is not set by the user, it will be determined by -`multiprocessing.cpu_count()`. +Note that if ``threads`` is not set by the user, it will be determined by +``multiprocessing.cpu_count()``. .. code-block:: ipython diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst index 0fcd6f4c9dd3..44cf7aa11013 100644 --- a/doc/source/reference/random/new-or-different.rst +++ b/doc/source/reference/random/new-or-different.rst @@ -9,38 +9,40 @@ NumPy 1.17.0 introduced `Generator` as an improved replacement for the :ref:`legacy ` `RandomState`. 
Here is a quick comparison of the two implementations. -================== ==================== ============= -Feature Older Equivalent Notes ------------------- -------------------- ------------- -`~.Generator` `~.RandomState` ``Generator`` requires a stream - source, called a `BitGenerator` - A number of these are provided. - ``RandomState`` uses - the Mersenne Twister `~.MT19937` by - default, but can also be instantiated - with any BitGenerator. ------------------- -------------------- ------------- -``random`` ``random_sample``, Access the values in a BitGenerator, - ``rand`` convert them to ``float64`` in the - interval ``[0.0.,`` `` 1.0)``. - In addition to the ``size`` kwarg, now - supports ``dtype='d'`` or ``dtype='f'``, - and an ``out`` kwarg to fill a user- - supplied array. - - Many other distributions are also - supported. ------------------- -------------------- ------------- -``integers`` ``randint``, Use the ``endpoint`` kwarg to adjust - ``random_integers`` the inclusion or exclusion of the - ``high`` interval endpoint -================== ==================== ============= +======================= ================== ============= +Feature Older Equivalent Notes +----------------------- ------------------ ------------- +`Generator` `RandomState` `Generator` requires a stream + source, called a `BitGenerator` + A number of these are provided. + `RandomState` uses the Mersenne + Twister `MT19937` by default, + but can also be instantiated + with any BitGenerator. +----------------------- ------------------ ------------- +`~.Generator.random` `random_sample`, Access the values in a + `rand` BitGenerator, convert them to + ``float64`` in the interval + ``[0.0., 1.0)``. In addition + to the ``size`` kwarg, now + supports ``dtype='d'`` or + ``dtype='f'``, and an ``out`` + kwarg to fill a user-supplied + array. + + Many other distributions are also + supported. 
+----------------------- ------------------ ------------- +`~.Generator.integers` `randint`, Use the ``endpoint`` kwarg to + `random_integers` adjust the inclusion or exclusion + of the ``high`` interval endpoint. +======================= ================== ============= * The normal, exponential and gamma generators use 256-step Ziggurat methods which are 2-10 times faster than NumPy's default implementation in `~.Generator.standard_normal`, `~.Generator.standard_exponential` or `~.Generator.standard_gamma`. Because of the change in algorithms, it is not - possible to reproduce the exact random values using ``Generator`` for these + possible to reproduce the exact random values using `Generator` for these distributions or any distribution method that relies on them. .. ipython:: python @@ -63,8 +65,8 @@ Feature Older Equivalent Notes * `~.Generator.integers` is now the canonical way to generate integer random numbers from a discrete uniform distribution. This replaces both - ``randint`` and the deprecated ``random_integers``. -* The ``rand`` and ``randn`` methods are only available through the legacy + `randint` and the deprecated `random_integers`. +* The `rand` and `randn` methods are only available through the legacy `~.RandomState`. * `Generator.random` is now the canonical way to generate floating-point random numbers, which replaces `RandomState.random_sample`, diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst index af2aac82f480..892ceb3d1698 100644 --- a/doc/source/reference/random/parallel.rst +++ b/doc/source/reference/random/parallel.rst @@ -13,38 +13,42 @@ or distributed). ------------------------ NumPy allows you to spawn new (with very high probability) independent -`~BitGenerator` and `~Generator` instances via their ``spawn()`` method. -This spawning is implemented by the `~SeedSequence` used for initializing +`BitGenerator` and `Generator` instances via their ``spawn()`` method. 
+This spawning is implemented by the `SeedSequence` used for initializing the bit generators random stream. -`~SeedSequence` `implements an algorithm`_ to process a user-provided seed, +`SeedSequence` `implements an algorithm`_ to process a user-provided seed, typically as an integer of some size, and to convert it into an initial state for -a `~BitGenerator`. It uses hashing techniques to ensure that low-quality seeds +a `BitGenerator`. It uses hashing techniques to ensure that low-quality seeds are turned into high quality initial states (at least, with very high probability). -For example, `MT19937` has a state consisting of 624 -`uint32` integers. A naive way to take a 32-bit integer seed would be to just set +For example, `MT19937` has a state consisting of 624 ``uint32`` +integers. A naive way to take a 32-bit integer seed would be to just set the last element of the state to the 32-bit seed and leave the rest 0s. This is a valid state for `MT19937`, but not a good one. The Mersenne Twister algorithm `suffers if there are too many 0s`_. Similarly, two adjacent 32-bit integer seeds (i.e. ``12345`` and ``12346``) would produce very similar streams. -`~SeedSequence` avoids these problems by using successions of integer hashes +`SeedSequence` avoids these problems by using successions of integer hashes with good `avalanche properties`_ to ensure that flipping any bit in the input has about a 50% chance of flipping any bit in the output. Two input seeds that are very close to each other will produce initial states that are very far from each other (with very high probability). It is also constructed in such a way that you can provide arbitrary-sized integers or lists of integers. 
-`~SeedSequence` will take all of the bits that you provide and mix them -together to produce however many bits the consuming `~BitGenerator` needs to +`SeedSequence` will take all of the bits that you provide and mix them +together to produce however many bits the consuming `BitGenerator` needs to initialize itself. These properties together mean that we can safely mix together the usual -user-provided seed with simple incrementing counters to get `~BitGenerator` +user-provided seed with simple incrementing counters to get `BitGenerator` states that are (to very high probability) independent of each other. We can wrap this together into an API that is easy to use and difficult to misuse. +Note that while `SeedSequence` attempts to solve many of the issues related to +user-provided small seeds, we still :ref:`recommend` +using :py:func:`secrets.randbits` to generate seeds with 128 bits of entropy to +avoid the remaining biases introduced by human-chosen seeds. .. code-block:: python @@ -58,7 +62,7 @@ wrap this together into an API that is easy to use and difficult to misuse. .. end_block -For convenience the direct use of `~SeedSequence` is not necessary. +For convenience the direct use of `SeedSequence` is not necessary. The above ``streams`` can be spawned directly from a parent generator via `~Generator.spawn`: @@ -70,7 +74,7 @@ via `~Generator.spawn`: .. end_block Child objects can also spawn to make grandchildren, and so on. -Each child has a `~SeedSequence` with its position in the tree of spawned +Each child has a `SeedSequence` with its position in the tree of spawned child objects mixed in with the user-provided seed to generate independent (with very high probability) streams. @@ -88,7 +92,7 @@ Python has increasingly-flexible mechanisms for parallelization available, and this scheme fits in very well with that kind of use. 
Using this scheme, an upper bound on the probability of a collision can be -estimated if one knows the number of streams that you derive. `~SeedSequence` +estimated if one knows the number of streams that you derive. `SeedSequence` hashes its inputs, both the seed and the spawn-tree-path, down to a 128-bit pool by default. The probability that there is a collision in that pool, pessimistically-estimated ([1]_), will be about :math:`n^2*2^{-128}` where @@ -106,7 +110,7 @@ territory ([2]_). .. [2] In this calculation, we can mostly ignore the amount of numbers drawn from each stream. See :ref:`upgrading-pcg64` for the technical details about `PCG64`. The other PRNGs we provide have some extra protection built in - that avoids overlaps if the `~SeedSequence` pools differ in the + that avoids overlaps if the `SeedSequence` pools differ in the slightest bit. `PCG64DXSM` has :math:`2^{127}` separate cycles determined by the seed in addition to the position in the :math:`2^{128}` long period for each cycle, so one has to both get on or @@ -129,7 +133,7 @@ territory ([2]_). Sequence of integer seeds ------------------------- -As discussed in the previous section, `~SeedSequence` can not only take an +As discussed in the previous section, `SeedSequence` can not only take an integer seed, it can also take an arbitrary-length sequence of (non-negative) integers. If one exercises a little care, one can use this feature to design *ad hoc* schemes for getting safe parallel PRNG streams with similar safety @@ -160,7 +164,7 @@ integer in a list. This can be used to replace a number of unsafe strategies that have been used in the past which try to combine the root seed and the ID back into a single integer seed value. For example, it is common to see users add the worker ID to -the root seed, especially with the legacy `~RandomState` code. +the root seed, especially with the legacy `RandomState` code. .. code-block:: python @@ -249,13 +253,13 @@ are listed below. 
+-----------------+-------------------------+-------------------------+-------------------------+ | BitGenerator | Period | Jump Size | Bits per Draw | +=================+=========================+=========================+=========================+ -| MT19937 | :math:`2^{19937}-1` | :math:`2^{128}` | 32 | +| `MT19937` | :math:`2^{19937}-1` | :math:`2^{128}` | 32 | +-----------------+-------------------------+-------------------------+-------------------------+ -| PCG64 | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +| `PCG64` | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ -| PCG64DXSM | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +| `PCG64DXSM` | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ -| Philox | :math:`2^{256}` | :math:`2^{128}` | 64 | +| `Philox` | :math:`2^{256}` | :math:`2^{128}` | 64 | +-----------------+-------------------------+-------------------------+-------------------------+ .. [3] The jump size is :math:`(\phi-1)*2^{128}` where :math:`\phi` is the diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst index 7fe383f24bdd..7043734f24c8 100644 --- a/doc/source/reference/random/performance.rst +++ b/doc/source/reference/random/performance.rst @@ -24,7 +24,7 @@ even on 32-bit processes, this is your choice. `MT19937` `fails some statistical tests`_ and is not especially fast compared to modern PRNGs. For these reasons, we mostly do not recommend -using it on its own, only through the legacy `~.RandomState` for +using it on its own, only through the legacy `RandomState` for reproducing old results. That said, it has a very long history as a default in many systems. 
diff --git a/doc/source/reference/random/upgrading-pcg64.rst b/doc/source/reference/random/upgrading-pcg64.rst index 79be8440ef5c..79432ac578f1 100644 --- a/doc/source/reference/random/upgrading-pcg64.rst +++ b/doc/source/reference/random/upgrading-pcg64.rst @@ -2,7 +2,7 @@ .. currentmodule:: numpy.random -Upgrading ``PCG64`` with ``PCG64DXSM`` +Upgrading `PCG64` with `PCG64DXSM` ====================================== Uses of the `PCG64` `BitGenerator` in a massively-parallel context have been diff --git a/doc/source/reference/routines.array-manipulation.rst b/doc/source/reference/routines.array-manipulation.rst index be2b1120e080..619458de8224 100644 --- a/doc/source/reference/routines.array-manipulation.rst +++ b/doc/source/reference/routines.array-manipulation.rst @@ -88,6 +88,7 @@ Splitting arrays dsplit hsplit vsplit + unstack Tiling arrays ============= diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst index b62294b9a191..7a8728f2d727 100644 --- a/doc/source/reference/routines.char.rst +++ b/doc/source/reference/routines.char.rst @@ -16,10 +16,11 @@ Legacy fixed-width string functionality The `numpy.char` module provides a set of vectorized string operations for arrays of type `numpy.str_` or `numpy.bytes_`. For example - >>> np.char.capitalize(["python", "numpy"]) - array(['Python', 'Numpy'], dtype='>> np.char.add(["num", "doc"], ["py", "umentation"]) - array(['numpy', 'documentation'], dtype='>> import numpy as np + >>> np.char.capitalize(["python", "numpy"]) + array(['Python', 'Numpy'], dtype='>> np.char.add(["num", "doc"], ["py", "umentation"]) + array(['numpy', 'documentation'], dtype='>> import numpy as np >>> p1d = np.poly1d([1, 2, 3]) >>> p = np.polynomial.Polynomial(p1d.coef[::-1]) -In addition to the ``coef`` attribute, polynomials from the polynomial -package also have ``domain`` and ``window`` attributes. 
-These attributes are most relevant when fitting -polynomials to data, though it should be noted that polynomials with -different ``domain`` and ``window`` attributes are not considered equal, and -can't be mixed in arithmetic:: + In addition to the ``coef`` attribute, polynomials from the polynomial + package also have ``domain`` and ``window`` attributes. + These attributes are most relevant when fitting + polynomials to data, though it should be noted that polynomials with + different ``domain`` and ``window`` attributes are not considered equal, and + can't be mixed in arithmetic: >>> p1 = np.polynomial.Polynomial([1, 2, 3]) >>> p1 diff --git a/doc/source/reference/routines.rec.rst b/doc/source/reference/routines.rec.rst index 21700332418b..aa3a715f47a9 100644 --- a/doc/source/reference/routines.rec.rst +++ b/doc/source/reference/routines.rec.rst @@ -11,17 +11,18 @@ Record arrays expose the fields of structured arrays as properties. Most commonly, ndarrays contain elements of a single type, e.g. floats, integers, bools etc. However, it is possible for elements to be combinations -of these using structured types, such as:: +of these using structured types, such as: - >>> a = np.array([(1, 2.0), (1, 2.0)], + >>> import numpy as np + >>> a = np.array([(1, 2.0), (1, 2.0)], ... dtype=[('x', np.int64), ('y', np.float64)]) >>> a array([(1, 2.), (1, 2.)], dtype=[('x', '>> a['x'] array([1, 1]) @@ -29,13 +30,11 @@ one would a dictionary:: >>> a['y'] array([2., 2.]) -Record arrays allow us to access fields as properties:: + Record arrays allow us to access fields as properties: >>> ar = np.rec.array(a) - >>> ar.x array([1, 1]) - >>> ar.y array([2., 2.]) @@ -55,4 +54,3 @@ Functions Also, the `numpy.recarray` class and the `numpy.record` scalar dtype are present in this namespace. 
- diff --git a/doc/source/reference/routines.strings.rst b/doc/source/reference/routines.strings.rst index 635a01fa1254..f0af9475d10f 100644 --- a/doc/source/reference/routines.strings.rst +++ b/doc/source/reference/routines.strings.rst @@ -31,9 +31,27 @@ String operations :toctree: generated/ add + center + capitalize + decode + encode + expandtabs + ljust + lower lstrip + mod + multiply + partition + replace + rjust + rpartition rstrip strip + swapcase + title + translate + upper + zfill Comparison ---------- @@ -60,11 +78,17 @@ String information count endswith find + index + isalnum isalpha isdecimal isdigit + islower isnumeric isspace + istitle + isupper rfind + rindex startswith str_len diff --git a/doc/source/reference/routines.version.rst b/doc/source/reference/routines.version.rst new file mode 100644 index 000000000000..72c48a752cf6 --- /dev/null +++ b/doc/source/reference/routines.version.rst @@ -0,0 +1,38 @@ +.. currentmodule:: numpy.version + +.. _routines.version: + +******************* +Version information +******************* + +The ``numpy.version`` submodule includes several constants that expose more +detailed information about the exact version of the installed ``numpy`` +package: + +.. data:: version + + Version string for the installed package - matches ``numpy.__version__``. + +.. data:: full_version + + Version string - the same as ``numpy.version.version``. + +.. data:: short_version + + Version string without any local build identifiers. + + .. rubric:: Examples + + >>> np.__version__ + '2.1.0.dev0+git20240319.2ea7ce0' # may vary + >>> np.version.short_version + '2.1.0.dev0' # may vary + +.. data:: git_revision + + String containing the git hash of the commit from which ``numpy`` was built. + +.. data:: release + + ``True`` if this version is a ``numpy`` release, ``False`` if a dev version. 
diff --git a/doc/source/reference/thread_safety.rst b/doc/source/reference/thread_safety.rst new file mode 100644 index 000000000000..84590bfac39c --- /dev/null +++ b/doc/source/reference/thread_safety.rst @@ -0,0 +1,51 @@ +.. _thread_safety: + +************* +Thread Safety +************* + +NumPy supports use in a multithreaded context via the `threading` module in the +standard library. Many NumPy operations release the GIL, so unlike many +situations in Python, it is possible to improve parallel performance by +exploiting multithreaded parallelism in Python. + +The easiest performance gains happen when each worker thread owns its own array +or set of array objects, with no data directly shared between threads. Because +NumPy releases the GIL for many low-level operations, threads that spend most of +the time in low-level code will run in parallel. + +It is possible to share NumPy arrays between threads, but extreme care must be +taken to avoid creating thread safety issues when mutating arrays that are +shared between multiple threads. If two threads simultaneously read from and +write to the same array, they will at best produce inconsistent, racy results that +are not reproducible, let alone correct. It is also possible to crash the Python +interpreter by, for example, resizing an array while another thread is reading +from it to compute a ufunc operation. + +In the future, we may add locking to ndarray to make writing multithreaded +algorithms using NumPy arrays safer, but for now we suggest focusing on +read-only access of arrays that are shared between threads, or adding your own +locking if you need both mutation and multithreading. + +Note that operations that *do not* release the GIL will see no performance gains +from use of the `threading` module, and instead might be better served with +`multiprocessing`. In particular, operations on arrays with ``dtype=object`` do +not release the GIL. + +Free-threaded Python +-------------------- + +.. 
versionadded:: 2.1 + +Starting with NumPy 2.1 and CPython 3.13, NumPy also has experimental support +for python runtimes with the GIL disabled. See +https://py-free-threading.github.io for more information about installing and +using free-threaded Python, as well as information about supporting it in +libraries that depend on NumPy. + +Because free-threaded Python does not have a global interpreter lock to +serialize access to Python objects, there are more opportunities for threads to +mutate shared state and create thread safety issues. In addition to the +limitations about locking of the ndarray object noted above, this also means +that arrays with ``dtype=object`` are not protected by the GIL, creating data +races for python objects that are not possible outside free-threaded python. diff --git a/doc/source/release.rst b/doc/source/release.rst index 41eeac87bf64..c990b7ab8076 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -5,6 +5,12 @@ Release notes .. toctree:: :maxdepth: 2 + 2.1.3 + 2.1.2 + 2.1.1 + 2.1.0 + 2.0.2 + 2.0.1 2.0.0 1.26.4 1.26.3 diff --git a/doc/source/release/2.0.0-notes.rst b/doc/source/release/2.0.0-notes.rst index e43e54fb9cbc..9d54513edb7c 100644 --- a/doc/source/release/2.0.0-notes.rst +++ b/doc/source/release/2.0.0-notes.rst @@ -12,10 +12,10 @@ NumPy 2.0.0 Release Notes and those full notes should be complete (if not copy-edited well enough yet). -NumPy 2.0.0 is the first major release since 2006. It is the result of X months -of development since the last feature release by Y contributors, and contains a -large amount of exciting new features as well as a large amount of changes to -both the Python and C APIs. +NumPy 2.0.0 is the first major release since 2006. It is the result of 11 +months of development since the last feature release and is the work of 212 +contributors spread over 1078 pull requests. It contains a large number of +exciting new features as well as changes to both the Python and C APIs. 
This major release includes breaking changes that could not happen in a regular minor (feature) release - including an ABI break, changes to type promotion @@ -50,10 +50,13 @@ Highlights of this release include: that are about 3 times smaller, - `numpy.char` fixed-length string operations have been accelerated by implementing ufuncs that also support `~numpy.dtypes.StringDType` in - addition to the the fixed-length string dtypes, + addition to the fixed-length string dtypes, - A new tracing and introspection API, `~numpy.lib.introspect.opt_func_info`, to determine which hardware-specific kernels are available and will be dispatched to. + - `numpy.save` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. - Python API improvements: @@ -76,8 +79,8 @@ Highlights of this release include: - Improved behavior: - - Improvements to type promotion behavior was changed by adopting `NEP - 50 `_. This fixes many user surprises about promotions which + - Improvements to type promotion behavior was changed by adopting :ref:`NEP + 50 `. This fixes many user surprises about promotions which previously often depended on data values of input arrays rather than only their dtypes. Please see the NEP and the :ref:`numpy-2-migration-guide` for details as this change can lead to changes in output dtypes and lower @@ -88,7 +91,7 @@ Highlights of this release include: - Documentation: - - The reference guide navigation was signficantly improved, and there is now + - The reference guide navigation was significantly improved, and there is now documentation on NumPy's :ref:`module structure `, - The :ref:`building from source ` documentation was completely rewritten, @@ -112,7 +115,7 @@ API and behavior improvements and better future extensibility. This price is: 2. Breaking changes to the NumPy ABI. 
As a result, binaries of packages that use the NumPy C API and were built against a NumPy 1.xx release will not work with NumPy 2.0. On import, such packages will see an ``ImportError`` - with a message about binary incompatibiliy. + with a message about binary incompatibility. It is possible to build binaries against NumPy 2.0 that will work at runtime with both NumPy 2.0 and 1.x. See :ref:`numpy-2-abi-handling` for more details. @@ -149,6 +152,10 @@ NumPy 2.0 Python API removals (`gh-24321 `__) +* Warnings and exceptions present in `numpy.exceptions` (e.g, + `~numpy.exceptions.ComplexWarning`, + `~numpy.exceptions.VisibleDeprecationWarning`) are no longer exposed in the + main namespace. * Multiple niche enums, expired members and functions have been removed from the main namespace, such as: ``ERR_*``, ``SHIFT_*``, ``np.fastCopyAndTranspose``, ``np.kernel_version``, ``np.numarray``, ``np.oldnumeric`` and ``np.set_numeric_ops``. @@ -202,7 +209,8 @@ NumPy 2.0 Python API removals * ``np.tracemalloc_domain`` is now only available from ``np.lib``. -* ``np.recfromcsv`` and ``recfromtxt`` are now only available from ``np.lib.npyio``. +* ``np.recfromcsv`` and ``np.recfromtxt`` were removed from the main namespace. + Use ``np.genfromtxt`` with comma delimiter instead. * ``np.issctype``, ``np.maximum_sctype``, ``np.obj2sctype``, ``np.sctype2char``, ``np.sctypes``, ``np.issubsctype`` were all removed from the @@ -251,9 +259,9 @@ NumPy 2.0 Python API removals (`gh-25911 `__) + ``__array_prepare__`` is removed -------------------------------- - UFuncs called ``__array_prepare__`` before running computations for normal ufunc calls (not generalized ufuncs, reductions, etc.). The function was also called instead of ``__array_wrap__`` on the @@ -272,6 +280,15 @@ Deprecations * ``np.compat`` has been deprecated, as Python 2 is no longer supported. +* ``numpy.int8`` and similar classes will no longer support conversion of + out of bounds python integers to integer arrays. 
For example, + conversion of 255 to int8 will not return -1. + ``numpy.iinfo(dtype)`` can be used to check the machine limits for data types. + For example, ``np.iinfo(np.uint16)`` returns min = 0 and max = 65535. + + ``np.array(value).astype(dtype)`` will give the desired result. + + * ``np.safe_eval`` has been deprecated. ``ast.literal_eval`` should be used instead. (`gh-23830 `__) @@ -294,7 +311,7 @@ Deprecations support for implementations not accepting all three are deprecated. Its signature should be ``__array_wrap__(self, arr, context=None, return_scalar=False)`` - (`gh-25408 `__) + (`gh-25409 `__) * Arrays of 2-dimensional vectors for ``np.cross`` have been deprecated. Use arrays of 3-dimensional vectors instead. @@ -312,9 +329,9 @@ Deprecations (`gh-24978 `__) -`numpy.fft` deprecations for n-D transforms with None values in arguments -------------------------------------------------------------------------- +``numpy.fft`` deprecations for n-D transforms with None values in arguments +--------------------------------------------------------------------------- Using ``fftn``, ``ifftn``, ``rfftn``, ``irfftn``, ``fft2``, ``ifft2``, ``rfft2`` or ``irfft2`` with the ``s`` parameter set to a value that is not ``None`` and the ``axes`` parameter set to ``None`` has been deprecated, in @@ -330,9 +347,9 @@ axis, the ``s`` argument can be omitted. (`gh-25495 `__) + ``np.linalg.lstsq`` now defaults to a new ``rcond`` value --------------------------------------------------------- - `~numpy.linalg.lstsq` now uses the new rcond value of the machine precision times ``max(M, N)``. Previously, the machine precision was used but a FutureWarning was given to notify that this change will happen eventually. @@ -396,7 +413,6 @@ Compatibility notes ``loadtxt`` and ``genfromtxt`` default encoding changed ------------------------------------------------------- - ``loadtxt`` and ``genfromtxt`` now both default to ``encoding=None`` which may mainly modify how ``converters`` work. 
These will now be passed ``str`` rather than ``bytes``. Pass the @@ -406,48 +422,39 @@ unicode strings rather than bytes. (`gh-25158 `__) + ``f2py`` compatibility notes ---------------------------- +* ``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI + combinations. When more than one ``.pyf`` file is passed, an error is + raised. When both ``-m`` and a ``.pyf`` is passed, a warning is emitted and + the ``-m`` provided name is ignored. -``f2py`` will no longer accept ambiguous ``-m`` and ``.pyf`` CLI combinations. -When more than one ``.pyf`` file is passed, an error is raised. When both ``-m`` -and a ``.pyf`` is passed, a warning is emitted and the ``-m`` provided name is -ignored. + (`gh-25181 `__) -(`gh-25181 `__) +* The ``f2py.compile()`` helper has been removed because it leaked memory, has + been marked as experimental for several years now, and was implemented as a + thin ``subprocess.run`` wrapper. It was also one of the test bottlenecks. See + `gh-25122 `_ for the full + rationale. It also used several ``np.distutils`` features which are too + fragile to be ported to work with ``meson``. -The ``f2py.compile()`` helper has been removed because it leaked memory, has -been marked as experimental for several years now, and was implemented as a thin -``subprocess.run`` wrapper. It is also one of the test bottlenecks. See -`gh-25122 `_ for the full -rationale. It also used several ``np.distutils`` features which are too fragile -to be ported to work with ``meson``. +* Users are urged to replace calls to ``f2py.compile`` with calls to + ``subprocess.run("python", "-m", "numpy.f2py",...`` instead, and to use + environment variables to interact with ``meson``. `Native files + `_ are also an option. -Users are urged to replace calls to ``f2py.compile`` with calls to -``subprocess.run("python", "-m", "numpy.f2py",...`` instead, and to use -environment variables to interact with ``meson``. `Native files -`_ are also an option. 
+ (`gh-25193 `__) -(`gh-25193 `__) - -``arange``'s ``start`` argument is positional-only --------------------------------------------------- -The first argument of ``arange`` is now positional only. This way, -specifying a ``start`` argument as a keyword, e.g. ``arange(start=0, stop=4)``, -raises a TypeError. Other behaviors, are unchanged so ``arange(stop=4)``, -``arange(2, stop=4)`` and so on, are still valid and have the same meaning as -before. - -(`gh-25336 `__) Minor changes in behavior of sorting functions ---------------------------------------------- - Due to algorithmic changes and use of SIMD code, sorting functions with methods that aren't stable may return slightly different results in 2.0.0 compared to 1.26.x. This includes the default method of `~numpy.argsort` and `~numpy.argpartition`. + Removed ambiguity when broadcasting in ``np.solve`` --------------------------------------------------- The broadcasting rules for ``np.solve(a, b)`` were ambiguous when ``b`` had 1 @@ -457,6 +464,7 @@ reconstructed by using ``np.solve(a, b[..., None])[..., 0]``. (`gh-25914 `__) + Modified representation for ``Polynomial`` ------------------------------------------ The representation method for `~numpy.polynomial.polynomial.Polynomial` was @@ -473,6 +481,7 @@ C API changes * The ``PyArray_CGT``, ``PyArray_CLT``, ``PyArray_CGE``, ``PyArray_CLE``, ``PyArray_CEQ``, ``PyArray_CNE`` macros have been removed. + * ``PyArray_MIN`` and ``PyArray_MAX`` have been moved from ``ndarraytypes.h`` to ``npy_math.h``. @@ -482,6 +491,7 @@ C API changes This includes functions for acquiring and releasing mutexes which lock access to the string data, as well as packing and unpacking UTF-8 bytestreams from array entries. + * ``NPY_NTYPES`` has been renamed to ``NPY_NTYPES_LEGACY`` as it does not include new NumPy built-in DTypes. In particular the new string DType will likely not work correctly with code that handles legacy DTypes. 
@@ -515,6 +525,7 @@ C API changes after including ``numpy/ndarrayobject.h`` as it requires ``import_array()``. This includes ``PyDataType_FLAGCHK``, ``PyDataType_REFCHK`` and ``NPY_BEGIN_THREADS_DESCR``. + * The dtype flags on ``PyArray_Descr`` must now be accessed through the ``PyDataType_FLAGS`` inline function to be compatible with both 1.x and 2.x. This function is defined in ``npy_2_compat.h`` to allow backporting. @@ -525,9 +536,9 @@ C API changes (`gh-25816 `__) + Datetime functionality exposed in the C API and Cython bindings --------------------------------------------------------------- - The functions ``NpyDatetime_ConvertDatetime64ToDatetimeStruct``, ``NpyDatetime_ConvertDatetimeStructToDatetime64``, ``NpyDatetime_ConvertPyDateTimeToDatetimeStruct``, @@ -538,9 +549,9 @@ external libraries. (`gh-21199 `__) + Const correctness for the generalized ufunc C API ------------------------------------------------- - The NumPy C API's functions for constructing generalized ufuncs (``PyUFunc_FromFuncAndData``, ``PyUFunc_FromFuncAndDataAndSignature``, ``PyUFunc_FromFuncAndDataAndSignatureAndIdentity``) take ``types`` and ``data`` @@ -553,9 +564,9 @@ code may be. (`gh-23847 `__) + Larger ``NPY_MAXDIMS`` and ``NPY_MAXARGS``, ``NPY_RAVEL_AXIS`` introduced ------------------------------------------------------------------------- - ``NPY_MAXDIMS`` is now 64, you may want to review its use. This is usually used in a stack allocation, where the increase should be safe. However, we do encourage generally to remove any use of ``NPY_MAXDIMS`` and @@ -566,9 +577,9 @@ replaced with ``NPY_RAVEL_AXIS``. See also :ref:`migration_maxdims`. (`gh-25149 `__) + ``NPY_MAXARGS`` not constant and ``PyArrayMultiIterObject`` size change ----------------------------------------------------------------------- - Since ``NPY_MAXARGS`` was increased, it is now a runtime constant and not compile-time constant anymore. We expect almost no users to notice this. 
But if used for stack allocations @@ -581,9 +592,9 @@ to avoid issues with Cython. (`gh-25271 `__) + Required changes for custom legacy user dtypes ---------------------------------------------- - In order to improve our DTypes it is unfortunately necessary to break the ABI, which requires some changes for dtypes registered with ``PyArray_RegisterDataType``. @@ -592,9 +603,9 @@ to adapt your code and achieve compatibility with both 1.x and 2.x. (`gh-25792 `__) + New Public DType API -------------------- - The C implementation of the NEP 42 DType API is now public. While the DType API has shipped in NumPy for a few versions, it was only usable in sessions with a special environment variable set. It is now possible to write custom DTypes @@ -608,9 +619,9 @@ be updated to work correctly with new DTypes. (`gh-25754 `__) + New C-API import functions -------------------------- - We have now added ``PyArray_ImportNumPyAPI`` and ``PyUFunc_ImportUFuncAPI`` as static inline functions to import the NumPy C-API tables. The new functions have two advantages over ``import_array`` and @@ -642,7 +653,7 @@ The ``metadata`` field is kept, but the macro version should also be preferred. Descriptor ``elsize`` and ``alignment`` access ---------------------------------------------- -Unless compiling only with NumPy 2 support, the ``elsize`` and ``aligment`` +Unless compiling only with NumPy 2 support, the ``elsize`` and ``alignment`` fields must now be accessed via ``PyDataType_ELSIZE``, ``PyDataType_SET_ELSIZE``, and ``PyDataType_ALIGNMENT``. In cases where the descriptor is attached to an array, we advise @@ -659,6 +670,7 @@ NumPy 2.0 C API removals have been removed. We recommend querying ``PyErr_CheckSignals()`` or ``PyOS_InterruptOccurred()`` periodically (these do currently require holding the GIL though). + * The ``noprefix.h`` header has been removed. Replace missing symbols with their prefixed counterparts (usually an added ``NPY_`` or ``npy_``). 
@@ -712,56 +724,58 @@ NumPy 2.0 C API removals * ``PyArrayFlags_Type`` and ``PyArray_NewFlagsObject`` as well as ``PyArrayFlagsObject`` are private now. There is no known use-case; use the Python API if needed. + * ``PyArray_MoveInto``, ``PyArray_CastTo``, ``PyArray_CastAnyTo`` are removed use ``PyArray_CopyInto`` and if absolutely needed ``PyArray_CopyAnyInto`` (the latter does a flat copy). -* ``PyArray_FillObjectArray`` is removed, its only true use is for + +* ``PyArray_FillObjectArray`` is removed, its only true use was for implementing ``np.empty``. Create a new empty array or use ``PyArray_FillWithScalar()`` (decrefs existing objects). + * ``PyArray_CompareUCS4`` and ``PyArray_CompareString`` are removed. Use the standard C string comparison functions. + * ``PyArray_ISPYTHON`` is removed as it is misleading, has no known use-cases, and is easy to replace. + * ``PyArray_FieldNames`` is removed, as it is unclear what it would be useful for. It also has incorrect semantics in some possible use-cases. + * ``PyArray_TypestrConvert`` is removed, since it seems a misnomer and unlikely to be used by anyone. If you know the size or are limited to few types, just use it explicitly, otherwise go via Python strings. (`gh-25292 `__) - -* ``PyDataType_GetDatetimeMetaData`` has been removed, it did not actually +* ``PyDataType_GetDatetimeMetaData`` is removed, it did not actually do anything since at least NumPy 1.7. (`gh-25802 `__) -``PyArray_GetCastFunc`` was removed ------------------------------------ +* ``PyArray_GetCastFunc`` is removed. Note that custom legacy user dtypes + can still provide a castfunc as their implementation, but any access to them + is now removed. The reason for this is that NumPy never used these + internally for many years. If you use simple numeric types, please just use + C casts directly. 
In case you require an alternative, please let us know so + we can create new API such as ``PyArray_CastBuffer()`` which could use old or + new cast functions depending on the NumPy version. -Note that custom legacy user dtypes can still provide a castfunc -as their implementation, but any access to them is now removed. -The reason for this is that NumPy never used these internally -for many years. -If you use simple numeric types, please just use C casts directly. -In case you require an alternative, please let us know so we can -create new API such as ``PyArray_CastBuffer()`` which could -use old or new cast functions depending on the NumPy version. - -(`gh-25161 `__) + (`gh-25161 `__) New Features ============ -* ``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +``np.add`` was extended to work with ``unicode`` and ``bytes`` dtypes. +---------------------------------------------------------------------- (`gh-24858 `__) + A new ``bitwise_count`` function -------------------------------- - This new function counts the number of 1-bits in a number. `~numpy.bitwise_count` works on all the numpy integer types and integer-like objects. @@ -775,9 +789,9 @@ integer-like objects. (`gh-19355 `__) + macOS Accelerate support, including the ILP64 --------------------------------------------- - Support for the updated Accelerate BLAS/LAPACK library, including ILP64 (64-bit integer) support, in macOS 13.3 has been added. This brings arm64 support, and significant performance improvements of up to 10x for commonly used linear @@ -792,18 +806,18 @@ PyPI will get wheels built against Accelerate rather than OpenBLAS. (`gh-25255 `__) + Option to use weights for quantile and percentile functions ----------------------------------------------------------- - A ``weights`` keyword is now available for `~numpy.quantile`, `~numpy.percentile`, `~numpy.nanquantile` and `~numpy.nanpercentile`. Only ``method="inverted_cdf"`` supports weights. 
(`gh-24254 `__) + Improved CPU optimization tracking ---------------------------------- - A new tracer mechanism is available which enables tracking of the enabled targets for each optimized function (i.e., that uses hardware-specific SIMD instructions) in the NumPy library. With this enhancement, it becomes possible @@ -817,9 +831,9 @@ and data type signatures. (`gh-24420 `__) + A new Meson backend for ``f2py`` -------------------------------- - ``f2py`` in compile mode (i.e. ``f2py -c``) now accepts the ``--backend meson`` option. This is the default option for Python >=3.12. For older Python versions, ``f2py`` will still default to ``--backend distutils``. @@ -832,9 +846,9 @@ There are no changes for users of ``f2py`` only as a code generator, i.e. withou (`gh-24532 `__) + ``bind(c)`` support for ``f2py`` -------------------------------- - Both functions and subroutines can be annotated with ``bind(c)``. ``f2py`` will handle both the correct type mapping, and preserve the unique label for other C interfaces. @@ -846,9 +860,9 @@ Fortran. (`gh-24555 `__) + A new ``strict`` option for several testing functions ----------------------------------------------------- - The ``strict`` keyword is now available for `~numpy.testing.assert_allclose`, `~numpy.testing.assert_equal`, and `~numpy.testing.assert_array_less`. Setting ``strict=True`` will disable the broadcasting behaviour for scalars @@ -858,6 +872,7 @@ and ensure that input arrays have the same data type. `gh-24770 `__, `gh-24775 `__) + Add ``np.core.umath.find`` and ``np.core.umath.rfind`` UFuncs ------------------------------------------------------------- Add two ``find`` and ``rfind`` UFuncs that operate on unicode or byte strings @@ -866,9 +881,9 @@ and are used in ``np.char``. 
They operate similar to ``str.find`` and (`gh-24868 `__) -``diagonal`` and ``trace`` for `numpy.linalg` ---------------------------------------------- +``diagonal`` and ``trace`` for ``numpy.linalg`` +----------------------------------------------- `numpy.linalg.diagonal` and `numpy.linalg.trace` have been added, which are array API standard-compatible variants of `numpy.diagonal` and `numpy.trace`. They differ in the default axis selection which define 2-D @@ -876,18 +891,18 @@ sub-arrays. (`gh-24887 `__) + New ``long`` and ``ulong`` dtypes --------------------------------- - `numpy.long` and `numpy.ulong` have been added as NumPy integers mapping to C's ``long`` and ``unsigned long``. Prior to NumPy 1.24, ``numpy.long`` was an alias to Python's ``int``. (`gh-24922 `__) -``svdvals`` for `numpy.linalg` ------------------------------- +``svdvals`` for ``numpy.linalg`` +-------------------------------- `numpy.linalg.svdvals` has been added. It computes singular values for (a stack of) matrices. Executing ``np.svdvals(x)`` is the same as calling ``np.svd(x, compute_uv=False, hermitian=False)``. @@ -895,25 +910,25 @@ This function is compatible with the array API standard. (`gh-24940 `__) + A new ``isdtype`` function -------------------------- - `numpy.isdtype` was added to provide a canonical way to classify NumPy's dtypes in compliance with the array API standard. (`gh-25054 `__) + A new ``astype`` function ------------------------- - `numpy.astype` was added to provide an array API standard-compatible alternative to the `numpy.ndarray.astype` method. (`gh-25079 `__) + Array API compatible functions' aliases --------------------------------------- - 13 aliases for existing functions were added to improve compatibility with the array API standard: * Trigonometry: ``acos``, ``acosh``, ``asin``, ``asinh``, ``atan``, ``atanh``, ``atan2``. 
@@ -926,9 +941,9 @@ Array API compatible functions' aliases (`gh-25086 `__) + New ``unique_*`` functions -------------------------- - The `~numpy.unique_all`, `~numpy.unique_counts`, `~numpy.unique_inverse`, and `~numpy.unique_values` functions have been added. They provide functionality of `~numpy.unique` with different sets of flags. They are array API @@ -938,9 +953,9 @@ compilation. (`gh-25088 `__) + Matrix transpose support for ndarrays ------------------------------------- - NumPy now offers support for calculating the matrix transpose of an array (or stack of arrays). The matrix transpose is equivalent to swapping the last two axes of an array. Both ``np.ndarray`` and ``np.ma.MaskedArray`` now expose a @@ -949,9 +964,9 @@ function. (`gh-23762 `__) + Array API compatible functions for ``numpy.linalg`` --------------------------------------------------- - Six new functions and two aliases were added to improve compatibility with the Array API standard for `numpy.linalg`: @@ -980,18 +995,18 @@ the Array API standard for `numpy.linalg`: (`gh-25145 `__) + A ``correction`` argument for ``var`` and ``std`` ------------------------------------------------- - A ``correction`` argument was added to `~numpy.var` and `~numpy.std`, which is an array API standard compatible alternative to ``ddof``. As both arguments serve a similar purpose, only one of them can be provided at the same time. (`gh-25169 `__) + ``ndarray.device`` and ``ndarray.to_device`` -------------------------------------------- - An ``ndarray.device`` attribute and ``ndarray.to_device`` method were added to ``numpy.ndarray`` for array API standard compatibility. @@ -1004,9 +1019,9 @@ For all these new arguments, only ``device="cpu"`` is supported. 
(`gh-25233 `__) + StringDType has been added to NumPy ----------------------------------- - We have added a new variable-width UTF-8 encoded string data type, implementing a "NumPy array of Python strings", including support for a user-provided missing data sentinel. It is intended as a drop-in replacement for arrays of Python @@ -1016,9 +1031,9 @@ documentation ` for more details. (`gh-25347 `__) + New keywords for ``cholesky`` and ``pinv`` ------------------------------------------ - The ``upper`` and ``rtol`` keywords were added to `numpy.linalg.cholesky` and `numpy.linalg.pinv`, respectively, to improve array API standard compatibility. @@ -1028,9 +1043,9 @@ the future. (`gh-25388 `__) + New keywords for ``sort``, ``argsort`` and ``linalg.matrix_rank`` ----------------------------------------------------------------- - New keyword parameters were added to improve array API standard compatibility: * ``rtol`` was added to `~numpy.linalg.matrix_rank`. @@ -1039,9 +1054,9 @@ New keyword parameters were added to improve array API standard compatibility: (`gh-25437 `__) + New ``numpy.strings`` namespace for string ufuncs ------------------------------------------------- - NumPy now implements some string operations as ufuncs. The old ``np.char`` namespace is still available, and where possible the string manipulation functions in that namespace have been updated to use the new ufuncs, @@ -1053,9 +1068,9 @@ instead of ``np.char``. 
In the future we may deprecate ``np.char`` in favor of (`gh-25463 `__) -`numpy.fft` support for different precisions and in-place calculations ----------------------------------------------------------------------- +``numpy.fft`` support for different precisions and in-place calculations +------------------------------------------------------------------------ The various FFT routines in `numpy.fft` now do their calculations natively in float, double, or long double precision, depending on the input precision, instead of always calculating in double precision. Hence, the calculation will @@ -1067,9 +1082,9 @@ for in-place calculations. (`gh-25536 `__) + configtool and pkg-config support --------------------------------- - A new ``numpy-config`` CLI script is available that can be queried for the NumPy version and for compile flags needed to use the NumPy C API. This will allow build systems to better support the use of NumPy as a dependency. @@ -1079,9 +1094,9 @@ find its location for use with ``PKG_CONFIG_PATH``, use (`gh-25730 `__) + Array API standard support in the main namespace ------------------------------------------------ - The main ``numpy`` namespace now supports the array API standard. See :ref:`array-api-standard-compatibility` for details. @@ -1090,40 +1105,41 @@ The main ``numpy`` namespace now supports the array API standard. See Improvements ============ -* Strings are now supported by ``any``, ``all``, and the logical ufuncs. +Strings are now supported by ``any``, ``all``, and the logical ufuncs. +---------------------------------------------------------------------- (`gh-25651 `__) + Integer sequences as the shape argument for ``memmap`` ------------------------------------------------------ - `numpy.memmap` can now be created with any integer sequence as the ``shape`` argument, such as a list or numpy array of integers. Previously, only the types of tuple and int could be used without raising an error. 
(`gh-23729 `__) + ``errstate`` is now faster and context safe ------------------------------------------- - The `numpy.errstate` context manager/decorator is now faster and safer. Previously, it was not context safe and had (rare) issues with thread-safety. (`gh-23936 `__) + AArch64 quicksort speed improved by using Highway's VQSort ---------------------------------------------------------- - The first introduction of the Google Highway library, using VQSort on AArch64. Execution time is improved by up to 16x in some cases, see the PR for benchmark results. Extensions to other platforms will be done in the future. (`gh-24018 `__) + Complex types - underlying C type changes ----------------------------------------- - * The underlying C types for all of NumPy's complex types have been changed to use C99 complex types. @@ -1149,9 +1165,9 @@ Complex types - underlying C type changes (`gh-24085 `__) + ``iso_c_binding`` support and improved common blocks for ``f2py`` ----------------------------------------------------------------- - Previously, users would have to define their own custom ``f2cmap`` file to use type mappings defined by the Fortran2003 ``iso_c_binding`` intrinsic module. These type maps are now natively supported by ``f2py`` @@ -1164,27 +1180,27 @@ modules. This further expands the usability of intrinsics like (`gh-25186 `__) + Call ``str`` automatically on third argument to functions like ``assert_equal`` ------------------------------------------------------------------------------- - The third argument to functions like `~numpy.testing.assert_equal` now has ``str`` called on it automatically. This way it mimics the built-in ``assert`` statement, where ``assert_equal(a, b, obj)`` works like ``assert a == b, obj``. 
(`gh-24877 `__) + Support for array-like ``atol``/``rtol`` in ``isclose``, ``allclose`` --------------------------------------------------------------------- - The keywords ``atol`` and ``rtol`` in `~numpy.isclose` and `~numpy.allclose` now accept both scalars and arrays. An array, if given, must broadcast to the shapes of the first two array arguments. (`gh-24878 `__) + Consistent failure messages in test functions --------------------------------------------- - Previously, some `numpy.testing` assertions printed messages that referred to the actual and desired results as ``x`` and ``y``. Now, these values are consistently referred to as ``ACTUAL`` and @@ -1192,9 +1208,9 @@ Now, these values are consistently referred to as ``ACTUAL`` and (`gh-24931 `__) + n-D FFT transforms allow ``s[i] == -1`` --------------------------------------- - The `~numpy.fft.fftn`, `~numpy.fft.ifftn`, `~numpy.fft.rfftn`, `~numpy.fft.irfftn`, `~numpy.fft.fft2`, `~numpy.fft.ifft2`, `~numpy.fft.rfft2` and `~numpy.fft.irfft2` functions now use the whole input array along the axis @@ -1202,9 +1218,9 @@ and `~numpy.fft.irfft2` functions now use the whole input array along the axis (`gh-25495 `__) + Guard PyArrayScalar_VAL and PyUnicodeScalarObject for the limited API --------------------------------------------------------------------- - ``PyUnicodeScalarObject`` holds a ``PyUnicodeObject``, which is not available when using ``Py_LIMITED_API``. Add guards to hide it and consequently also make the ``PyArrayScalar_VAL`` macro hidden. @@ -1222,6 +1238,7 @@ Changes * Being fully context and thread-safe, ``np.errstate`` can only be entered once now. + * ``np.setbufsize`` is now tied to ``np.errstate()``: leaving an ``np.errstate`` context will also reset the ``bufsize``. 
@@ -1248,9 +1265,9 @@ Changes (`gh-25816 `__) + Representation of NumPy scalars changed --------------------------------------- - As per :ref:`NEP 51 `, the scalar representation has been updated to include the type information to avoid confusion with Python scalars. @@ -1268,9 +1285,9 @@ to facilitate updates. (`gh-22449 `__) + Truthiness of NumPy strings changed ----------------------------------- - NumPy strings previously were inconsistent about how they defined if the string is ``True`` or ``False`` and the definition did not match the one used by Python. @@ -1298,9 +1315,9 @@ The change does affect ``np.fromregex`` as it uses direct assignments. (`gh-23871 `__) + A ``mean`` keyword was added to var and std function ---------------------------------------------------- - Often when the standard deviation is needed the mean is also needed. The same holds for the variance and the mean. Until now the mean is then calculated twice, the change introduced here for the `~numpy.var` and `~numpy.std` functions @@ -1309,18 +1326,18 @@ docstrings for details and an example illustrating the speed-up. (`gh-24126 `__) + Remove datetime64 deprecation warning when constructing with timezone --------------------------------------------------------------------- - The `numpy.datetime64` method now issues a UserWarning rather than a DeprecationWarning whenever a timezone is included in the datetime string that is provided. (`gh-24193 `__) + Default integer dtype is now 64-bit on 64-bit Windows ----------------------------------------------------- - The default NumPy integer is now 64-bit on all 64-bit systems as the historic 32-bit default on Windows was a common source of issues. Most users should not notice this. The main issues may occur with code interfacing with libraries @@ -1329,6 +1346,7 @@ written in a compiled language like C. 
For more information see (`gh-24224 `__) + Renamed ``numpy.core`` to ``numpy._core`` ----------------------------------------- Accessing ``numpy.core`` now emits a DeprecationWarning. In practice @@ -1349,9 +1367,9 @@ the ``NPY_RELAXED_STRIDES_DEBUG`` environment variable or the (`gh-24717 `__) + Redefinition of ``np.intp``/``np.uintp`` (almost never a change) ---------------------------------------------------------------- - Due to the actual use of these types almost always matching the use of ``size_t``/``Py_ssize_t`` this is now the definition in C. Previously, it matched ``intptr_t`` and ``uintptr_t`` which would often @@ -1371,24 +1389,25 @@ However, it means that: (`gh-24888 `__) + ``numpy.fft.helper`` made private --------------------------------- - ``numpy.fft.helper`` was renamed to ``numpy.fft._helper`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.fft`. (`gh-24945 `__) + ``numpy.linalg.linalg`` made private ------------------------------------ - ``numpy.linalg.linalg`` was renamed to ``numpy.linalg._linalg`` to indicate that it is a private submodule. All public functions exported by it should be accessed from `numpy.linalg`. (`gh-24946 `__) + Out-of-bound axis not the same as ``axis=None`` ----------------------------------------------- In some cases ``axis=32`` or for concatenate any large value @@ -1401,9 +1420,9 @@ Any out of bound axis value will now error, make sure to use .. _copy-keyword-changes-2.0: + New ``copy`` keyword meaning for ``array`` and ``asarray`` constructors ----------------------------------------------------------------------- - Now `numpy.array` and `numpy.asarray` support three values for ``copy`` parameter: * ``None`` - A copy will only be made if it is necessary. @@ -1414,9 +1433,9 @@ The meaning of ``False`` changed as it now raises an exception if a copy is need (`gh-25168 `__) + The ``__array__`` special method now takes a ``copy`` keyword argument. 
----------------------------------------------------------------------- - NumPy will pass ``copy`` to the ``__array__`` special method in situations where it would be set to a non-default value (e.g. in a call to ``np.asarray(some_object, copy=False)``). Currently, if an @@ -1428,9 +1447,9 @@ argument with the same meaning as when passed to `numpy.array` or (`gh-25168 `__) + Cleanup of initialization of ``numpy.dtype`` with strings with commas --------------------------------------------------------------------- - The interpretation of strings with commas is changed slightly, in that a trailing comma will now always create a structured dtype. E.g., where previously ``np.dtype("i")`` and ``np.dtype("i,")`` were treated as identical, @@ -1447,9 +1466,9 @@ case for initializations without a comma, like ``np.dtype("(2)i")``. (`gh-25434 `__) + Change in how complex sign is calculated ---------------------------------------- - Following the array API standard, the complex sign is now calculated as ``z / |z|`` (instead of the rather less logical case where the sign of the real part was taken, unless the real part was zero, in which case @@ -1458,9 +1477,9 @@ zero is returned if ``z==0``. (`gh-25441 `__) + Return types of functions that returned a list of arrays -------------------------------------------------------- - Functions that returned a list of ndarrays have been changed to return a tuple of ndarrays instead. Returning tuples consistently whenever a sequence of arrays is returned makes it easier for JIT compilers like Numba, as well as for @@ -1469,20 +1488,26 @@ functions are: `~numpy.atleast_1d`, `~numpy.atleast_2d`, `~numpy.atleast_3d`, `~numpy.broadcast_arrays`, `~numpy.meshgrid`, `~numpy.ogrid`, `~numpy.histogramdd`. 
+ ``np.unique`` ``return_inverse`` shape for multi-dimensional inputs ------------------------------------------------------------------- - When multi-dimensional inputs are passed to ``np.unique`` with ``return_inverse=True``, the ``unique_inverse`` output is now shaped such that the input can be reconstructed directly using ``np.take(unique, unique_inverse)`` when ``axis=None``, and ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise. -(`gh-25553 `__, +.. note:: + This change was reverted in 2.0.1 except for ``axis=None``. The correct + reconstruction is always ``np.take(unique, unique_inverse, axis=axis)``. + When 2.0.0 needs to be supported, add ``unique_inverse.reshape(-1)`` + to code. + +(`gh-25553 `__, `gh-25570 `__) + ``any`` and ``all`` return booleans for object arrays ----------------------------------------------------- - The ``any`` and ``all`` functions and methods now return booleans also for object arrays. Previously, they did a reduction which behaved like the Python ``or`` and @@ -1492,8 +1517,16 @@ to achieve the previous behavior. (`gh-25712 `__) +``np.can_cast`` cannot be called on Python int, float, or complex +----------------------------------------------------------------- +``np.can_cast`` cannot be called with Python int, float, or complex instances +anymore. This is because NEP 50 means that the result of ``can_cast`` must +not depend on the value passed in. +Unfortunately, for Python scalars whether a cast should be considered +``"same_kind"`` or ``"safe"`` may depend on the context and value so that +this is currently not implemented. +In some cases, this means you may have to add a specific path for: +``if type(obj) in (int, float, complex): ...``. +(`gh-26393 `__) -**Content from release note snippets in doc/release/upcoming_changes:** - -.. 
include:: notes-towncrier.rst diff --git a/doc/source/release/2.0.1-notes.rst b/doc/source/release/2.0.1-notes.rst new file mode 100644 index 000000000000..a49f2ee36abd --- /dev/null +++ b/doc/source/release/2.0.1-notes.rst @@ -0,0 +1,74 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.0.1 Release Notes +========================== + +NumPy 2.0.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.0 release. NumPy 2.0.1 is the last planned +release in the 2.0.x series, 2.1.0rc1 should be out shortly. + +The Python versions supported by this release are 3.9-3.12. + +Improvements +============ + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* @vahidmech + +* Alex Herbert + +* Charles Harris +* Giovanni Del Monte + +* Leo Singer +* Lysandros Nikolaou +* Matti Picus +* Nathan Goldbaum +* Patrick J. Roddy + +* Raghuveer Devulapalli +* Ralf Gommers +* Rostan Tabet + +* Sebastian Berg +* Tyler Reddy +* Yannik Wicke + + +Pull requests merged +==================== + +A total of 24 pull requests were merged for this release. 
+ +* `#26711 `__: MAINT: prepare 2.0.x for further development +* `#26792 `__: TYP: fix incorrect import in ``ma/extras.pyi`` stub +* `#26793 `__: DOC: Mention '1.25' legacy printing mode in ``set_printoptions`` +* `#26794 `__: DOC: Remove mention of NaN and NAN aliases from constants +* `#26821 `__: BLD: Fix x86-simd-sort build failure on openBSD +* `#26822 `__: BUG: Ensure output order follows input in numpy.fft +* `#26823 `__: TYP: fix missing sys import in numeric.pyi +* `#26832 `__: DOC: remove hack to override _add_newdocs_scalars (#26826) +* `#26835 `__: BUG: avoid side-effect of 'include complex.h' +* `#26836 `__: BUG: fix max_rows and chunked string/datetime reading in ``loadtxt`` +* `#26837 `__: BUG: fix PyArray_ImportNumPyAPI under -Werror=strict-prototypes +* `#26856 `__: DOC: Update some documentation +* `#26868 `__: BUG: fancy indexing copy +* `#26869 `__: BUG: Mismatched allocation domains in ``PyArray_FillWithScalar`` +* `#26870 `__: BUG: Handle --f77flags and --f90flags for meson [wheel build] +* `#26887 `__: BUG: Fix new DTypes and new string promotion when signature is... +* `#26888 `__: BUG: remove numpy.f2py from excludedimports +* `#26959 `__: BUG: Quantile closest_observation to round to nearest even order +* `#26960 `__: BUG: Fix off-by-one error in amount of characters in strip +* `#26961 `__: API: Partially revert unique with return_inverse +* `#26962 `__: BUG,MAINT: Fix utf-8 character stripping memory access +* `#26963 `__: BUG: Fix out-of-bound minimum offset for in1d table method +* `#26971 `__: BUG: fix f2py tests to work with v2 API +* `#26995 `__: BUG: Add object cast to avoid warning with limited API diff --git a/doc/source/release/2.0.2-notes.rst b/doc/source/release/2.0.2-notes.rst new file mode 100644 index 000000000000..ae5c26250ba7 --- /dev/null +++ b/doc/source/release/2.0.2-notes.rst @@ -0,0 +1,58 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.0.2 Release Notes +========================== + +NumPy 2.0.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.0.1 release. + +The Python versions supported by this release are 3.9-3.12. + + +Contributors +============ + +A total of 13 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Bruno Oliveira + +* Charles Harris +* Chris Sidebottom +* Christian Heimes + +* Christopher Sidebottom +* Mateusz Sokół +* Matti Picus +* Nathan Goldbaum +* Pieter Eendebak +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* Yair Chuchem + + + +Pull requests merged +==================== + +A total of 19 pull requests were merged for this release. + +* `#27000 `__: REL: Prepare for the NumPy 2.0.1 release [wheel build] +* `#27001 `__: MAINT: prepare 2.0.x for further development +* `#27021 `__: BUG: cfuncs.py: fix crash when sys.stderr is not available +* `#27022 `__: DOC: Fix migration note for ``alltrue`` and ``sometrue`` +* `#27061 `__: BUG: use proper input and output descriptor in array_assign_subscript... +* `#27073 `__: BUG: Mirror VQSORT_ENABLED logic in Quicksort +* `#27074 `__: BUG: Bump Highway to latest master +* `#27077 `__: BUG: Off by one in memory overlap check +* `#27122 `__: BUG: Use the new ``npyv_loadable_stride_`` functions for ldexp and... 
+* `#27126 `__: BUG: Bump Highway to latest +* `#27128 `__: BUG: add missing error handling in public_dtype_api.c +* `#27129 `__: BUG: fix another cast setup in array_assign_subscript +* `#27130 `__: BUG: Fix building NumPy in FIPS mode +* `#27131 `__: BLD: update vendored Meson for cross-compilation patches +* `#27146 `__: MAINT: Scipy openblas 0.3.27.44.4 +* `#27151 `__: BUG: Do not accidentally store dtype metadata in ``np.save`` +* `#27195 `__: REV: Revert undef I and document it +* `#27213 `__: BUG: Fix NPY_RAVEL_AXIS on backwards compatible NumPy 2 builds +* `#27279 `__: BUG: Fix array_equal for numeric and non-numeric scalar types + diff --git a/doc/source/release/2.1.0-notes.rst b/doc/source/release/2.1.0-notes.rst new file mode 100644 index 000000000000..bb9c71079062 --- /dev/null +++ b/doc/source/release/2.1.0-notes.rst @@ -0,0 +1,362 @@ +.. currentmodule:: numpy + +========================= +NumPy 2.1.0 Release Notes +========================= + +NumPy 2.1.0 provides support for the upcoming Python 3.13 release and drops +support for Python 3.9. In addition to the usual bug fixes and updated Python +support, it helps get us back into our usual release cycle after the extended +development of 2.0. The highlights for this release are: + +- Support for the array-api 2023.12 standard. +- Support for Python 3.13. +- Preliminary support for free threaded Python 3.13. + +Python versions 3.10-3.13 are supported in this release. + + +New functions +============= + +New function ``numpy.unstack`` +------------------------------ + +A new function ``np.unstack(array, axis=...)`` was added, which splits +an array into a tuple of arrays along an axis. It serves as the inverse +of `numpy.stack`. + +(`gh-26579 `__) + + +Deprecations +============ + +* The ``fix_imports`` keyword argument in ``numpy.save`` is deprecated. Since + NumPy 1.17, ``numpy.save`` uses a pickle protocol that no longer supports + Python 2, and ignored ``fix_imports`` keyword. 
This keyword is kept only + for backward compatibility. It is now deprecated. + + (`gh-26452 `__) + +* Passing non-integer inputs as the first argument of `bincount` is now + deprecated, because such inputs are silently cast to integers with no + warning about loss of precision. + + (`gh-27076 `__) + + +Expired deprecations +==================== + +* Scalars and 0D arrays are disallowed for ``numpy.nonzero`` and ``numpy.ndarray.nonzero``. + + (`gh-26268 `__) + +* ``set_string_function`` internal function was removed and ``PyArray_SetStringFunction`` + was stubbed out. + + (`gh-26611 `__) + + +C API changes +============= + +API symbols now hidden but customizable +--------------------------------------- +NumPy now defaults to hide the API symbols it adds to allow all NumPy API +usage. This means that by default you cannot dynamically fetch the NumPy API +from another library (this was never possible on windows). + +If you are experiencing linking errors related to ``PyArray_API`` or +``PyArray_RUNTIME_VERSION``, you can define the +``NPY_API_SYMBOL_ATTRIBUTE`` to opt-out of this change. + +If you are experiencing problems due to an upstream header including NumPy, +the solution is to make sure you ``#include "numpy/ndarrayobject.h"`` before +their header and import NumPy yourself based on ``including-the-c-api``. + +(`gh-26103 `__) + +Many shims removed from npy_3kcompat.h +-------------------------------------- +Many of the old shims and helper functions were removed from +``npy_3kcompat.h``. If you find yourself in need of these, vendor the previous +version of the file into your codebase. + +(`gh-26842 `__) + +New ``PyUFuncObject`` field ``process_core_dims_func`` +------------------------------------------------------ +The field ``process_core_dims_func`` was added to the structure +``PyUFuncObject``. For generalized ufuncs, this field can be set to a function +of type ``PyUFunc_ProcessCoreDimsFunc`` that will be called when the ufunc is +called. 
It allows the ufunc author to check that core dimensions satisfy +additional constraints, and to set output core dimension sizes if they have not +been provided. + +(`gh-26908 `__) + + +New Features +============ + +Preliminary Support for Free-Threaded CPython 3.13 +-------------------------------------------------- + +CPython 3.13 will be available as an experimental free-threaded build. See +https://py-free-threading.github.io, `PEP 703 +`_ and the `CPython 3.13 release notes +`_ for +more detail about free-threaded Python. + +NumPy 2.1 has preliminary support for the free-threaded build of CPython +3.13. This support was enabled by fixing a number of C thread-safety issues in +NumPy. Before NumPy 2.1, NumPy used a large number of C global static variables +to store runtime caches and other state. We have either refactored to avoid the +need for global state, converted the global state to thread-local state, or +added locking. + +Support for free-threaded Python does not mean that NumPy is thread +safe. Read-only shared access to ndarray should be safe. NumPy exposes shared +mutable state and we have not added any locking to the array object itself to +serialize access to shared state. Care must be taken in user code to avoid +races if you would like to mutate the same array in multiple threads. It is +certainly possible to crash NumPy by mutating an array simultaneously in +multiple threads, for example by calling a ufunc and the ``resize`` method +simultaneously. For now our guidance is: "don't do that". In the future we would +like to provide stronger guarantees. + +Object arrays in particular need special care, since the GIL +previously provided locking for object array access and no longer does. See +`Issue #27199 `_ for more +information about object arrays in the free-threaded build. 
+ +If you are interested in free-threaded Python, for example because you have a +multiprocessing-based workflow that you are interested in running with Python +threads, we encourage testing and experimentation. + +If you run into problems that you suspect are because of NumPy, please `open an +issue `_, checking first if +the bug also occurs in the "regular" non-free-threaded CPython 3.13 build. Many +threading bugs can also occur in code that releases the GIL; disabling the GIL +only makes it easier to hit threading bugs. + +(`gh-26157 `__) + +* ``numpy.reshape`` and ``numpy.ndarray.reshape`` now support ``shape`` and + ``copy`` arguments. + + (`gh-26292 `__) + +* NumPy now supports DLPack v1, support for older versions will + be deprecated in the future. + + (`gh-26501 `__) + +* ``numpy.asanyarray`` now supports ``copy`` and ``device`` arguments, matching + ``numpy.asarray``. + + (`gh-26580 `__) + +* ``numpy.printoptions``, ``numpy.get_printoptions``, and + ``numpy.set_printoptions`` now support a new option, ``override_repr``, for + defining custom ``repr(array)`` behavior. + + (`gh-26611 `__) + +* ``numpy.cumulative_sum`` and ``numpy.cumulative_prod`` were added as Array + API compatible alternatives for ``numpy.cumsum`` and ``numpy.cumprod``. The + new functions can include a fixed initial (zeros for ``sum`` and ones for + ``prod``) in the result. + + (`gh-26724 `__) + +* ``numpy.clip`` now supports ``max`` and ``min`` keyword arguments which are + meant to replace ``a_min`` and ``a_max``. Also, for ``np.clip(a)`` or + ``np.clip(a, None, None)`` a copy of the input array will be returned instead + of raising an error. + + (`gh-26724 `__) + +* ``numpy.astype`` now supports ``device`` argument. 
+ + (`gh-26724 `__) + +``f2py`` can generate freethreading-compatible C extensions +----------------------------------------------------------- +Pass ``--freethreading-compatible`` to the f2py CLI tool to produce a C +extension marked as compatible with the free threading CPython +interpreter. Doing so prevents the interpreter from re-enabling the GIL at +runtime when it imports the C extension. Note that ``f2py`` does not analyze +fortran code for thread safety, so you must verify that the wrapped fortran +code is thread safe before marking the extension as compatible. + +(`gh-26981 `__) + + +Improvements +============ + +``histogram`` auto-binning now returns bin sizes >=1 for integer input data +--------------------------------------------------------------------------- +For integer input data, bin sizes smaller than 1 result in spurious empty +bins. This is now avoided when the number of bins is computed using one of the +algorithms provided by ``histogram_bin_edges``. + +(`gh-12150 `__) + +``ndarray`` shape-type parameter is now covariant and bound to ``tuple[int, ...]`` +---------------------------------------------------------------------------------- +Static typing for ``ndarray`` is a long-term effort that continues +with this change. It is a generic type with type parameters for +the shape and the data type. Previously, the shape type parameter could be +any value. This change restricts it to a tuple of ints, as one would expect +from using ``ndarray.shape``. Further, the shape-type parameter has been +changed from invariant to covariant. This change also applies to the subtypes +of ``ndarray``, e.g. ``numpy.ma.MaskedArray``. See the +`typing docs `_ +for more information. 
+ +(`gh-26081 `__) + +``np.quantile`` with method ``closest_observation`` chooses nearest even order statistic +---------------------------------------------------------------------------------------- +This changes the definition of nearest for border cases from the nearest odd +order statistic to nearest even order statistic. The numpy implementation now +matches other reference implementations. + +(`gh-26656 `__) + +``lapack_lite`` is now thread safe +---------------------------------- +NumPy provides a minimal low-performance version of LAPACK named ``lapack_lite`` +that can be used if no BLAS/LAPACK system is detected at build time. + +Until now, ``lapack_lite`` was not thread safe. Single-threaded use cases did +not hit any issues, but running linear algebra operations in multiple threads +could lead to errors, incorrect results, or segfaults due to data races. + +We have added a global lock, serializing access to ``lapack_lite`` in multiple +threads. + +(`gh-26750 `__) + +The ``numpy.printoptions`` context manager is now thread and async-safe +----------------------------------------------------------------------- +In prior versions of NumPy, the printoptions were defined using a combination +of Python and C global variables. We have refactored so the state is stored in +a python ``ContextVar``, making the context manager thread and async-safe. + +(`gh-26846 `__) + +Type hinting ``numpy.polynomial`` +--------------------------------- +Starting from the 2.1 release, PEP 484 type annotations have been included for +the functions and convenience classes in ``numpy.polynomial`` and its +sub-packages. + +(`gh-26897 `__) + +Improved ``numpy.dtypes`` type hints +------------------------------------ +The type annotations for ``numpy.dtypes`` are now a better reflection of the +runtime: The ``numpy.dtype`` type-aliases have been replaced with specialized +``dtype`` *subtypes*, and the previously missing annotations for +``numpy.dtypes.StringDType`` have been added. 
+ +(`gh-27008 `__) + + +Performance improvements and changes +==================================== + +* ``numpy.save`` now uses pickle protocol version 4 for saving arrays with + object dtype, which allows for pickle objects larger than 4GB and improves + saving speed by about 5% for large arrays. + + (`gh-26388 `__) + +* OpenBLAS on x86_64 and i686 is built with fewer kernels. Based on + benchmarking, there are 5 clusters of performance around these kernels: + ``PRESCOTT NEHALEM SANDYBRIDGE HASWELL SKYLAKEX``. + + (`gh-27147 `__) + +* OpenBLAS on windows is linked without quadmath, simplifying licensing + + (`gh-27147 `__) + +* Due to a regression in OpenBLAS on windows, the performance improvements when + using multiple threads for OpenBLAS 0.3.26 were reverted. + + (`gh-27147 `__) + +``ma.cov`` and ``ma.corrcoef`` are now significantly faster +----------------------------------------------------------- +The private function has been refactored along with ``ma.cov`` and +``ma.corrcoef``. They are now significantly faster, particularly on large, +masked arrays. + +(`gh-26285 `__) + + +Changes +======= + +* As ``numpy.vecdot`` is now a ufunc it has a less precise signature. + This is due to the limitations of ufunc's typing stub. + + (`gh-26313 `__) + +* ``numpy.floor``, ``numpy.ceil``, and ``numpy.trunc`` now won't perform + casting to a floating dtype for integer and boolean dtype input arrays. + + (`gh-26766 `__) + +``ma.corrcoef`` may return a slightly different result +------------------------------------------------------ +A pairwise observation approach is currently used in ``ma.corrcoef`` to +calculate the standard deviations for each pair of variables. This has been +changed as it is being used to normalise the covariance, estimated using +``ma.cov``, which does not consider the observations for each variable in a +pairwise manner, rendering it unnecessary. 
The normalisation has been replaced +by the more appropriate standard deviation for each variable, which +significantly reduces the wall time, but will return slightly different +estimates of the correlation coefficients in cases where the observations +between a pair of variables are not aligned. However, it will return the same +estimates in all other cases, including returning the same correlation matrix +as ``corrcoef`` when using a masked array with no masked values. + +(`gh-26285 `__) + +Cast-safety fixes in ``copyto`` and ``full`` +-------------------------------------------- +``copyto`` now uses NEP 50 correctly and applies this to its cast safety. +Python integer to NumPy integer casts and Python float to NumPy float casts +are now considered "safe" even if assignment may fail or precision may be lost. +This means the following examples change slightly: + +* ``np.copyto(int8_arr, 1000)`` previously performed an unsafe/same-kind cast + of the Python integer. It will now always raise, to achieve an unsafe cast + you must pass an array or NumPy scalar. + +* ``np.copyto(uint8_arr, 1000, casting="safe")`` will raise an OverflowError + rather than a TypeError due to same-kind casting. + +* ``np.copyto(float32_arr, 1e300, casting="safe")`` will overflow to ``inf`` + (float32 cannot hold ``1e300``) rather than raising a TypeError. + +Further, only the dtype is used when assigning NumPy scalars (or 0-d arrays), +meaning that the following behaves differently: + +* ``np.copyto(float32_arr, np.float64(3.0), casting="safe")`` raises. + +* ``np.copyto(int8_arr, np.int64(100), casting="safe")`` raises. + Previously, NumPy checked whether the 100 fits the ``int8_arr``. + +This aligns ``copyto``, ``full``, and ``full_like`` with the correct NumPy 2 +behavior. 
+ +(`gh-27091 `__) + diff --git a/doc/source/release/2.1.1-notes.rst b/doc/source/release/2.1.1-notes.rst new file mode 100644 index 000000000000..79c63514695c --- /dev/null +++ b/doc/source/release/2.1.1-notes.rst @@ -0,0 +1,41 @@ +.. currentmodule:: numpy + +========================== +NumPy 2.1.1 Release Notes +========================== + +NumPy 2.1.1 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.0 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 7 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Andrew Nelson +* Charles Harris +* Mateusz Sokół +* Maximilian Weigand + +* Nathan Goldbaum +* Pieter Eendebak +* Sebastian Berg + +Pull requests merged +==================== + +A total of 10 pull requests were merged for this release. + +* `#27236 `__: REL: Prepare for the NumPy 2.1.0 release [wheel build] +* `#27252 `__: MAINT: prepare 2.1.x for further development +* `#27259 `__: BUG: revert unintended change in the return value of set_printoptions +* `#27266 `__: BUG: fix reference counting bug in __array_interface__ implementation… +* `#27267 `__: TST: Add regression test for missing descr in array-interface +* `#27276 `__: BUG: Fix #27256 and #27257 +* `#27278 `__: BUG: Fix array_equal for numeric and non-numeric scalar types +* `#27287 `__: MAINT: Update maintenance/2.1.x after the 2.0.2 release +* `#27303 `__: BLD: cp311- macosx_arm64 wheels [wheel build] +* `#27304 `__: BUG: f2py: better handle filtering of public/private subroutines + diff --git a/doc/source/release/2.1.2-notes.rst b/doc/source/release/2.1.2-notes.rst new file mode 100644 index 000000000000..1a187dbd3365 --- /dev/null +++ b/doc/source/release/2.1.2-notes.rst @@ -0,0 +1,48 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.1.2 Release Notes +========================== + +NumPy 2.1.2 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.1 release. + +The Python versions supported by this release are 3.10-3.13. + +Contributors +============ + +A total of 11 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Chris Sidebottom +* Ishan Koradia + +* João Eiras + +* Katie Rust + +* Marten van Kerkwijk +* Matti Picus +* Nathan Goldbaum +* Peter Hawkins +* Pieter Eendebak +* Slava Gorloff + + +Pull requests merged +==================== + +A total of 14 pull requests were merged for this release. + +* `#27333 `__: MAINT: prepare 2.1.x for further development +* `#27400 `__: BUG: apply critical sections around populating the dispatch cache +* `#27406 `__: BUG: Stub out get_build_msvc_version if distutils.msvccompiler... +* `#27416 `__: BUILD: fix missing include for std::ptrdiff_t for C++23 language... +* `#27433 `__: BLD: pin setuptools to avoid breaking numpy.distutils +* `#27437 `__: BUG: Allow unsigned shift argument for np.roll +* `#27439 `__: BUG: Disable SVE VQSort +* `#27471 `__: BUG: rfftn axis bug +* `#27479 `__: BUG: Fix extra decref of PyArray_UInt8DType. +* `#27480 `__: CI: use PyPI not scientific-python-nightly-wheels for CI doc... +* `#27481 `__: MAINT: Check for SVE support on demand +* `#27484 `__: BUG: initialize the promotion state to be weak +* `#27501 `__: MAINT: Bump pypa/cibuildwheel from 2.20.0 to 2.21.2 +* `#27506 `__: BUG: avoid segfault on bad arguments in ndarray.__array_function__ diff --git a/doc/source/release/2.1.3-notes.rst b/doc/source/release/2.1.3-notes.rst new file mode 100644 index 000000000000..cd797e0062a0 --- /dev/null +++ b/doc/source/release/2.1.3-notes.rst @@ -0,0 +1,81 @@ +.. 
currentmodule:: numpy + +========================== +NumPy 2.1.3 Release Notes +========================== + +NumPy 2.1.3 is a maintenance release that fixes bugs and regressions +discovered after the 2.1.2 release. + +The Python versions supported by this release are 3.10-3.13. + + +Improvements +============ + +* Fixed a number of issues around promotion for string ufuncs with StringDType + arguments. Mixing StringDType and the fixed-width DTypes using the string + ufuncs should now generate much more uniform results. + + (`gh-27636 `__) + + +Changes +======= + +* `numpy.fix` now won't perform casting to a floating data-type for integer + and boolean data-type input arrays. + + (`gh-26766 `__) + + +Contributors +============ + +A total of 15 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Abhishek Kumar + +* Austin + +* Benjamin A. Beasley + +* Charles Harris +* Christian Lorentzen +* Marcel Telka + +* Matti Picus +* Michael Davidsaver + +* Nathan Goldbaum +* Peter Hawkins +* Raghuveer Devulapalli +* Ralf Gommers +* Sebastian Berg +* dependabot[bot] +* kp2pml30 + + + +Pull requests merged +==================== + +A total of 21 pull requests were merged for this release. + +* `#27512 `__: MAINT: prepare 2.1.x for further development +* `#27537 `__: MAINT: Bump actions/cache from 4.0.2 to 4.1.1 +* `#27538 `__: MAINT: Bump pypa/cibuildwheel from 2.21.2 to 2.21.3 +* `#27539 `__: MAINT: MSVC does not support #warning directive +* `#27543 `__: BUG: Fix user dtype can-cast with python scalar during promotion +* `#27561 `__: DEV: bump ``python`` to 3.12 in environment.yml +* `#27562 `__: BLD: update vendored Meson to 1.5.2 +* `#27563 `__: BUG: weighted quantile for some zero weights (#27549) +* `#27565 `__: MAINT: Use miniforge for macos conda test. 
+* `#27566 <https://github.com/numpy/numpy/pull/27566>`__: BUILD: satisfy gcc-13 pendantic errors
+* `#27569 <https://github.com/numpy/numpy/pull/27569>`__: BUG: handle possible error for PyTraceMallocTrack
+* `#27570 <https://github.com/numpy/numpy/pull/27570>`__: BLD: start building Windows free-threaded wheels [wheel build]
+* `#27571 <https://github.com/numpy/numpy/pull/27571>`__: BUILD: vendor tempita from Cython
+* `#27574 <https://github.com/numpy/numpy/pull/27574>`__: BUG: Fix warning "differs in levels of indirection" in npy_atomic.h...
+* `#27592 <https://github.com/numpy/numpy/pull/27592>`__: MAINT: Update Highway to latest
+* `#27593 <https://github.com/numpy/numpy/pull/27593>`__: BUG: Adjust numpy.i for SWIG 4.3 compatibility
+* `#27616 <https://github.com/numpy/numpy/pull/27616>`__: BUG: Fix Linux QEMU CI workflow
+* `#27668 <https://github.com/numpy/numpy/pull/27668>`__: BLD: Do not set __STDC_VERSION__ to zero during build
+* `#27669 <https://github.com/numpy/numpy/pull/27669>`__: ENH: fix wasm32 runtime type error in numpy._core
+* `#27672 <https://github.com/numpy/numpy/pull/27672>`__: BUG: Fix a reference count leak in npy_find_descr_for_scalar.
+* `#27673 <https://github.com/numpy/numpy/pull/27673>`__: BUG: fixes for StringDType/unicode promoters
+
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index 93b009628571..61468132879f 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -97,7 +97,7 @@ array". Most NumPy arrays have some restrictions. For instance:
- All elements of the array must be of the same type of data.
-- Once created, the total size of the the array can't change.
+- Once created, the total size of the array can't change.
- The shape must be "rectangular", not "jagged"; e.g., each row of a
two-dimensional array must have the same number of columns.
@@ -235,7 +235,7 @@ only one "data type". The data type is recorded in the ``dtype`` attribute.
>>> a.dtype
dtype('int64') # "int" for integer, "64" for 64-bit
-ref:`Read more about array attributes here ` and learn about
+:ref:`Read more about array attributes here ` and learn about
:ref:`array objects here `.
How to create a basic array
@@ -301,7 +301,7 @@ Adding, removing, and sorting elements
-----
-Sorting an element is simple with ``np.sort()``. You can specify the axis, kind,
+Sorting an array is simple with ``np.sort()``. You can specify the axis, kind,
and order when you call the function.
If you start with this array:: @@ -425,7 +425,7 @@ this array to an array with three rows and two columns:: With ``np.reshape``, you can specify a few optional parameters:: - >>> np.reshape(a, newshape=(1, 6), order='C') + >>> np.reshape(a, shape=(1, 6), order='C') array([[0, 1, 2, 3, 4, 5]]) ``a`` is the array to be reshaped. @@ -1525,7 +1525,7 @@ If you want to store a single ndarray object, store it as a .npy file using save it as a .npz file using ``np.savez``. You can also save several arrays into a single file in compressed npz format with `savez_compressed`. -It's easy to save and load and array with ``np.save()``. Just make sure to +It's easy to save and load an array with ``np.save()``. Just make sure to specify the array you want to save and a file name. For example, if you create this array:: diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst index a753767655c7..2b03817bba91 100644 --- a/doc/source/user/basics.broadcasting.rst +++ b/doc/source/user/basics.broadcasting.rst @@ -23,6 +23,7 @@ NumPy operations are usually done on pairs of arrays on an element-by-element basis. In the simplest case, the two arrays must have exactly the same shape, as in the following example: + >>> import numpy as np >>> a = np.array([1.0, 2.0, 3.0]) >>> b = np.array([2.0, 2.0, 2.0]) >>> a * b @@ -32,6 +33,7 @@ NumPy's broadcasting rule relaxes this constraint when the arrays' shapes meet certain constraints. The simplest broadcasting example occurs when an array and a scalar value are combined in an operation: +>>> import numpy as np >>> a = np.array([1.0, 2.0, 3.0]) >>> b = 2.0 >>> a * b @@ -162,6 +164,7 @@ Here are examples of shapes that do not broadcast:: An example of broadcasting when a 1-d array is added to a 2-d array:: + >>> import numpy as np >>> a = np.array([[ 0.0, 0.0, 0.0], ... [10.0, 10.0, 10.0], ... 
[20.0, 20.0, 20.0], @@ -209,6 +212,7 @@ Broadcasting provides a convenient way of taking the outer product (or any other outer operation) of two arrays. The following example shows an outer addition operation of two 1-d arrays:: + >>> import numpy as np >>> a = np.array([0.0, 10.0, 20.0, 30.0]) >>> b = np.array([1.0, 2.0, 3.0]) >>> a[:, np.newaxis] + b diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst index 482cbc189ec8..3148fbf2d27f 100644 --- a/doc/source/user/basics.copies.rst +++ b/doc/source/user/basics.copies.rst @@ -50,6 +50,7 @@ Views are created when elements can be addressed with offsets and strides in the original array. Hence, basic indexing always creates views. For example:: + >>> import numpy as np >>> x = np.arange(10) >>> x array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -60,13 +61,14 @@ For example:: >>> x array([ 0, 10, 11, 3, 4, 5, 6, 7, 8, 9]) >>> y - array([10, 11]) + array([10, 11]) Here, ``y`` gets changed when ``x`` is changed because it is a view. :ref:`advanced-indexing`, on the other hand, always creates copies. For example:: + >>> import numpy as np >>> x = np.arange(9).reshape(3, 3) >>> x array([[0, 1, 2], @@ -79,9 +81,9 @@ For example:: >>> y.base is None True -Here, ``y`` is a copy, as signified by the :attr:`base <.ndarray.base>` -attribute. We can also confirm this by assigning new values to ``x[[1, 2]]`` -which in turn will not affect ``y`` at all:: + Here, ``y`` is a copy, as signified by the :attr:`base <.ndarray.base>` + attribute. We can also confirm this by assigning new values to ``x[[1, 2]]`` + which in turn will not affect ``y`` at all:: >>> x[[1, 2]] = [[10, 11, 12], [13, 14, 15]] >>> x @@ -93,7 +95,7 @@ which in turn will not affect ``y`` at all:: [6, 7, 8]]) It must be noted here that during the assignment of ``x[[1, 2]]`` no view -or copy is created as the assignment happens in-place. +or copy is created as the assignment happens in-place. 
Other operations @@ -107,6 +109,7 @@ the reshaping cannot be done by modifying strides and requires a copy. In these cases, we can raise an error by assigning the new shape to the shape attribute of the array. For example:: + >>> import numpy as np >>> x = np.ones((2, 3)) >>> y = x.T # makes the array non-contiguous >>> y @@ -132,6 +135,7 @@ The :attr:`base <.ndarray.base>` attribute of the ndarray makes it easy to tell if an array is a view or a copy. The base attribute of a view returns the original array while it returns ``None`` for a copy. + >>> import numpy as np >>> x = np.arange(9) >>> x array([0, 1, 2, 3, 4, 5, 6, 7, 8]) diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst index c9773dc0fcd0..6c09adfdff54 100644 --- a/doc/source/user/basics.creation.rst +++ b/doc/source/user/basics.creation.rst @@ -35,6 +35,7 @@ respectively. Lists and tuples can define ndarray creation: :: + >>> import numpy as np >>> a1D = np.array([1, 2, 3, 4]) >>> a2D = np.array([[1, 2], [3, 4]]) >>> a3D = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) @@ -47,6 +48,7 @@ are handled in C/C++ functions. When values do not fit and you are using a ``dtype``, NumPy may raise an error:: + >>> import numpy as np >>> np.array([127, 128, 129], dtype=np.int8) Traceback (most recent call last): ... @@ -56,8 +58,9 @@ An 8-bit signed integer represents integers from -128 to 127. Assigning the ``int8`` array to integers outside of this range results in overflow. This feature can often be misunderstood. 
If you perform calculations with mismatching ``dtypes``, you can get unwanted -results, for example:: +results, for example:: + >>> import numpy as np >>> a = np.array([2, 3, 4], dtype=np.uint32) >>> b = np.array([5, 6, 7], dtype=np.uint32) >>> c_unsigned32 = a - b @@ -72,7 +75,7 @@ Notice when you perform operations with two arrays of the same perform operations with different ``dtype``, NumPy will assign a new type that satisfies all of the array elements involved in the computation, here ``uint32`` and ``int32`` can both be represented in -as ``int64``. +as ``int64``. The default NumPy behavior is to create arrays in either 32 or 64-bit signed integers (platform dependent and matches C ``long`` size) or double precision @@ -107,6 +110,7 @@ The 1D array creation functions e.g. :func:`numpy.linspace` and Check the documentation for complete information and examples. A few examples are shown:: + >>> import numpy as np >>> np.arange(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> np.arange(2, 10, dtype=float) @@ -124,6 +128,7 @@ the ``stop`` value is sometimes included. spaced equally between the specified beginning and end values. For example: :: + >>> import numpy as np >>> np.linspace(1., 4., 6) array([1. , 1.6, 2.2, 2.8, 3.4, 4. ]) @@ -140,6 +145,7 @@ define properties of special matrices represented as 2D arrays. ``np.eye(n, m)`` defines a 2D identity matrix. The elements where i=j (row index and column index are equal) are 1 and the rest are 0, as such:: + >>> import numpy as np >>> np.eye(3) array([[1., 0., 0.], [0., 1., 0.], @@ -154,6 +160,7 @@ the diagonal *or* if given a 2D array returns a 1D array that is only the diagonal elements. The two array creation functions can be helpful while doing linear algebra, as such:: + >>> import numpy as np >>> np.diag([1, 2, 3]) array([[1, 0, 0], [0, 2, 0], @@ -172,7 +179,8 @@ of the Vandermonde matrix is a decreasing power of the input 1D array or list or tuple, ``x`` where the highest polynomial order is ``n-1``. 
This array creation routine is helpful in generating linear least squares models, as such:: - + + >>> import numpy as np >>> np.vander(np.linspace(0, 2, 5), 2) array([[0. , 1. ], [0.5, 1. ], @@ -202,6 +210,7 @@ and length along that dimension in a tuple or list. :func:`numpy.zeros` will create an array filled with 0 values with the specified shape. The default dtype is ``float64``:: + >>> import numpy as np >>> np.zeros((2, 3)) array([[0., 0., 0.], [0., 0., 0.]]) @@ -217,6 +226,7 @@ specified shape. The default dtype is ``float64``:: :func:`numpy.ones` will create an array filled with 1 values. It is identical to ``zeros`` in all other respects as such:: + >>> import numpy as np >>> np.ones((2, 3)) array([[1., 1., 1.], [1., 1., 1.]]) @@ -236,6 +246,7 @@ library. Below, two arrays are created with shapes (2,3) and (2,3,2), respectively. The seed is set to 42 so you can reproduce these pseudorandom numbers:: + >>> import numpy as np >>> from numpy.random import default_rng >>> default_rng(42).random((2,3)) array([[0.77395605, 0.43887844, 0.85859792], @@ -250,8 +261,9 @@ pseudorandom numbers:: :func:`numpy.indices` will create a set of arrays (stacked as a one-higher dimensioned array), one per dimension with each representing variation in that -dimension: :: +dimension:: + >>> import numpy as np >>> np.indices((3,3)) array([[[0, 0, 0], [1, 1, 1], @@ -272,6 +284,7 @@ elements to a new variable, you have to explicitly :func:`numpy.copy` the array, otherwise the variable is a view into the original array. Consider the following example:: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4, 5, 6]) >>> b = a[:2] >>> b += 1 @@ -283,6 +296,7 @@ In this example, you did not create a new array. You created a variable, would get the same result by adding 1 to ``a[:2]``. 
If you want to create a *new* array, use the :func:`numpy.copy` array creation routine as such:: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4]) >>> b = a[:2].copy() >>> b += 1 @@ -296,6 +310,7 @@ There are a number of routines to join existing arrays e.g. :func:`numpy.vstack` :func:`numpy.hstack`, and :func:`numpy.block`. Here is an example of joining four 2-by-2 arrays into a 4-by-4 array using ``block``:: + >>> import numpy as np >>> A = np.ones((2, 2)) >>> B = np.eye(2, 2) >>> C = np.zeros((2, 2)) @@ -354,6 +369,7 @@ and :func:`numpy.genfromtxt`. These functions have more involved use cases in Importing ``simple.csv`` is accomplished using :func:`numpy.loadtxt`:: + >>> import numpy as np >>> np.loadtxt('simple.csv', delimiter = ',', skiprows = 1) # doctest: +SKIP array([[0., 0.], [1., 1.], diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 29b9eae06481..1505c9285ea8 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -23,6 +23,10 @@ example that has rather narrow utility but illustrates the concepts involved. ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) Our custom array can be instantiated like: @@ -85,6 +89,10 @@ For this example we will only handle the method ``__call__`` ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... 
if method == '__call__': @@ -136,6 +144,10 @@ conveniently by inheriting from the mixin ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': @@ -174,6 +186,10 @@ functions to our custom variants. ... def __repr__(self): ... return f"{self.__class__.__name__}(N={self._N}, value={self._i})" ... def __array__(self, dtype=None, copy=None): +... if copy is False: +... raise ValueError( +... "`copy=False` isn't supported. A copy is always created." +... ) ... return self._i * np.eye(self._N, dtype=dtype) ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): ... if method == '__call__': @@ -284,7 +300,7 @@ implement the ``__array_ufunc__`` and ``__array_function__`` protocols in the To check if a Numpy function can be overridden via ``__array_ufunc__``, you can use :func:`~numpy.testing.overrides.allows_array_ufunc_override`: ->>> from np.testing.overrides import allows_array_ufunc_override +>>> from numpy.testing.overrides import allows_array_ufunc_override >>> allows_array_ufunc_override(np.add) True diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst index fffb0ecb8519..7481468fe6db 100644 --- a/doc/source/user/basics.indexing.rst +++ b/doc/source/user/basics.indexing.rst @@ -665,11 +665,11 @@ behave just like slicing). .. rubric:: Example -Suppose ``x.shape`` is (10, 20, 30) and ``ind`` is a (2, 3, 4)-shaped +Suppose ``x.shape`` is (10, 20, 30) and ``ind`` is a (2, 5, 2)-shaped indexing :class:`intp` array, then ``result = x[..., ind, :]`` has -shape (10, 2, 3, 4, 30) because the (20,)-shaped subspace has been -replaced with a (2, 3, 4)-shaped broadcasted indexing subspace. 
If -we let *i, j, k* loop over the (2, 3, 4)-shaped subspace then +shape (10, 2, 5, 2, 30) because the (20,)-shaped subspace has been +replaced with a (2, 5, 2)-shaped broadcasted indexing subspace. If +we let *i, j, k* loop over the (2, 5, 2)-shaped subspace then ``result[..., i, j, k, :] = x[..., ind[i, j, k], :]``. This example produces the same result as :meth:`x.take(ind, axis=-2) `. diff --git a/doc/source/user/basics.interoperability.rst b/doc/source/user/basics.interoperability.rst index e0faf0c052c9..ca0c39d7081f 100644 --- a/doc/source/user/basics.interoperability.rst +++ b/doc/source/user/basics.interoperability.rst @@ -113,6 +113,8 @@ We can check that ``arr`` and ``new_arr`` share the same data buffer: array([1000, 2, 3, 4]) +.. _dunder_array.interface: + The ``__array__()`` method ~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst index 83be116b7e7f..e0baba938f16 100644 --- a/doc/source/user/basics.subclassing.rst +++ b/doc/source/user/basics.subclassing.rst @@ -158,21 +158,21 @@ __new__ documentation For example, consider the following Python code: >>> class C: ->>> def __new__(cls, *args): ->>> print('Cls in __new__:', cls) ->>> print('Args in __new__:', args) ->>> # The `object` type __new__ method takes a single argument. ->>> return object.__new__(cls) ->>> def __init__(self, *args): ->>> print('type(self) in __init__:', type(self)) ->>> print('Args in __init__:', args) +... def __new__(cls, *args): +... print('Cls in __new__:', cls) +... print('Args in __new__:', args) +... # The `object` type __new__ method takes a single argument. +... return object.__new__(cls) +... def __init__(self, *args): +... print('type(self) in __init__:', type(self)) +... 
print('Args in __init__:', args) meaning that we get: >>> c = C('hello') -Cls in __new__: +Cls in __new__: Args in __new__: ('hello',) -type(self) in __init__: +type(self) in __init__: Args in __init__: ('hello',) When we call ``C('hello')``, the ``__new__`` method gets its own class diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst index 3dd947002c20..0b665574cbdc 100644 --- a/doc/source/user/basics.types.rst +++ b/doc/source/user/basics.types.rst @@ -56,7 +56,7 @@ dtype objects also contain information about the type, such as its bit-width and its byte-order. The data type can also be used indirectly to query properties of the type, such as whether it is an integer:: - >>> d = np.dtype(int64) + >>> d = np.dtype(np.int64) >>> d dtype('int64') @@ -142,8 +142,8 @@ Advanced types, not listed above, are explored in section .. _canonical-python-and-c-types: -Relationship Between NumPy Data Types and C Data Data Types -=========================================================== +Relationship Between NumPy Data Types and C Data Types +====================================================== NumPy provides both bit sized type names and names based on the names of C types. Since the definition of C types are platform dependent, this means the explicitly diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst index 697c0c045e4f..7bf793ae2e47 100644 --- a/doc/source/user/c-info.beyond-basics.rst +++ b/doc/source/user/c-info.beyond-basics.rst @@ -268,6 +268,9 @@ specifies your data-type. This type number should be stored and made available by your module so that other modules can use it to recognize your data-type. +Note that this API is inherently thread-unsafe. See `thread_safety` for more +details about thread safety in NumPy. 
+ Registering a casting function ------------------------------ diff --git a/doc/source/user/c-info.ufunc-tutorial.rst b/doc/source/user/c-info.ufunc-tutorial.rst index 38baa28c7307..6b1aca65ed00 100644 --- a/doc/source/user/c-info.ufunc-tutorial.rst +++ b/doc/source/user/c-info.ufunc-tutorial.rst @@ -182,21 +182,16 @@ site-packages directory. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. ''' + from setuptools import setup, Extension + import numpy as np - from distutils.core import setup, Extension + module1 = Extension('spam', sources=['spammodule.c']) - module1 = Extension('spam', sources=['spammodule.c'], - include_dirs=['/usr/local/lib']) - - setup(name = 'spam', - version='1.0', - description='This is my spam package', - ext_modules = [module1]) + setup(name='spam', version='1.0', ext_modules=[module1]) Once the spam module is imported into python, you can call logit @@ -355,8 +350,8 @@ using ``python setup.py build_ext --inplace``. ''' setup.py file for single_type_logit.c Note that since this is a numpy extension - we use numpy.distutils instead of - distutils from the python standard library. + we add an include_dirs=[get_include()] so that the + extension is built with numpy's C/C++ header files. Calling $python setup.py build_ext --inplace @@ -373,33 +368,26 @@ using ``python setup.py build_ext --inplace``. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org and the documentation - on numpy.distutils for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. 
''' + from setuptools import setup, Extension + from numpy import get_include - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration - - config = Configuration('npufunc_directory', - parent_package, - top_path) - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) - return config + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) -0.0 +np.float64(0.0) >>> a = np.linspace(0,1,5) >>> npufunc.logit(a) array([ -inf, -1.09861229, 0. , 1.09861229, inf]) @@ -607,8 +595,10 @@ or installed to site-packages via ``python setup.py install``. ''' setup.py file for multi_type_logit.c Note that since this is a numpy extension - we use numpy.distutils instead of - distutils from the python standard library. + we add an include_dirs=[get_include()] so that the + extension is built with numpy's C/C++ header files. + Furthermore, we also have to include the npymath + lib for half-float d-type. Calling $python setup.py build_ext --inplace @@ -625,38 +615,31 @@ or installed to site-packages via ``python setup.py install``. $python setup.py install will install the module in your site-packages file. - See the distutils section of - 'Extending and Embedding the Python Interpreter' - at docs.python.org and the documentation - on numpy.distutils for more information. + See the setuptools section 'Building Extension Modules' + at setuptools.pypa.io for more information. 
''' + from setuptools import setup, Extension + from numpy import get_include + from os import path - def configuration(parent_package='', top_path=None): - from numpy.distutils.misc_util import Configuration, get_info - - #Necessary for the half-float d-type. - info = get_info('npymath') + path_to_npymath = path.join(get_include(), '..', 'lib') + npufunc = Extension('npufunc', + sources=['multi_type_logit.c'], + include_dirs=[get_include()], + # Necessary for the half-float d-type. + library_dirs=[path_to_npymath], + libraries=["npymath"]) - config = Configuration('npufunc_directory', - parent_package, - top_path) - config.add_extension('npufunc', - ['multi_type_logit.c'], - extra_info=info) + setup(name='npufunc', version='1.0', ext_modules=[npufunc]) - return config - - if __name__ == "__main__": - from numpy.distutils.core import setup - setup(configuration=configuration) After the above has been installed, it can be imported and used as follows. >>> import numpy as np >>> import npufunc >>> npufunc.logit(0.5) -0.0 +np.float64(0.0) >>> a = np.linspace(0,1,5) >>> npufunc.logit(a) array([ -inf, -1.09861229, 0. , 1.09861229, inf]) @@ -678,13 +661,17 @@ the line .. code-block:: python - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) is replaced with .. code-block:: python - config.add_extension('npufunc', ['multi_arg_logit.c']) + npufunc = Extension('npufunc', + sources=['multi_arg_logit.c'], + include_dirs=[get_include()]) The C file is given below. The ufunc generated takes two arguments ``A`` and ``B``. It returns a tuple whose first element is ``A * B`` and whose second @@ -809,13 +796,17 @@ the line .. code-block:: python - config.add_extension('npufunc', ['single_type_logit.c']) + npufunc = Extension('npufunc', + sources=['single_type_logit.c'], + include_dirs=[get_include()]) is replaced with .. 
code-block:: python - config.add_extension('npufunc', ['add_triplet.c']) + npufunc = Extension('npufunc', + sources=['add_triplet.c'], + include_dirs=[get_include()]) The C file is given below. @@ -892,7 +883,7 @@ The C file is given below. NULL }; - PyMODINIT_FUNC PyInit_struct_ufunc_test(void) + PyMODINIT_FUNC PyInit_npufunc(void) { PyObject *m, *add_triplet, *d; PyObject *dtype_dict; diff --git a/doc/source/user/conftest.py b/doc/source/user/conftest.py new file mode 100644 index 000000000000..54f9d6d3158c --- /dev/null +++ b/doc/source/user/conftest.py @@ -0,0 +1,4 @@ +# doctesting configuration from the main conftest +from numpy.conftest import dt_config # noqa: F401 + +#breakpoint() diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst index 9b3a71fa40bb..ca4abcd13746 100644 --- a/doc/source/user/how-to-io.rst +++ b/doc/source/user/how-to-io.rst @@ -327,7 +327,7 @@ created with NumPy 1.26. Convert from a pandas DataFrame to a NumPy array ================================================ -See :meth:`pandas.DataFrame.to_numpy`. +See :meth:`pandas.Series.to_numpy`. Save/restore using `~numpy.ndarray.tofile` and `~numpy.fromfile` ================================================================ diff --git a/doc/source/user/how-to-partition.rst b/doc/source/user/how-to-partition.rst index e90b39e9440c..74c37c1caa5f 100644 --- a/doc/source/user/how-to-partition.rst +++ b/doc/source/user/how-to-partition.rst @@ -244,10 +244,10 @@ fully-dimensional result array. :: >>> np.ogrid[0:4, 0:6] - [array([[0], + (array([[0], [1], [2], - [3]]), array([[0, 1, 2, 3, 4, 5]])] + [3]]), array([[0, 1, 2, 3, 4, 5]])) All three methods described here can be used to evaluate function values on a grid. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 8d4c500fd021..5a002ba8375e 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -26,7 +26,7 @@ details are found in :ref:`reference`. 
:maxdepth: 1 numpy-for-matlab-users - NumPy tutorials + NumPy tutorials howtos_index .. toctree:: diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 7c7fd0898490..d9b5c460944c 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -392,7 +392,7 @@ Linear algebra equivalents from numpy.random import default_rng rng = default_rng(42) - rng.random(3, 4) + rng.random((3, 4)) or older version: ``random.rand((3, 4))`` diff --git a/doc/source/user/troubleshooting-importerror.rst b/doc/source/user/troubleshooting-importerror.rst index 232f9f7e2bf2..d1b83d388aac 100644 --- a/doc/source/user/troubleshooting-importerror.rst +++ b/doc/source/user/troubleshooting-importerror.rst @@ -148,40 +148,75 @@ This may mainly help you if you are not running the python and/or NumPy version you are expecting to run. -C-API incompatibility ---------------------------- +Downstream ImportError, AttributeError or C-API/ABI incompatibility +=================================================================== + +If you see a message such as:: + + A module that was compiled using NumPy 1.x cannot be run in + NumPy 2.0.0 as it may crash. To support both 1.x and 2.x + versions of NumPy, modules must be compiled with NumPy 2.0. + Some module may need to rebuild instead e.g. with 'pybind11>=2.12'. -If you see an error like: +either as an ``ImportError`` or with:: + AttributeError: _ARRAY_API not found + +or other errors such as:: RuntimeError: module compiled against API version v1 but this version of numpy is v2 +or when a package implemented with Cython:: + + ValueError: numpy.dtype size changed, may indicate binary incompatibility. Expected 96 from C header, got 88 from PyObject + +This means that a package depending on NumPy was build in a way that is not +compatible with the NumPy version found. 
+If this error is due to a recent upgrade to NumPy 2, the easiest solution may
+be to simply downgrade NumPy to ``'numpy<2'``.
+
+To understand the cause, search the traceback (from the back) to find the first
+line that isn't inside NumPy to see which package has the incompatibility.
+Note your NumPy version and the version of the incompatible package to
+help you find the best solution.
+
+There can be various reasons for the incompatibility:
+
+* You have recently upgraded NumPy, most likely to NumPy 2, and the other
+  module now also needs to be upgraded. (NumPy 2 was released in June 2024.)
+
+* You have version constraints and ``pip`` may
+  have installed a combination of incompatible packages.
+
+* You have compiled locally or have copied a compiled extension from
+  elsewhere (which is, in general, a bad idea).
-You may have:
+The best solution will usually be to upgrade the failing package:
-* A bad extension "wheel" (binary install) that should use
-  `oldest-support-numpy <https://pypi.org/project/oldest-supported-numpy/>`_ (
-  with manual constraints if necessary) to build their binary packages.
+* If you installed it for example through ``pip``, try upgrading it with
+  ``pip install package_name --upgrade``.
-* An environment issue messing with package versions.
+* If it is your own package or it is built locally, you need to recompile it
+  for the new NumPy version (for details see :ref:`depending_on_numpy`).
+  It may be that a reinstall of the package is sufficient to fix it.
-* Incompatible package versions somehow enforced manually.
+When these steps fail, you should inform the package maintainers since they
+probably need to make a new, compatible, release.
-* An extension module compiled locally against a very recent version
-  followed by a NumPy downgrade.
+However, upgrading may not always be possible because a compatible version does
+not yet exist or cannot be installed for other reasons. In that case:
-* A compiled extension copied to a different computer with an
-  older NumPy version.
+* Install a compatible NumPy version: -The best thing to do if you see this error is to contact -the maintainers of the package that is causing problem -so that they can solve the problem properly. + * Try downgrading NumPy with ``pip install 'numpy<2'`` + (NumPy 2 was released in June 2024). + * If your NumPy version is old, you can try upgrading it for + example with ``pip install numpy --upgrade``. -However, while you wait for a solution, a work around -that usually works is to upgrade the NumPy version:: +* Add additional version pins to the failing package to help ``pip`` + resolve compatible versions of NumPy and the package. - pip install numpy --upgrade Segfaults or crashes ==================== diff --git a/environment.yml b/environment.yml index 0690d6fdac6c..e0d2ccdc1117 100644 --- a/environment.yml +++ b/environment.yml @@ -7,17 +7,17 @@ name: numpy-dev channels: - conda-forge dependencies: - - python=3.11 #need to pin to avoid issues with builds + - python=3.12 # need to pin to avoid issues with builds - cython>=3.0 - compilers - openblas - nomkl - - setuptools + - setuptools==65.5.1 - ninja - pkg-config - meson-python - pip - - spin + - spin=0.8 # Unpin when spin 0.9.1 is released - ccache # For testing - pytest @@ -26,17 +26,19 @@ dependencies: - hypothesis # For type annotations - typing_extensions>=4.2.0 # needed for python < 3.10 - - mypy=1.7.1 + - mypy=1.10.0 # For building docs - sphinx>=4.5.0 + - sphinx-copybutton - sphinx-design - numpydoc=1.4.0 - ipython - scipy - pandas - matplotlib - - pydata-sphinx-theme=0.13.3 + - pydata-sphinx-theme>=0.15.2 - doxygen + - towncrier # NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz - breathe>4.33.0 # For linting diff --git a/meson_cpu/main_config.h.in b/meson_cpu/main_config.h.in index c1fc2de349e0..0952adf67353 100644 --- a/meson_cpu/main_config.h.in +++ b/meson_cpu/main_config.h.in @@ -385,4 +385,8 @@ #ifdef @P@HAVE_NEON #include #endif + +#ifdef @P@HAVE_RVV + #include +#endif #endif // 
@P@_CPU_DISPATCHER_CONF_H_ diff --git a/meson_cpu/meson.build b/meson_cpu/meson.build index f96d9c315ea6..3afc54cae415 100644 --- a/meson_cpu/meson.build +++ b/meson_cpu/meson.build @@ -75,12 +75,14 @@ subdir('x86') subdir('ppc64') subdir('s390x') subdir('arm') +subdir('riscv64') CPU_FEATURES = {} CPU_FEATURES += ARM_FEATURES CPU_FEATURES += X86_FEATURES CPU_FEATURES += PPC64_FEATURES CPU_FEATURES += S390X_FEATURES +CPU_FEATURES += RV64_FEATURES # Parse the requested baseline (CPU_CONF_BASELINE) and dispatch features # (CPU_CONF_DISPATCH). @@ -93,6 +95,7 @@ min_features = { 's390x': [], 'arm': [], 'aarch64': [ASIMD], + 'riscv64': [], 'wasm32': [], }.get(cpu_family, []) if host_machine.endian() == 'little' and cpu_family == 'ppc64' @@ -107,6 +110,7 @@ max_features_dict = { 's390x': S390X_FEATURES, 'arm': ARM_FEATURES, 'aarch64': ARM_FEATURES, + 'riscv64': RV64_FEATURES, 'wasm32': {}, }.get(cpu_family, {}) max_features = [] diff --git a/meson_cpu/riscv64/meson.build b/meson_cpu/riscv64/meson.build new file mode 100644 index 000000000000..3f930f39e27e --- /dev/null +++ b/meson_cpu/riscv64/meson.build @@ -0,0 +1,8 @@ +source_root = meson.project_source_root() +mod_features = import('features') + +RVV = mod_features.new( + 'RVV', 1, args: ['-march=rv64gcv'], + test_code: files(source_root + '/numpy/distutils/checks/cpu_rvv.c')[0], +) +RV64_FEATURES = {'RVV': RVV} diff --git a/meson_cpu/x86/meson.build b/meson_cpu/x86/meson.build index 598f80ff0c89..8c7a0fb59a57 100644 --- a/meson_cpu/x86/meson.build +++ b/meson_cpu/x86/meson.build @@ -59,13 +59,14 @@ FMA3 = mod_features.new( 'FMA3', 24, implies: F16C, args: '-mfma', test_code: files(source_root + '/numpy/distutils/checks/cpu_fma3.c')[0] ) +# match this to HWY_AVX2 AVX2 = mod_features.new( - 'AVX2', 25, implies: F16C, args: '-mavx2', + 'AVX2', 25, implies: FMA3, args: ['-mavx2', '-maes', '-mpclmul', '-mbmi', '-mbmi2'], test_code: files(source_root + '/numpy/distutils/checks/cpu_avx2.c')[0] ) # 25-40 left as margin for 
any extra features AVX512F = mod_features.new( - 'AVX512F', 40, implies: [FMA3, AVX2], + 'AVX512F', 40, implies: [AVX2], # Disables mmx because of stack corruption that may happen during mask # conversions. # TODO (seiko2plus): provide more clarification diff --git a/numpy/__init__.cython-30.pxd b/numpy/__init__.cython-30.pxd index 1afbe3d8ebd0..2151a18b1e80 100644 --- a/numpy/__init__.cython-30.pxd +++ b/numpy/__init__.cython-30.pxd @@ -68,36 +68,28 @@ cdef extern from "numpy/arrayobject.h": ctypedef long double npy_float128 ctypedef struct npy_cfloat: - float real - float imag + pass ctypedef struct npy_cdouble: - double real - double imag + pass ctypedef struct npy_clongdouble: - long double real - long double imag + pass ctypedef struct npy_complex64: - float real - float imag + pass ctypedef struct npy_complex128: - double real - double imag + pass ctypedef struct npy_complex160: - long double real - long double imag + pass ctypedef struct npy_complex192: - long double real - long double imag + pass ctypedef struct npy_complex256: - long double real - long double imag + pass ctypedef struct PyArray_Dims: npy_intp *ptr @@ -291,11 +283,11 @@ cdef extern from "numpy/arrayobject.h": cdef int type_num @property - cdef inline npy_intp itemsize(self) nogil: + cdef inline npy_intp itemsize(self) noexcept nogil: return PyDataType_ELSIZE(self) @property - cdef inline npy_intp alignment(self) nogil: + cdef inline npy_intp alignment(self) noexcept nogil: return PyDataType_ALIGNMENT(self) # Use fields/names with care as they may be NULL. You must check @@ -312,11 +304,11 @@ cdef extern from "numpy/arrayobject.h": # valid (the pointer can be NULL). Most users should access # this field via the inline helper method PyDataType_SHAPE. 
@property - cdef inline PyArray_ArrayDescr* subarray(self) nogil: + cdef inline PyArray_ArrayDescr* subarray(self) noexcept nogil: return PyDataType_SUBARRAY(self) @property - cdef inline npy_uint64 flags(self) nogil: + cdef inline npy_uint64 flags(self) noexcept nogil: """The data types flags.""" return PyDataType_FLAGS(self) @@ -328,32 +320,32 @@ cdef extern from "numpy/arrayobject.h": ctypedef class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]: @property - cdef inline int numiter(self) nogil: + cdef inline int numiter(self) noexcept nogil: """The number of arrays that need to be broadcast to the same shape.""" return PyArray_MultiIter_NUMITER(self) @property - cdef inline npy_intp size(self) nogil: + cdef inline npy_intp size(self) noexcept nogil: """The total broadcasted size.""" return PyArray_MultiIter_SIZE(self) @property - cdef inline npy_intp index(self) nogil: + cdef inline npy_intp index(self) noexcept nogil: """The current (1-d) index into the broadcasted result.""" return PyArray_MultiIter_INDEX(self) @property - cdef inline int nd(self) nogil: + cdef inline int nd(self) noexcept nogil: """The number of dimensions in the broadcasted result.""" return PyArray_MultiIter_NDIM(self) @property - cdef inline npy_intp* dimensions(self) nogil: + cdef inline npy_intp* dimensions(self) noexcept nogil: """The shape of the broadcasted result.""" return PyArray_MultiIter_DIMS(self) @property - cdef inline void** iters(self) nogil: + cdef inline void** iters(self) noexcept nogil: """An array of iterator objects that holds the iterators for the arrays to be broadcast together. On return, the iterators are adjusted for broadcasting.""" return PyArray_MultiIter_ITERS(self) @@ -371,7 +363,7 @@ cdef extern from "numpy/arrayobject.h": # Instead, we use properties that map to the corresponding C-API functions. 
@property - cdef inline PyObject* base(self) nogil: + cdef inline PyObject* base(self) noexcept nogil: """Returns a borrowed reference to the object owning the data/memory. """ return PyArray_BASE(self) @@ -383,13 +375,13 @@ cdef extern from "numpy/arrayobject.h": return PyArray_DESCR(self) @property - cdef inline int ndim(self) nogil: + cdef inline int ndim(self) noexcept nogil: """Returns the number of dimensions in the array. """ return PyArray_NDIM(self) @property - cdef inline npy_intp *shape(self) nogil: + cdef inline npy_intp *shape(self) noexcept nogil: """Returns a pointer to the dimensions/shape of the array. The number of elements matches the number of dimensions of the array (ndim). Can return NULL for 0-dimensional arrays. @@ -397,20 +389,20 @@ cdef extern from "numpy/arrayobject.h": return PyArray_DIMS(self) @property - cdef inline npy_intp *strides(self) nogil: + cdef inline npy_intp *strides(self) noexcept nogil: """Returns a pointer to the strides of the array. The number of elements matches the number of dimensions of the array (ndim). """ return PyArray_STRIDES(self) @property - cdef inline npy_intp size(self) nogil: + cdef inline npy_intp size(self) noexcept nogil: """Returns the total size (in number of elements) of the array. """ return PyArray_SIZE(self) @property - cdef inline char* data(self) nogil: + cdef inline char* data(self) noexcept nogil: """The pointer to the data buffer as a char*. This is provided for legacy reasons to avoid direct struct field access. 
For new code that needs this access, you probably want to cast the result @@ -562,7 +554,6 @@ cdef extern from "numpy/arrayobject.h": object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran) object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran) void PyArray_FILLWBYTE(ndarray, int val) - npy_intp PyArray_REFCOUNT(object) object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth) unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2) bint PyArray_EquivByteorders(int b1, int b2) nogil @@ -617,7 +608,6 @@ cdef extern from "numpy/arrayobject.h": # more than is probably needed until it can be checked further. int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... - void PyArray_SetStringFunction (object, int) dtype PyArray_DescrFromType (int) object PyArray_TypeObjectFromType (int) char * PyArray_Zero (ndarray) @@ -808,11 +798,10 @@ ctypedef npy_double float_t ctypedef npy_double double_t ctypedef npy_longdouble longdouble_t -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t cdef inline object PyArray_MultiIterNew1(a): return PyArray_MultiIterNew(1, a) @@ -851,6 +840,7 @@ cdef extern from "numpy/ndarraytypes.h": int64_t year int32_t month, day, hour, min, sec, us, ps, as + cdef extern from "numpy/arrayscalars.h": # abstract types @@ -1016,7 +1006,7 @@ cdef extern from "numpy/ufuncobject.h": int _import_umath() except -1 -cdef inline void set_array_base(ndarray arr, object base): +cdef inline void set_array_base(ndarray arr, object base) except *: Py_INCREF(base) # important to do this before stealing the reference below! 
PyArray_SetBaseObject(arr, base) @@ -1047,7 +1037,7 @@ cdef inline int import_ufunc() except -1: raise ImportError("numpy._core.umath failed to import") -cdef inline bint is_timedelta64_object(object obj): +cdef inline bint is_timedelta64_object(object obj) noexcept: """ Cython equivalent of `isinstance(obj, np.timedelta64)` @@ -1062,7 +1052,7 @@ cdef inline bint is_timedelta64_object(object obj): return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) -cdef inline bint is_datetime64_object(object obj): +cdef inline bint is_datetime64_object(object obj) noexcept: """ Cython equivalent of `isinstance(obj, np.datetime64)` @@ -1077,7 +1067,7 @@ cdef inline bint is_datetime64_object(object obj): return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) -cdef inline npy_datetime get_datetime64_value(object obj) nogil: +cdef inline npy_datetime get_datetime64_value(object obj) noexcept nogil: """ returns the int64 value underlying scalar numpy datetime64 object @@ -1087,14 +1077,14 @@ cdef inline npy_datetime get_datetime64_value(object obj) nogil: return (obj).obval -cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: +cdef inline npy_timedelta get_timedelta64_value(object obj) noexcept nogil: """ returns the int64 value underlying scalar numpy timedelta64 object """ return (obj).obval -cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: +cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) noexcept nogil: """ returns the unit part of the dtype for a numpy datetime64 object. """ diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd index 096714f6d7cd..8e7583bcb97d 100644 --- a/numpy/__init__.pxd +++ b/numpy/__init__.pxd @@ -16,13 +16,27 @@ from cpython.buffer cimport PyObject_GetBuffer from cpython.type cimport type cimport libc.stdio as stdio + +cdef extern from *: + # Leave a marker that the NumPy declarations came from NumPy itself and not from Cython. 
+ # See https://github.com/cython/cython/issues/3573 + """ + /* Using NumPy API declarations from "numpy/__init__.pxd" */ + """ + + cdef extern from "Python.h": ctypedef int Py_intptr_t bint PyObject_TypeCheck(object obj, PyTypeObject* type) cdef extern from "numpy/arrayobject.h": - ctypedef Py_intptr_t npy_intp - ctypedef size_t npy_uintp + # It would be nice to use size_t and ssize_t, but ssize_t has special + # implicit conversion rules, so just use "long". + # Note: The actual type only matters for Cython promotion, so long + # is closer than int, but could lead to incorrect promotion. + # (Not to worrying, and always the status-quo.) + ctypedef signed long npy_intp + ctypedef unsigned long npy_uintp ctypedef unsigned char npy_bool @@ -63,36 +77,28 @@ cdef extern from "numpy/arrayobject.h": ctypedef long double npy_float128 ctypedef struct npy_cfloat: - float real - float imag + pass ctypedef struct npy_cdouble: - double real - double imag + pass ctypedef struct npy_clongdouble: - long double real - long double imag + pass ctypedef struct npy_complex64: - float real - float imag + pass ctypedef struct npy_complex128: - double real - double imag + pass ctypedef struct npy_complex160: - long double real - long double imag + pass ctypedef struct npy_complex192: - long double real - long double imag + pass ctypedef struct npy_complex256: - long double real - long double imag + pass ctypedef struct PyArray_Dims: npy_intp *ptr @@ -154,7 +160,7 @@ cdef extern from "numpy/arrayobject.h": NPY_COMPLEX512 NPY_INTP - NPY_DEFAULT_INT + NPY_DEFAULT_INT # Not a compile time constant (normally)! ctypedef enum NPY_ORDER: NPY_ANYORDER @@ -350,7 +356,10 @@ cdef extern from "numpy/arrayobject.h": PyObject *PyArray_BASE(ndarray) nogil # returns borrowed reference! PyArray_Descr *PyArray_DESCR(ndarray) nogil # returns borrowed reference to dtype! + PyArray_Descr *PyArray_DTYPE(ndarray) nogil # returns borrowed reference to dtype! NP 1.7+ alias for descr. 
int PyArray_FLAGS(ndarray) nogil + void PyArray_CLEARFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 + void PyArray_ENABLEFLAGS(ndarray, int flags) nogil # Added in NumPy 1.7 npy_intp PyArray_ITEMSIZE(ndarray) nogil int PyArray_TYPE(ndarray arr) nogil @@ -371,7 +380,6 @@ cdef extern from "numpy/arrayobject.h": bint PyTypeNum_ISOBJECT(int) nogil npy_intp PyDataType_ELSIZE(dtype) nogil - void PyDataType_SET_ELSIZE(dtype, npy_intp) nogil npy_intp PyDataType_ALIGNMENT(dtype) nogil PyObject* PyDataType_METADATA(dtype) nogil PyArray_ArrayDescr* PyDataType_SUBARRAY(dtype) nogil @@ -501,6 +509,12 @@ cdef extern from "numpy/arrayobject.h": void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil + npy_intp PyArray_MultiIter_SIZE(broadcast multi) nogil + int PyArray_MultiIter_NDIM(broadcast multi) nogil + npy_intp PyArray_MultiIter_INDEX(broadcast multi) nogil + int PyArray_MultiIter_NUMITER(broadcast multi) nogil + npy_intp* PyArray_MultiIter_DIMS(broadcast multi) nogil + void** PyArray_MultiIter_ITERS(broadcast multi) nogil # Functions from __multiarray_api.h @@ -509,7 +523,6 @@ cdef extern from "numpy/arrayobject.h": # more than is probably needed until it can be checked further. int PyArray_INCREF (ndarray) except * # uses PyArray_Item_INCREF... int PyArray_XDECREF (ndarray) except * # uses PyArray_Item_DECREF... 
- void PyArray_SetStringFunction (object, int) dtype PyArray_DescrFromType (int) object PyArray_TypeObjectFromType (int) char * PyArray_Zero (ndarray) @@ -700,11 +713,10 @@ ctypedef npy_double float_t ctypedef npy_double double_t ctypedef npy_longdouble longdouble_t -ctypedef npy_cfloat cfloat_t -ctypedef npy_cdouble cdouble_t -ctypedef npy_clongdouble clongdouble_t - -ctypedef npy_cdouble complex_t +ctypedef float complex cfloat_t +ctypedef double complex cdouble_t +ctypedef double complex complex_t +ctypedef long double complex clongdouble_t cdef inline object PyArray_MultiIterNew1(a): return PyArray_MultiIterNew(1, a) @@ -939,13 +951,6 @@ cdef inline int import_ufunc() except -1: except Exception: raise ImportError("numpy._core.umath failed to import") -cdef extern from *: - # Leave a marker that the NumPy declarations came from this file - # See https://github.com/cython/cython/issues/3573 - """ - /* NumPy API declarations from "numpy/__init__.pxd" */ - """ - cdef inline bint is_timedelta64_object(object obj): """ @@ -999,3 +1004,137 @@ cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: returns the unit part of the dtype for a numpy datetime64 object. 
""" return (obj).obmeta.base + + +# Iterator API added in v1.6 +ctypedef int (*NpyIter_IterNextFunc)(NpyIter* it) noexcept nogil +ctypedef void (*NpyIter_GetMultiIndexFunc)(NpyIter* it, npy_intp* outcoords) noexcept nogil + +cdef extern from "numpy/arrayobject.h": + + ctypedef struct NpyIter: + pass + + cdef enum: + NPY_FAIL + NPY_SUCCEED + + cdef enum: + # Track an index representing C order + NPY_ITER_C_INDEX + # Track an index representing Fortran order + NPY_ITER_F_INDEX + # Track a multi-index + NPY_ITER_MULTI_INDEX + # User code external to the iterator does the 1-dimensional innermost loop + NPY_ITER_EXTERNAL_LOOP + # Convert all the operands to a common data type + NPY_ITER_COMMON_DTYPE + # Operands may hold references, requiring API access during iteration + NPY_ITER_REFS_OK + # Zero-sized operands should be permitted, iteration checks IterSize for 0 + NPY_ITER_ZEROSIZE_OK + # Permits reductions (size-0 stride with dimension size > 1) + NPY_ITER_REDUCE_OK + # Enables sub-range iteration + NPY_ITER_RANGED + # Enables buffering + NPY_ITER_BUFFERED + # When buffering is enabled, grows the inner loop if possible + NPY_ITER_GROWINNER + # Delay allocation of buffers until first Reset* call + NPY_ITER_DELAY_BUFALLOC + # When NPY_KEEPORDER is specified, disable reversing negative-stride axes + NPY_ITER_DONT_NEGATE_STRIDES + NPY_ITER_COPY_IF_OVERLAP + # The operand will be read from and written to + NPY_ITER_READWRITE + # The operand will only be read from + NPY_ITER_READONLY + # The operand will only be written to + NPY_ITER_WRITEONLY + # The operand's data must be in native byte order + NPY_ITER_NBO + # The operand's data must be aligned + NPY_ITER_ALIGNED + # The operand's data must be contiguous (within the inner loop) + NPY_ITER_CONTIG + # The operand may be copied to satisfy requirements + NPY_ITER_COPY + # The operand may be copied with WRITEBACKIFCOPY to satisfy requirements + NPY_ITER_UPDATEIFCOPY + # Allocate the operand if it is NULL + NPY_ITER_ALLOCATE 
+ # If an operand is allocated, don't use any subtype + NPY_ITER_NO_SUBTYPE + # This is a virtual array slot, operand is NULL but temporary data is there + NPY_ITER_VIRTUAL + # Require that the dimension match the iterator dimensions exactly + NPY_ITER_NO_BROADCAST + # A mask is being used on this array, affects buffer -> array copy + NPY_ITER_WRITEMASKED + # This array is the mask for all WRITEMASKED operands + NPY_ITER_ARRAYMASK + # Assume iterator order data access for COPY_IF_OVERLAP + NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE + + # construction and destruction functions + NpyIter* NpyIter_New(ndarray arr, npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, dtype datatype) except NULL + NpyIter* NpyIter_MultiNew(npy_intp nop, PyArrayObject** op, npy_uint32 flags, + NPY_ORDER order, NPY_CASTING casting, npy_uint32* + op_flags, PyArray_Descr** op_dtypes) except NULL + NpyIter* NpyIter_AdvancedNew(npy_intp nop, PyArrayObject** op, + npy_uint32 flags, NPY_ORDER order, + NPY_CASTING casting, npy_uint32* op_flags, + PyArray_Descr** op_dtypes, int oa_ndim, + int** op_axes, const npy_intp* itershape, + npy_intp buffersize) except NULL + NpyIter* NpyIter_Copy(NpyIter* it) except NULL + int NpyIter_RemoveAxis(NpyIter* it, int axis) except NPY_FAIL + int NpyIter_RemoveMultiIndex(NpyIter* it) except NPY_FAIL + int NpyIter_EnableExternalLoop(NpyIter* it) except NPY_FAIL + int NpyIter_Deallocate(NpyIter* it) except NPY_FAIL + int NpyIter_Reset(NpyIter* it, char** errmsg) except NPY_FAIL + int NpyIter_ResetToIterIndexRange(NpyIter* it, npy_intp istart, + npy_intp iend, char** errmsg) except NPY_FAIL + int NpyIter_ResetBasePointers(NpyIter* it, char** baseptrs, char** errmsg) except NPY_FAIL + int NpyIter_GotoMultiIndex(NpyIter* it, const npy_intp* multi_index) except NPY_FAIL + int NpyIter_GotoIndex(NpyIter* it, npy_intp index) except NPY_FAIL + npy_intp NpyIter_GetIterSize(NpyIter* it) nogil + npy_intp NpyIter_GetIterIndex(NpyIter* it) nogil + void 
NpyIter_GetIterIndexRange(NpyIter* it, npy_intp* istart, + npy_intp* iend) nogil + int NpyIter_GotoIterIndex(NpyIter* it, npy_intp iterindex) except NPY_FAIL + npy_bool NpyIter_HasDelayedBufAlloc(NpyIter* it) nogil + npy_bool NpyIter_HasExternalLoop(NpyIter* it) nogil + npy_bool NpyIter_HasMultiIndex(NpyIter* it) nogil + npy_bool NpyIter_HasIndex(NpyIter* it) nogil + npy_bool NpyIter_RequiresBuffering(NpyIter* it) nogil + npy_bool NpyIter_IsBuffered(NpyIter* it) nogil + npy_bool NpyIter_IsGrowInner(NpyIter* it) nogil + npy_intp NpyIter_GetBufferSize(NpyIter* it) nogil + int NpyIter_GetNDim(NpyIter* it) nogil + int NpyIter_GetNOp(NpyIter* it) nogil + npy_intp* NpyIter_GetAxisStrideArray(NpyIter* it, int axis) except NULL + int NpyIter_GetShape(NpyIter* it, npy_intp* outshape) nogil + PyArray_Descr** NpyIter_GetDescrArray(NpyIter* it) + PyArrayObject** NpyIter_GetOperandArray(NpyIter* it) + ndarray NpyIter_GetIterView(NpyIter* it, npy_intp i) + void NpyIter_GetReadFlags(NpyIter* it, char* outreadflags) + void NpyIter_GetWriteFlags(NpyIter* it, char* outwriteflags) + int NpyIter_CreateCompatibleStrides(NpyIter* it, npy_intp itemsize, + npy_intp* outstrides) except NPY_FAIL + npy_bool NpyIter_IsFirstVisit(NpyIter* it, int iop) nogil + # functions for iterating an NpyIter object + NpyIter_IterNextFunc* NpyIter_GetIterNext(NpyIter* it, char** errmsg) except NULL + NpyIter_GetMultiIndexFunc* NpyIter_GetGetMultiIndex(NpyIter* it, + char** errmsg) except NULL + char** NpyIter_GetDataPtrArray(NpyIter* it) nogil + char** NpyIter_GetInitialDataPtrArray(NpyIter* it) nogil + npy_intp* NpyIter_GetIndexPtr(NpyIter* it) + npy_intp* NpyIter_GetInnerStrideArray(NpyIter* it) nogil + npy_intp* NpyIter_GetInnerLoopSizePtr(NpyIter* it) nogil + void NpyIter_GetInnerFixedStrideArray(NpyIter* it, npy_intp* outstrides) nogil + npy_bool NpyIter_IterationNeedsAPI(NpyIter* it) nogil + void NpyIter_DebugPrint(NpyIter* it) diff --git a/numpy/__init__.py b/numpy/__init__.py index 
e4696ba2108b..27e5d2d6801d 100644 --- a/numpy/__init__.py +++ b/numpy/__init__.py @@ -134,23 +134,23 @@ can_cast, cbrt, cdouble, ceil, character, choose, clip, clongdouble, complex128, complex64, complexfloating, compress, concat, concatenate, conj, conjugate, convolve, copysign, copyto, correlate, cos, cosh, - count_nonzero, cross, csingle, cumprod, cumsum, - datetime64, datetime_as_string, datetime_data, deg2rad, degrees, - diagonal, divide, divmod, dot, double, dtype, e, einsum, einsum_path, - empty, empty_like, equal, errstate, euler_gamma, exp, exp2, expm1, - fabs, finfo, flatiter, flatnonzero, flexible, float16, float32, - float64, float_power, floating, floor, floor_divide, fmax, fmin, fmod, - format_float_positional, format_float_scientific, frexp, from_dlpack, - frombuffer, fromfile, fromfunction, fromiter, frompyfunc, fromstring, - full, full_like, gcd, generic, geomspace, get_printoptions, - getbufsize, geterr, geterrcall, greater, greater_equal, half, - heaviside, hstack, hypot, identity, iinfo, iinfo, indices, inexact, - inf, inner, int16, int32, int64, int8, int_, intc, integer, intp, - invert, is_busday, isclose, isdtype, isfinite, isfortran, isinf, - isnan, isnat, isscalar, issubdtype, lcm, ldexp, left_shift, less, - less_equal, lexsort, linspace, little_endian, log, log10, log1p, log2, - logaddexp, logaddexp2, logical_and, logical_not, logical_or, - logical_xor, logspace, long, longdouble, longlong, matmul, + count_nonzero, cross, csingle, cumprod, cumsum, cumulative_prod, + cumulative_sum, datetime64, datetime_as_string, datetime_data, + deg2rad, degrees, diagonal, divide, divmod, dot, double, dtype, e, + einsum, einsum_path, empty, empty_like, equal, errstate, euler_gamma, + exp, exp2, expm1, fabs, finfo, flatiter, flatnonzero, flexible, + float16, float32, float64, float_power, floating, floor, floor_divide, + fmax, fmin, fmod, format_float_positional, format_float_scientific, + frexp, from_dlpack, frombuffer, fromfile, fromfunction, fromiter, + 
frompyfunc, fromstring, full, full_like, gcd, generic, geomspace, + get_printoptions, getbufsize, geterr, geterrcall, greater, + greater_equal, half, heaviside, hstack, hypot, identity, iinfo, iinfo, + indices, inexact, inf, inner, int16, int32, int64, int8, int_, intc, + integer, intp, invert, is_busday, isclose, isdtype, isfinite, + isfortran, isinf, isnan, isnat, isscalar, issubdtype, lcm, ldexp, + left_shift, less, less_equal, lexsort, linspace, little_endian, log, + log10, log1p, log2, logaddexp, logaddexp2, logical_and, logical_not, + logical_or, logical_xor, logspace, long, longdouble, longlong, matmul, matrix_transpose, max, maximum, may_share_memory, mean, memmap, min, min_scalar_type, minimum, mod, modf, moveaxis, multiply, nan, ndarray, ndim, nditer, negative, nested_iters, newaxis, nextafter, nonzero, @@ -165,8 +165,8 @@ str_, subtract, sum, swapaxes, take, tan, tanh, tensordot, timedelta64, trace, transpose, true_divide, trunc, typecodes, ubyte, ufunc, uint, uint16, uint32, uint64, uint8, uintc, uintp, ulong, - ulonglong, unsignedinteger, ushort, var, vdot, vecdot, void, vstack, - where, zeros, zeros_like + ulonglong, unsignedinteger, unstack, ushort, var, vdot, vecdot, void, + vstack, where, zeros, zeros_like ) # NOTE: It's still under discussion whether these aliases @@ -235,6 +235,7 @@ ix_, c_, r_, s_, ogrid, mgrid, unravel_index, ravel_multi_index, index_exp ) + from . import matrixlib as _mat from .matrixlib import ( asmatrix, bmat, matrix @@ -289,7 +290,9 @@ # import with `from numpy import *`. 
__future_scalars__ = {"str", "bytes", "object"} - __array_api_version__ = "2022.12" + __array_api_version__ = "2023.12" + + from ._array_api_info import __array_namespace_info__ # now that numpy core module is imported, can initialize limits _core.getlimits._register_known_types() @@ -312,7 +315,7 @@ set(lib._polynomial_impl.__all__) | set(lib._npyio_impl.__all__) | set(lib._index_tricks_impl.__all__) | - {"emath", "show_config", "__version__"} + {"emath", "show_config", "__version__", "__array_namespace_info__"} ) # Filter out Cython harmless warnings @@ -368,7 +371,7 @@ def __getattr__(attr): return char elif attr == "array_api": raise AttributeError("`numpy.array_api` is not available from " - "numpy 2.0 onwards") + "numpy 2.0 onwards", name=None) elif attr == "core": import numpy.core as core return core @@ -381,7 +384,7 @@ def __getattr__(attr): return distutils else: raise AttributeError("`numpy.distutils` is not available from " - "Python 3.12 onwards") + "Python 3.12 onwards", name=None) if attr in __future_scalars__: # And future warnings for those that will change, but also give @@ -391,12 +394,13 @@ def __getattr__(attr): "corresponding NumPy scalar.", FutureWarning, stacklevel=2) if attr in __former_attrs__: - raise AttributeError(__former_attrs__[attr]) + raise AttributeError(__former_attrs__[attr], name=None) if attr in __expired_attributes__: raise AttributeError( f"`np.{attr}` was removed in the NumPy 2.0 release. 
" - f"{__expired_attributes__[attr]}" + f"{__expired_attributes__[attr]}", + name=None ) if attr == "chararray": diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 51103aaa991f..e73d6f16765b 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -18,6 +18,7 @@ from numpy._typing import ( # Arrays ArrayLike, NDArray, + _ArrayLike, _SupportsArray, _NestedSequence, _FiniteNestedSequence, @@ -150,7 +151,10 @@ from numpy._typing._callable import ( _FloatDivMod, _ComplexOp, _NumberOp, - _ComparisonOp, + _ComparisonOpLT, + _ComparisonOpLE, + _ComparisonOpGT, + _ComparisonOpGE, ) # NOTE: Numpy's mypy plugin is used for removing the types unavailable @@ -170,6 +174,8 @@ from numpy._typing._extended_precision import ( complex512 as complex512, ) +from numpy._array_api_info import __array_namespace_info__ as __array_namespace_info__ + from collections.abc import ( Callable, Iterable, @@ -178,6 +184,7 @@ from collections.abc import ( Sequence, ) from typing import ( + TYPE_CHECKING, Literal as L, Any, Generator, @@ -193,8 +200,16 @@ from typing import ( Final, final, ClassVar, + TypeAlias, ) +if sys.version_info >= (3, 11): + from typing import LiteralString +elif TYPE_CHECKING: + from typing_extensions import LiteralString +else: + LiteralString: TypeAlias = str + # Ensures that the stubs are picked up from numpy import ( ctypeslib as ctypeslib, @@ -258,6 +273,7 @@ from numpy._core.fromnumeric import ( all as all, any as any, cumsum as cumsum, + cumulative_sum as cumulative_sum, ptp as ptp, max as max, min as min, @@ -265,6 +281,7 @@ from numpy._core.fromnumeric import ( amin as amin, prod as prod, cumprod as cumprod, + cumulative_prod as cumulative_prod, ndim as ndim, size as size, around as around, @@ -364,7 +381,6 @@ from numpy._core.numeric import ( convolve as convolve, outer as outer, tensordot as tensordot, - vecdot as vecdot, roll as roll, rollaxis as rollaxis, moveaxis as moveaxis, @@ -385,7 +401,6 @@ from numpy._core.numeric import ( from 
numpy._core.numerictypes import ( isdtype as isdtype, issubdtype as issubdtype, - cast as cast, ScalarType as ScalarType, typecodes as typecodes, ) @@ -398,6 +413,7 @@ from numpy._core.shape_base import ( hstack as hstack, stack as stack, vstack as vstack, + unstack as unstack, ) from numpy.lib import ( @@ -434,7 +450,6 @@ from numpy.lib._function_base_impl import ( angle as angle, unwrap as unwrap, sort_complex as sort_complex, - disp as disp, flip as flip, rot90 as rot90, extract as extract, @@ -459,6 +474,7 @@ from numpy.lib._function_base_impl import ( append as append, interp as interp, quantile as quantile, + trapezoid as trapezoid, ) from numpy.lib._histograms_impl import ( @@ -597,7 +613,7 @@ from numpy.matrixlib import ( bmat as bmat, ) -_AnyStr_contra = TypeVar("_AnyStr_contra", str, bytes, contravariant=True) +_AnyStr_contra = TypeVar("_AnyStr_contra", LiteralString, builtins.str, bytes, contravariant=True) # Protocol for representing file-like-objects accepted # by `ndarray.tofile` and `fromfile` @@ -622,10 +638,10 @@ class _SupportsWrite(Protocol[_AnyStr_contra]): def write(self, s: _AnyStr_contra, /) -> object: ... __all__: list[str] -__dir__: list[str] -__version__: str -__git_version__: str -__array_api_version__: str +def __dir__() -> Sequence[str]: ... + +__version__: LiteralString +__array_api_version__: LiteralString test: PytestTester # TODO: Move placeholders to their respective module once @@ -636,8 +652,101 @@ test: PytestTester def show_config() -> None: ... 
_NdArraySubClass = TypeVar("_NdArraySubClass", bound=NDArray[Any]) +_NdArraySubClass_co = TypeVar("_NdArraySubClass_co", bound=NDArray[Any], covariant=True) _DTypeScalar_co = TypeVar("_DTypeScalar_co", covariant=True, bound=generic) -_ByteOrder = L["S", "<", ">", "=", "|", "L", "B", "N", "I"] +_SCT = TypeVar("_SCT", bound=generic) + +_ByteOrderChar: TypeAlias = L[ + "<", # little-endian + ">", # big-endian + "=", # native order + "|", # ignore +] +# can be anything, is case-insensitive, and only the first character matters +_ByteOrder: TypeAlias = L[ + "S", # swap the current order (default) + "<", "L", "little", # little-endian + ">", "B", "big", # big endian + "=", "N", "native", # native order + "|", "I", # ignore +] +_DTypeKind: TypeAlias = L[ + "b", # boolean + "i", # signed integer + "u", # unsigned integer + "f", # floating-point + "c", # complex floating-point + "m", # timedelta64 + "M", # datetime64 + "O", # python object + "S", # byte-string (fixed-width) + "U", # unicode-string (fixed-width) + "V", # void + "T", # unicode-string (variable-width) +] +_DTypeChar: TypeAlias = L[ + "?", # bool + "b", # byte + "B", # ubyte + "h", # short + "H", # ushort + "i", # intc + "I", # uintc + "l", # long + "L", # ulong + "q", # longlong + "Q", # ulonglong + "e", # half + "f", # single + "d", # double + "g", # longdouble + "F", # csingle + "D", # cdouble + "G", # clongdouble + "O", # object + "S", # bytes_ (S0) + "a", # bytes_ (deprecated) + "U", # str_ + "V", # void + "M", # datetime64 + "m", # timedelta64 + "c", # bytes_ (S1) + "T", # StringDType +] +_DTypeNum: TypeAlias = L[ + 0, # bool + 1, # byte + 2, # ubyte + 3, # short + 4, # ushort + 5, # intc + 6, # uintc + 7, # long + 8, # ulong + 9, # longlong + 10, # ulonglong + 23, # half + 11, # single + 12, # double + 13, # longdouble + 14, # csingle + 15, # cdouble + 16, # clongdouble + 17, # object + 18, # bytes_ + 19, # str_ + 20, # void + 21, # datetime64 + 22, # timedelta64 + 25, # no type + 256, # user-defined + 
2056, # StringDType +] +_DTypeBuiltinKind: TypeAlias = L[ + 0, # structured array type, with fields + 1, # compiled into numpy + 2, # user-defined +] @final class dtype(Generic[_DTypeScalar_co]): @@ -810,60 +919,58 @@ class dtype(Generic[_DTypeScalar_co]): metadata: dict[builtins.str, Any] = ..., ) -> dtype[object_]: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload - def __getitem__(self: dtype[void], key: list[builtins.str]) -> dtype[void]: ... + def __getitem__(self: dtype[void], key: list[builtins.str], /) -> dtype[void]: ... @overload - def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex) -> dtype[Any]: ... + def __getitem__(self: dtype[void], key: builtins.str | SupportsIndex, /) -> dtype[Any]: ... # NOTE: In the future 1-based multiplications will also yield `flexible` dtypes @overload - def __mul__(self: _DType, value: L[1]) -> _DType: ... + def __mul__(self: _DType, value: L[1], /) -> _DType: ... @overload - def __mul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + def __mul__(self: _FlexDType, value: SupportsIndex, /) -> _FlexDType: ... @overload - def __mul__(self, value: SupportsIndex) -> dtype[void]: ... + def __mul__(self, value: SupportsIndex, /) -> dtype[void]: ... # NOTE: `__rmul__` seems to be broken when used in combination with # literals as of mypy 0.902. Set the return-type to `dtype[Any]` for # now for non-flexible dtypes. @overload - def __rmul__(self: _FlexDType, value: SupportsIndex) -> _FlexDType: ... + def __rmul__(self: _FlexDType, value: SupportsIndex, /) -> _FlexDType: ... @overload - def __rmul__(self, value: SupportsIndex) -> dtype[Any]: ... + def __rmul__(self, value: SupportsIndex, /) -> dtype[Any]: ... - def __gt__(self, other: DTypeLike) -> builtins.bool: ... - def __ge__(self, other: DTypeLike) -> builtins.bool: ... - def __lt__(self, other: DTypeLike) -> builtins.bool: ... 
- def __le__(self, other: DTypeLike) -> builtins.bool: ... + def __gt__(self, other: DTypeLike, /) -> builtins.bool: ... + def __ge__(self, other: DTypeLike, /) -> builtins.bool: ... + def __lt__(self, other: DTypeLike, /) -> builtins.bool: ... + def __le__(self, other: DTypeLike, /) -> builtins.bool: ... # Explicitly defined `__eq__` and `__ne__` to get around mypy's # `strict_equality` option; even though their signatures are # identical to their `object`-based counterpart - def __eq__(self, other: Any) -> builtins.bool: ... - def __ne__(self, other: Any) -> builtins.bool: ... + def __eq__(self, other: Any, /) -> builtins.bool: ... + def __ne__(self, other: Any, /) -> builtins.bool: ... @property def alignment(self) -> int: ... @property def base(self) -> dtype[Any]: ... @property - def byteorder(self) -> builtins.str: ... + def byteorder(self) -> _ByteOrderChar: ... @property - def char(self) -> builtins.str: ... + def char(self) -> _DTypeChar: ... @property - def descr(self) -> list[tuple[builtins.str, builtins.str] | tuple[builtins.str, builtins.str, _Shape]]: ... + def descr(self) -> list[tuple[LiteralString, LiteralString] | tuple[LiteralString, LiteralString, _Shape]]: ... @property - def fields( - self, - ) -> None | MappingProxyType[builtins.str, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... + def fields(self,) -> None | MappingProxyType[LiteralString, tuple[dtype[Any], int] | tuple[dtype[Any], int, Any]]: ... @property def flags(self) -> int: ... @property def hasobject(self) -> builtins.bool: ... @property - def isbuiltin(self) -> int: ... + def isbuiltin(self) -> _DTypeBuiltinKind: ... @property def isnative(self) -> builtins.bool: ... @property @@ -871,26 +978,26 @@ class dtype(Generic[_DTypeScalar_co]): @property def itemsize(self) -> int: ... @property - def kind(self) -> builtins.str: ... + def kind(self) -> _DTypeKind: ... @property def metadata(self) -> None | MappingProxyType[builtins.str, Any]: ... 
@property - def name(self) -> builtins.str: ... + def name(self) -> LiteralString: ... @property - def num(self) -> int: ... + def num(self) -> _DTypeNum: ... @property - def shape(self) -> _Shape: ... + def shape(self) -> tuple[()] | _Shape: ... @property def ndim(self) -> int: ... @property def subdtype(self) -> None | tuple[dtype[Any], _Shape]: ... - def newbyteorder(self: _DType, __new_order: _ByteOrder = ...) -> _DType: ... + def newbyteorder(self: _DType, new_order: _ByteOrder = ..., /) -> _DType: ... @property - def str(self) -> builtins.str: ... + def str(self) -> LiteralString: ... @property def type(self) -> type[_DTypeScalar_co]: ... -_ArrayLikeInt = ( +_ArrayLikeInt: TypeAlias = ( int | integer[Any] | Sequence[int | integer[Any]] @@ -899,17 +1006,18 @@ _ArrayLikeInt = ( ) _FlatIterSelf = TypeVar("_FlatIterSelf", bound=flatiter[Any]) +_FlatShapeType = TypeVar("_FlatShapeType", bound=tuple[int]) @final -class flatiter(Generic[_NdArraySubClass]): +class flatiter(Generic[_NdArraySubClass_co]): __hash__: ClassVar[None] @property - def base(self) -> _NdArraySubClass: ... + def base(self) -> _NdArraySubClass_co: ... @property def coords(self) -> _Shape: ... @property def index(self) -> int: ... - def copy(self) -> _NdArraySubClass: ... + def copy(self) -> _NdArraySubClass_co: ... def __iter__(self: _FlatIterSelf) -> _FlatIterSelf: ... def __next__(self: flatiter[NDArray[_ScalarType]]) -> _ScalarType: ... def __len__(self) -> int: ... @@ -922,7 +1030,7 @@ class flatiter(Generic[_NdArraySubClass]): def __getitem__( self, key: _ArrayLikeInt | slice | ellipsis | tuple[_ArrayLikeInt | slice | ellipsis], - ) -> _NdArraySubClass: ... + ) -> _NdArraySubClass_co: ... # TODO: `__setitem__` operates via `unsafe` casting rules, and can # thus accept any type accepted by the relevant underlying `np.generic` # constructor. @@ -933,18 +1041,29 @@ class flatiter(Generic[_NdArraySubClass]): value: Any, ) -> None: ... 
@overload + def __array__(self: flatiter[ndarray[_FlatShapeType, _DType]], dtype: None = ..., /) -> ndarray[_FlatShapeType, _DType]: ... + @overload + def __array__(self: flatiter[ndarray[_FlatShapeType, Any]], dtype: _DType, /) -> ndarray[_FlatShapeType, _DType]: ... + @overload def __array__(self: flatiter[ndarray[Any, _DType]], dtype: None = ..., /) -> ndarray[Any, _DType]: ... @overload def __array__(self, dtype: _DType, /) -> ndarray[Any, _DType]: ... -_OrderKACF = L[None, "K", "A", "C", "F"] -_OrderACF = L[None, "A", "C", "F"] -_OrderCF = L[None, "C", "F"] - -_ModeKind = L["raise", "wrap", "clip"] -_PartitionKind = L["introselect"] -_SortKind = L["quicksort", "mergesort", "heapsort", "stable"] -_SortSide = L["left", "right"] +_OrderKACF: TypeAlias = L[None, "K", "A", "C", "F"] +_OrderACF: TypeAlias = L[None, "A", "C", "F"] +_OrderCF: TypeAlias = L[None, "C", "F"] + +_ModeKind: TypeAlias = L["raise", "wrap", "clip"] +_PartitionKind: TypeAlias = L["introselect"] +# in practice, only the first case-insensitive character is considered (so e.g. +# "QuantumSort3000" will be interpreted as quicksort). +_SortKind: TypeAlias = L[ + "Q", "quick", "quicksort", + "M", "merge", "mergesort", + "H", "heap", "heapsort", + "S", "stable", "stablesort", +] +_SortSide: TypeAlias = L["left", "right"] _ArraySelf = TypeVar("_ArraySelf", bound=_ArrayOrScalarCommon) @@ -961,6 +1080,8 @@ class _ArrayOrScalarCommon: def itemsize(self) -> int: ... @property def nbytes(self) -> int: ... + @property + def device(self) -> L["cpu"]: ... def __bool__(self) -> builtins.bool: ... def __bytes__(self) -> bytes: ... def __str__(self) -> str: ... @@ -970,8 +1091,8 @@ class _ArrayOrScalarCommon: # TODO: How to deal with the non-commutative nature of `==` and `!=`? # xref numpy/numpy#17368 - def __eq__(self, other: Any) -> Any: ... - def __ne__(self, other: Any) -> Any: ... + def __eq__(self, other: Any, /) -> Any: ... + def __ne__(self, other: Any, /) -> Any: ... 
def copy(self: _ArraySelf, order: _OrderKACF = ...) -> _ArraySelf: ... def dump(self, file: str | bytes | os.PathLike[str] | os.PathLike[bytes] | _SupportsWrite[bytes]) -> None: ... def dumps(self) -> bytes: ... @@ -993,6 +1114,7 @@ class _ArrayOrScalarCommon: def __array_priority__(self) -> float: ... @property def __array_struct__(self) -> Any: ... # builtins.PyCapsule + def __array_namespace__(self, *, api_version: None | _ArrayAPIVersion = ...) -> Any: ... def __setstate__(self, state: tuple[ SupportsIndex, # version _ShapeLike, # Shape @@ -1378,16 +1500,15 @@ _DType = TypeVar("_DType", bound=dtype[Any]) _DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) _FlexDType = TypeVar("_FlexDType", bound=dtype[flexible]) -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) -_ShapeType2 = TypeVar("_ShapeType2", bound=Any) +_ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...]) +_ShapeType2 = TypeVar("_ShapeType2", bound=tuple[int, ...]) +_Shape2DType_co = TypeVar("_Shape2DType_co", covariant=True, bound=tuple[int, int]) _NumberType = TypeVar("_NumberType", bound=number[Any]) if sys.version_info >= (3, 12): from collections.abc import Buffer as _SupportsBuffer else: - _SupportsBuffer = ( + _SupportsBuffer: TypeAlias = ( bytes | bytearray | memoryview @@ -1400,22 +1521,25 @@ else: _T = TypeVar("_T") _T_co = TypeVar("_T_co", covariant=True) _T_contra = TypeVar("_T_contra", contravariant=True) -_2Tuple = tuple[_T, _T] -_CastingKind = L["no", "equiv", "safe", "same_kind", "unsafe"] +_2Tuple: TypeAlias = tuple[_T, _T] +_CastingKind: TypeAlias = L["no", "equiv", "safe", "same_kind", "unsafe"] -_ArrayUInt_co = NDArray[np.bool | unsignedinteger[Any]] -_ArrayInt_co = NDArray[np.bool | integer[Any]] -_ArrayFloat_co = NDArray[np.bool | integer[Any] | floating[Any]] -_ArrayComplex_co = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] 
-_ArrayNumber_co = NDArray[np.bool | number[Any]] -_ArrayTD64_co = NDArray[np.bool | integer[Any] | timedelta64] +_ArrayUInt_co: TypeAlias = NDArray[np.bool | unsignedinteger[Any]] +_ArrayInt_co: TypeAlias = NDArray[np.bool | integer[Any]] +_ArrayFloat_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any]] +_ArrayComplex_co: TypeAlias = NDArray[np.bool | integer[Any] | floating[Any] | complexfloating[Any, Any]] +_ArrayNumber_co: TypeAlias = NDArray[np.bool | number[Any]] +_ArrayTD64_co: TypeAlias = NDArray[np.bool | integer[Any] | timedelta64] # Introduce an alias for `dtype` to avoid naming conflicts. -_dtype = dtype +_dtype: TypeAlias = dtype[_ScalarType] -# `builtins.PyCapsule` unfortunately lacks annotations as of the moment; -# use `Any` as a stopgap measure -_PyCapsule = Any +if sys.version_info >= (3, 13): + from types import CapsuleType as _PyCapsule +else: + _PyCapsule: TypeAlias = Any + +_ArrayAPIVersion: TypeAlias = L["2021.12", "2022.12", "2023.12"] class _SupportsItem(Protocol[_T_co]): def item(self, args: Any, /) -> _T_co: ... @@ -1428,7 +1552,7 @@ class _SupportsImag(Protocol[_T_co]): @property def imag(self) -> _T_co: ... -class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): +class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType_co, _DType_co]): __hash__: ClassVar[None] @property def base(self) -> None | NDArray[Any]: ... @@ -1438,14 +1562,14 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): def size(self) -> int: ... @property def real( - self: ndarray[_ShapeType, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] - ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + self: ndarray[_ShapeType_co, dtype[_SupportsReal[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType_co, _dtype[_ScalarType]]: ... @real.setter def real(self, value: ArrayLike) -> None: ... 
@property def imag( - self: ndarray[_ShapeType, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] - ) -> ndarray[_ShapeType, _dtype[_ScalarType]]: ... + self: ndarray[_ShapeType_co, dtype[_SupportsImag[_ScalarType]]], # type: ignore[type-var] + ) -> ndarray[_ShapeType_co, _dtype[_ScalarType]]: ... @imag.setter def imag(self, value: ArrayLike) -> None: ... def __new__( @@ -1461,16 +1585,16 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): if sys.version_info >= (3, 12): def __buffer__(self, flags: int, /) -> memoryview: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... @overload def __array__( self, dtype: None = ..., /, *, copy: None | bool = ... - ) -> ndarray[Any, _DType_co]: ... + ) -> ndarray[_ShapeType_co, _DType_co]: ... @overload def __array__( self, dtype: _DType, /, *, copy: None | bool = ... - ) -> ndarray[Any, _DType]: ... + ) -> ndarray[_ShapeType_co, _DType]: ... def __array_ufunc__( self, @@ -1521,12 +1645,12 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ... @overload - def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, _dtype[void]]: ... + def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType_co, _dtype[void]]: ... @property def ctypes(self) -> _ctypes[int]: ... @property - def shape(self) -> _Shape: ... + def shape(self) -> _ShapeType_co: ... @shape.setter def shape(self, value: _ShapeLike) -> None: ... @property @@ -1701,11 +1825,13 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): axis: None | SupportsIndex = ..., ) -> ndarray[Any, _DType_co]: ... + # TODO: use `tuple[int]` as shape type once covariant (#26081) def flatten( self, order: _OrderKACF = ..., ) -> ndarray[Any, _DType_co]: ... 
+ # TODO: use `tuple[int]` as shape type once covariant (#26081) def ravel( self, order: _OrderKACF = ..., @@ -1713,11 +1839,19 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): @overload def reshape( - self, shape: _ShapeLike, /, *, order: _OrderACF = ... + self, + shape: _ShapeLike, + /, + *, + order: _OrderACF = ..., + copy: None | bool = ..., ) -> ndarray[Any, _DType_co]: ... @overload def reshape( - self, *shape: SupportsIndex, order: _OrderACF = ... + self, + *shape: SupportsIndex, + order: _OrderACF = ..., + copy: None | bool = ..., ) -> ndarray[Any, _DType_co]: ... @overload @@ -1796,48 +1930,48 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # strings, it will pass through the final overload otherwise @overload - def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __lt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... @overload - def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __lt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __le__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... 
+ def __le__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __le__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __le__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... @overload - def __le__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __le__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __gt__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... @overload - def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __gt__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayNumber_co, other: _ArrayLikeNumber_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[np.bool]: ... + def __ge__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[np.bool]: ... 
@overload - def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[object_], other: Any) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[object_], other: Any, /) -> NDArray[np.bool]: ... @overload - def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co) -> NDArray[np.bool]: ... + def __ge__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> NDArray[np.bool]: ... # Unary ops @overload @@ -1878,500 +2012,500 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # Binary ops @overload - def __matmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __matmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __matmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __matmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
+ def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __matmul__(self: NDArray[object_], other: Any) -> Any: ... + def __matmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rmatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmatmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
+ def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ... + def __rmatmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __mod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __mod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... 
+ def __mod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... @overload - def __mod__(self: NDArray[object_], other: Any) -> Any: ... + def __mod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __mod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __rmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... 
@overload - def __rmod__(self: NDArray[object_], other: Any) -> Any: ... + def __rmod__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmod__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __divmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... + def __divmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> _2Tuple[NDArray[Any]]: ... @overload def __divmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __divmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __divmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __rdivmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> _2Tuple[NDArray[Any]]: ... 
+ def __rdivmod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> _2Tuple[NDArray[Any]]: ... @overload - def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] + def __rdivmod__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> _2Tuple[NDArray[int8]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> _2Tuple[NDArray[unsignedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> _2Tuple[NDArray[signedinteger[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] + def __rdivmod__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> _2Tuple[NDArray[floating[Any]]]: ... # type: ignore[misc] @overload - def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... + def __rdivmod__(self: _ArrayTD64_co, other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> tuple[NDArray[int64], NDArray[timedelta64]]: ... @overload - def __add__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __add__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __add__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[misc] @overload - def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload - def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __add__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __add__(self: NDArray[object_], other: Any) -> Any: ... 
+ def __add__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __add__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __add__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __radd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __radd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __radd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... 
# type: ignore[misc] + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... + def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... @overload - def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __radd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __radd__(self: NDArray[object_], other: Any) -> Any: ... + def __radd__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __radd__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __sub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ... + def __sub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... 
# type: ignore[misc] @overload - def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + def __sub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... @overload - def __sub__(self: NDArray[object_], other: Any) -> Any: ... + def __sub__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __sub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rsub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NoReturn: ... + def __rsub__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] + def __rsub__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... # type: ignore[misc] @overload - def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc] + def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co, /) -> NDArray[datetime64]: ... # type: ignore[misc] @overload - def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co) -> NDArray[timedelta64]: ... + def __rsub__(self: NDArray[datetime64], other: _ArrayLikeDT64_co, /) -> NDArray[timedelta64]: ... @overload - def __rsub__(self: NDArray[object_], other: Any) -> Any: ... + def __rsub__(self: NDArray[object_], other: Any, /) -> Any: ... 
@overload - def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rsub__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __mul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __mul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __mul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... 
+ def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __mul__(self: NDArray[object_], other: Any) -> Any: ... + def __mul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __mul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... 
# type: ignore[misc] + def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __rmul__(self: NDArray[object_], other: Any) -> Any: ... + def __rmul__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rmul__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __floordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __floordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __floordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... 
# type: ignore[misc] + def __floordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __floordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ... + def __floordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __floordiv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __floordiv__(self: NDArray[object_], other: Any) -> Any: ... + def __floordiv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __floordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rfloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rfloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... 
# type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[int64]: ... + def __rfloordiv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[int64]: ... @overload - def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ... + def __rfloordiv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... @overload - def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __rfloordiv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __rfloordiv__(self: NDArray[object_], other: Any) -> Any: ... + def __rfloordiv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __pow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... 
+ def __pow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __pow__(self: NDArray[object_], other: Any) -> Any: ... + def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rpow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rpow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... 
@overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... @overload - def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __rpow__(self: NDArray[object_], other: Any) -> Any: ... + def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __truediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __truediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... 
@overload - def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + def __truediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ... + def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __truediv__(self: NDArray[object_], other: Any) -> Any: ... + def __truediv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... 
+ def __truediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rtruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rtruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co) -> NDArray[float64]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayInt_co, other: _ArrayInt_co, /) -> NDArray[float64]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating[Any]]: ... # type: ignore[misc] @overload - def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] + def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc] @overload - def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ... + def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co, /) -> NDArray[number[Any]]: ... @overload - def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ... + def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[float64]: ... @overload - def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co) -> NoReturn: ... + def __rtruediv__(self: NDArray[np.bool], other: _ArrayLikeTD64_co, /) -> NoReturn: ... @overload - def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... 
+ def __rtruediv__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __rtruediv__(self: NDArray[object_], other: Any) -> Any: ... + def __rtruediv__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rtruediv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __lshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __lshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __lshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __lshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __lshift__(self: NDArray[object_], other: Any) -> Any: ... + def __lshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __lshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rlshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rlshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... 
# type: ignore[misc] + def __rlshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rlshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rlshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __rlshift__(self: NDArray[object_], other: Any) -> Any: ... + def __rlshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rlshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __rshift__(self: NDArray[object_], other: Any) -> Any: ... + def __rshift__(self: NDArray[object_], other: Any, /) -> Any: ... 
@overload - def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rrshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rrshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[int8]: ... # type: ignore[misc] + def __rrshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @overload - def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rrshift__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rrshift__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __rrshift__(self: NDArray[object_], other: Any) -> Any: ... + def __rrshift__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rrshift__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __and__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __and__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __and__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... 
# type: ignore[misc] + def __and__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __and__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __and__(self: NDArray[object_], other: Any) -> Any: ... + def __and__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __and__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __and__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rand__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rand__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __rand__(self: NDArray[object_], other: Any) -> Any: ... + def __rand__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rand__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __xor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... 
+ def __xor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __xor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __xor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __xor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __xor__(self: NDArray[object_], other: Any) -> Any: ... + def __xor__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __xor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __rxor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __rxor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __rxor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __rxor__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __rxor__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... 
@overload - def __rxor__(self: NDArray[object_], other: Any) -> Any: ... + def __rxor__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __rxor__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __or__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __or__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __or__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... # type: ignore[misc] @overload - def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __or__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __or__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __or__(self: NDArray[object_], other: Any) -> Any: ... + def __or__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __or__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __or__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __ror__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __ror__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc] + def __ror__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... 
# type: ignore[misc] @overload - def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] + def __ror__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc] @overload - def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... + def __ror__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[Any]]: ... @overload - def __ror__(self: NDArray[object_], other: Any) -> Any: ... + def __ror__(self: NDArray[object_], other: Any, /) -> Any: ... @overload - def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ... + def __ror__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... # `np.generic` does not support inplace operations @@ -2380,189 +2514,195 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]): # also accepts a signed integer for the right operand as long it is a 0D # object and its value is >= 0 @overload - def __iadd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __iadd__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __iadd__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __iadd__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __iadd__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... 
@overload - def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __iadd__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __iadd__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __iadd__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __iadd__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __iadd__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __iadd__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __isub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __isub__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __isub__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __isub__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... 
+ def __isub__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __isub__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... + def __isub__(self: NDArray[timedelta64], other: _ArrayLikeTD64_co, /) -> NDArray[timedelta64]: ... @overload - def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ... + def __isub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co, /) -> NDArray[datetime64]: ... @overload - def __isub__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __isub__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __imul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __imul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __imul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __imul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __imul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... 
@overload - def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __imul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __imul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ... + def __imul__(self: NDArray[timedelta64], other: _ArrayLikeFloat_co, /) -> NDArray[timedelta64]: ... @overload - def __imul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __imul__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __itruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __itruediv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __itruediv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __itruediv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... 
+ def __itruediv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co, /) -> NDArray[timedelta64]: ... @overload - def __itruediv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __itruediv__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __ifloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __ifloordiv__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ifloordiv__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __ifloordiv__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __ifloordiv__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __ifloordiv__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ... + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co, /) -> NoReturn: ... @overload - def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co) -> NDArray[timedelta64]: ... + def __ifloordiv__(self: NDArray[timedelta64], other: _ArrayLikeInt_co, /) -> NDArray[timedelta64]: ... 
@overload - def __ifloordiv__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __ifloordiv__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __ipow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __ipow__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ipow__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __ipow__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __ipow__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __ipow__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __ipow__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __ipow__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __imod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __imod__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... 
+ def __imod__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __imod__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __imod__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[timedelta64]: ... + def __imod__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]], /) -> NDArray[timedelta64]: ... @overload - def __imod__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __imod__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __ilshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __ilshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ilshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __ilshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ilshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... 
+ def __ilshift__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __irshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __irshift__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __irshift__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __irshift__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __irshift__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __irshift__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __iand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __iand__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __iand__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __iand__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __iand__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... 
@overload - def __iand__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __iand__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __ixor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __ixor__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __ixor__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ixor__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __ixor__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ixor__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __ixor__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __ior__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __ior__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __ior__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __ior__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co | _IntLike_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... 
@overload - def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __ior__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __ior__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... + def __ior__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... @overload - def __imatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown) -> NDArray[Any]: ... + def __imatmul__(self: NDArray[_UnknownType], other: _ArrayLikeUnknown, /) -> NDArray[Any]: ... @overload - def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co) -> NDArray[np.bool]: ... + def __imatmul__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[np.bool]: ... @overload - def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[_NBit1]]: ... + def __imatmul__(self: NDArray[unsignedinteger[_NBit1]], other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger[_NBit1]]: ... @overload - def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co) -> NDArray[signedinteger[_NBit1]]: ... + def __imatmul__(self: NDArray[signedinteger[_NBit1]], other: _ArrayLikeInt_co, /) -> NDArray[signedinteger[_NBit1]]: ... @overload - def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co) -> NDArray[floating[_NBit1]]: ... + def __imatmul__(self: NDArray[floating[_NBit1]], other: _ArrayLikeFloat_co, /) -> NDArray[floating[_NBit1]]: ... @overload - def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... + def __imatmul__(self: NDArray[complexfloating[_NBit1, _NBit1]], other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating[_NBit1, _NBit1]]: ... @overload - def __imatmul__(self: NDArray[object_], other: Any) -> NDArray[object_]: ... 
+ def __imatmul__(self: NDArray[object_], other: Any, /) -> NDArray[object_]: ... - def __dlpack__(self: NDArray[number[Any]], *, stream: None = ...) -> _PyCapsule: ... - def __dlpack_device__(self) -> tuple[int, L[0]]: ... - - def __array_namespace__(self, *, api_version: str | None = ...) -> Any: ... + def __dlpack__( + self: NDArray[number[Any]], + *, + stream: int | Any | None = ..., + max_version: tuple[int, int] | None = ..., + dl_device: tuple[int, L[0]] | None = ..., + copy: bool | None = ..., + ) -> _PyCapsule: ... - def to_device(self, device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... + def __dlpack_device__(self) -> tuple[int, L[0]]: ... - @property - def device(self) -> L["cpu"]: ... + @overload + def to_device(self: NDArray[_SCT], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[_SCT]: ... + @overload + def to_device(self: NDArray[Any], device: L["cpu"], /, *, stream: None | int | Any = ...) -> NDArray[Any]: ... def bitwise_count( self, @@ -2594,6 +2734,7 @@ _NBit2 = TypeVar("_NBit2", bound=NBitBase) class generic(_ArrayOrScalarCommon): @abstractmethod def __init__(self, *args: Any, **kwargs: Any) -> None: ... + # TODO: use `tuple[()]` as shape type once covariant (#26081) @overload def __array__(self: _ScalarType, dtype: None = ..., /) -> NDArray[_ScalarType]: ... @overload @@ -2616,6 +2757,8 @@ class generic(_ArrayOrScalarCommon): if sys.version_info >= (3, 12): def __buffer__(self, flags: int, /) -> memoryview: ... + def to_device(self: _ScalarType, device: L["cpu"], /, *, stream: None | int | Any = ...) -> _ScalarType: ... + @overload def astype( self, @@ -2746,7 +2889,7 @@ class number(generic, Generic[_NBit1]): # type: ignore def real(self: _ArraySelf) -> _ArraySelf: ... @property def imag(self: _ArraySelf) -> _ArraySelf: ... - def __class_getitem__(self, item: Any) -> GenericAlias: ... + def __class_getitem__(cls, item: Any, /) -> GenericAlias: ... def __int__(self) -> int: ... 
def __float__(self) -> float: ... def __complex__(self) -> complex: ... @@ -2766,10 +2909,10 @@ class number(generic, Generic[_NBit1]): # type: ignore __rpow__: _NumberOp __truediv__: _NumberOp __rtruediv__: _NumberOp - __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] class bool(generic): def __init__(self, value: object = ..., /) -> None: ... @@ -2812,11 +2955,14 @@ class bool(generic): __rmod__: _BoolMod __divmod__: _BoolDivMod __rdivmod__: _BoolDivMod - __lt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __le__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __gt__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] - __ge__: _ComparisonOp[_NumberLike_co, _ArrayLikeNumber_co] + __lt__: _ComparisonOpLT[_NumberLike_co, _ArrayLikeNumber_co] + __le__: _ComparisonOpLE[_NumberLike_co, _ArrayLikeNumber_co] + __gt__: _ComparisonOpGT[_NumberLike_co, _ArrayLikeNumber_co] + __ge__: _ComparisonOpGE[_NumberLike_co, _ArrayLikeNumber_co] + +bool_: TypeAlias = bool +@final class object_(generic): def __init__(self, value: object = ..., /) -> None: ... @property @@ -2859,21 +3005,21 @@ class datetime64(generic): format: _CharLike_co | tuple[_CharLike_co, _IntLike_co], /, ) -> None: ... - def __add__(self, other: _TD64Like_co) -> datetime64: ... - def __radd__(self, other: _TD64Like_co) -> datetime64: ... + def __add__(self, other: _TD64Like_co, /) -> datetime64: ... + def __radd__(self, other: _TD64Like_co, /) -> datetime64: ... @overload - def __sub__(self, other: datetime64) -> timedelta64: ... 
+ def __sub__(self, other: datetime64, /) -> timedelta64: ... @overload - def __sub__(self, other: _TD64Like_co) -> datetime64: ... - def __rsub__(self, other: datetime64) -> timedelta64: ... - __lt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __le__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __gt__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] - __ge__: _ComparisonOp[datetime64, _ArrayLikeDT64_co] + def __sub__(self, other: _TD64Like_co, /) -> datetime64: ... + def __rsub__(self, other: datetime64, /) -> timedelta64: ... + __lt__: _ComparisonOpLT[datetime64, _ArrayLikeDT64_co] + __le__: _ComparisonOpLE[datetime64, _ArrayLikeDT64_co] + __gt__: _ComparisonOpGT[datetime64, _ArrayLikeDT64_co] + __ge__: _ComparisonOpGE[datetime64, _ArrayLikeDT64_co] -_IntValue = SupportsInt | _CharLike_co | SupportsIndex -_FloatValue = None | _CharLike_co | SupportsFloat | SupportsIndex -_ComplexValue = ( +_IntValue: TypeAlias = SupportsInt | _CharLike_co | SupportsIndex +_FloatValue: TypeAlias = None | _CharLike_co | SupportsFloat | SupportsIndex +_ComplexValue: TypeAlias = ( None | _CharLike_co | SupportsFloat @@ -2888,9 +3034,9 @@ class integer(number[_NBit1]): # type: ignore @property def denominator(self) -> L[1]: ... @overload - def __round__(self, ndigits: None = ...) -> int: ... + def __round__(self, ndigits: None = ..., /) -> int: ... @overload - def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + def __round__(self: _ScalarType, ndigits: SupportsIndex, /) -> _ScalarType: ... # NOTE: `__index__` is technically defined in the bottom-most # sub-classes (`int64`, `uint32`, etc) @@ -2903,20 +3049,20 @@ class integer(number[_NBit1]): # type: ignore def __index__(self) -> int: ... __truediv__: _IntTrueDiv[_NBit1] __rtruediv__: _IntTrueDiv[_NBit1] - def __mod__(self, value: _IntLike_co) -> integer[Any]: ... - def __rmod__(self, value: _IntLike_co) -> integer[Any]: ... + def __mod__(self, value: _IntLike_co, /) -> integer[Any]: ... 
+ def __rmod__(self, value: _IntLike_co, /) -> integer[Any]: ... def __invert__(self: _IntType) -> _IntType: ... # Ensure that objects annotated as `integer` support bit-wise operations - def __lshift__(self, other: _IntLike_co) -> integer[Any]: ... - def __rlshift__(self, other: _IntLike_co) -> integer[Any]: ... - def __rshift__(self, other: _IntLike_co) -> integer[Any]: ... - def __rrshift__(self, other: _IntLike_co) -> integer[Any]: ... - def __and__(self, other: _IntLike_co) -> integer[Any]: ... - def __rand__(self, other: _IntLike_co) -> integer[Any]: ... - def __or__(self, other: _IntLike_co) -> integer[Any]: ... - def __ror__(self, other: _IntLike_co) -> integer[Any]: ... - def __xor__(self, other: _IntLike_co) -> integer[Any]: ... - def __rxor__(self, other: _IntLike_co) -> integer[Any]: ... + def __lshift__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __rlshift__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __rshift__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __rrshift__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __and__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __rand__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __or__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __ror__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __xor__(self, other: _IntLike_co, /) -> integer[Any]: ... + def __rxor__(self, other: _IntLike_co, /) -> integer[Any]: ... class signedinteger(integer[_NBit1]): def __init__(self, value: _IntValue = ..., /) -> None: ... @@ -2980,24 +3126,24 @@ class timedelta64(generic): def __neg__(self: _ArraySelf) -> _ArraySelf: ... def __pos__(self: _ArraySelf) -> _ArraySelf: ... def __abs__(self: _ArraySelf) -> _ArraySelf: ... - def __add__(self, other: _TD64Like_co) -> timedelta64: ... - def __radd__(self, other: _TD64Like_co) -> timedelta64: ... - def __sub__(self, other: _TD64Like_co) -> timedelta64: ... 
- def __rsub__(self, other: _TD64Like_co) -> timedelta64: ... - def __mul__(self, other: _FloatLike_co) -> timedelta64: ... - def __rmul__(self, other: _FloatLike_co) -> timedelta64: ... + def __add__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __radd__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __sub__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __rsub__(self, other: _TD64Like_co, /) -> timedelta64: ... + def __mul__(self, other: _FloatLike_co, /) -> timedelta64: ... + def __rmul__(self, other: _FloatLike_co, /) -> timedelta64: ... __truediv__: _TD64Div[float64] __floordiv__: _TD64Div[int64] - def __rtruediv__(self, other: timedelta64) -> float64: ... - def __rfloordiv__(self, other: timedelta64) -> int64: ... - def __mod__(self, other: timedelta64) -> timedelta64: ... - def __rmod__(self, other: timedelta64) -> timedelta64: ... - def __divmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... - def __rdivmod__(self, other: timedelta64) -> tuple[int64, timedelta64]: ... - __lt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __le__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __gt__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] - __ge__: _ComparisonOp[_TD64Like_co, _ArrayLikeTD64_co] + def __rtruediv__(self, other: timedelta64, /) -> float64: ... + def __rfloordiv__(self, other: timedelta64, /) -> int64: ... + def __mod__(self, other: timedelta64, /) -> timedelta64: ... + def __rmod__(self, other: timedelta64, /) -> timedelta64: ... + def __divmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... + def __rdivmod__(self, other: timedelta64, /) -> tuple[int64, timedelta64]: ... 
+ __lt__: _ComparisonOpLT[_TD64Like_co, _ArrayLikeTD64_co] + __le__: _ComparisonOpLE[_TD64Like_co, _ArrayLikeTD64_co] + __gt__: _ComparisonOpGT[_TD64Like_co, _ArrayLikeTD64_co] + __ge__: _ComparisonOpGE[_TD64Like_co, _ArrayLikeTD64_co] class unsignedinteger(integer[_NBit1]): # NOTE: `uint64 + signedinteger -> float64` @@ -3027,18 +3173,18 @@ class unsignedinteger(integer[_NBit1]): __divmod__: _UnsignedIntDivMod[_NBit1] __rdivmod__: _UnsignedIntDivMod[_NBit1] -uint8 = unsignedinteger[_8Bit] -uint16 = unsignedinteger[_16Bit] -uint32 = unsignedinteger[_32Bit] -uint64 = unsignedinteger[_64Bit] +uint8: TypeAlias = unsignedinteger[_8Bit] +uint16: TypeAlias = unsignedinteger[_16Bit] +uint32: TypeAlias = unsignedinteger[_32Bit] +uint64: TypeAlias = unsignedinteger[_64Bit] -ubyte = unsignedinteger[_NBitByte] -ushort = unsignedinteger[_NBitShort] -uintc = unsignedinteger[_NBitIntC] -uintp = unsignedinteger[_NBitIntP] -uint = uintp -ulong = unsignedinteger[_NBitLong] -ulonglong = unsignedinteger[_NBitLongLong] +ubyte: TypeAlias = unsignedinteger[_NBitByte] +ushort: TypeAlias = unsignedinteger[_NBitShort] +uintc: TypeAlias = unsignedinteger[_NBitIntC] +uintp: TypeAlias = unsignedinteger[_NBitIntP] +uint: TypeAlias = uintp +ulong: TypeAlias = unsignedinteger[_NBitLong] +ulonglong: TypeAlias = unsignedinteger[_NBitLongLong] class inexact(number[_NBit1]): # type: ignore def __getnewargs__(self: inexact[_64Bit]) -> tuple[float, ...]: ... @@ -3064,9 +3210,9 @@ class floating(inexact[_NBit1]): def __getnewargs__(self: float64) -> tuple[float]: ... def __getformat__(self: float64, typestr: L["double", "float"], /) -> str: ... @overload - def __round__(self, ndigits: None = ...) -> int: ... + def __round__(self, ndigits: None = ..., /) -> int: ... @overload - def __round__(self: _ScalarType, ndigits: SupportsIndex) -> _ScalarType: ... + def __round__(self: _ScalarType, ndigits: SupportsIndex, /) -> _ScalarType: ... 
__add__: _FloatOp[_NBit1] __radd__: _FloatOp[_NBit1] __sub__: _FloatOp[_NBit1] @@ -3084,14 +3230,14 @@ class floating(inexact[_NBit1]): __divmod__: _FloatDivMod[_NBit1] __rdivmod__: _FloatDivMod[_NBit1] -float16 = floating[_16Bit] -float32 = floating[_32Bit] -float64 = floating[_64Bit] +float16: TypeAlias = floating[_16Bit] +float32: TypeAlias = floating[_32Bit] +float64: TypeAlias = floating[_64Bit] -half = floating[_NBitHalf] -single = floating[_NBitSingle] -double = floating[_NBitDouble] -longdouble = floating[_NBitLongDouble] +half: TypeAlias = floating[_NBitHalf] +single: TypeAlias = floating[_NBitSingle] +double: TypeAlias = floating[_NBitDouble] +longdouble: TypeAlias = floating[_NBitLongDouble] # The main reason for `complexfloating` having two typevars is cosmetic. # It is used to clarify why `complex128`s precision is `_64Bit`, the latter @@ -3122,12 +3268,12 @@ class complexfloating(inexact[_NBit1], Generic[_NBit1, _NBit2]): __pow__: _ComplexOp[_NBit1] __rpow__: _ComplexOp[_NBit1] -complex64 = complexfloating[_32Bit, _32Bit] -complex128 = complexfloating[_64Bit, _64Bit] +complex64: TypeAlias = complexfloating[_32Bit, _32Bit] +complex128: TypeAlias = complexfloating[_64Bit, _64Bit] -csingle = complexfloating[_NBitSingle, _NBitSingle] -cdouble = complexfloating[_NBitDouble, _NBitDouble] -clongdouble = complexfloating[_NBitLongDouble, _NBitLongDouble] +csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] +cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] +clongdouble: TypeAlias = complexfloating[_NBitLongDouble, _NBitLongDouble] class flexible(generic): ... # type: ignore @@ -3147,13 +3293,14 @@ class void(flexible): self, val: ArrayLike, dtype: DTypeLike, offset: int = ... ) -> None: ... @overload - def __getitem__(self, key: str | SupportsIndex) -> Any: ... + def __getitem__(self, key: str | SupportsIndex, /) -> Any: ... @overload - def __getitem__(self, key: list[str]) -> void: ... 
+ def __getitem__(self, key: list[str], /) -> void: ... def __setitem__( self, key: str | list[str] | SupportsIndex, value: ArrayLike, + /, ) -> None: ... class character(flexible): # type: ignore @@ -3207,10 +3354,9 @@ newaxis: None @final class ufunc: @property - def __name__(self) -> str: ... + def __name__(self) -> LiteralString: ... @property def __doc__(self) -> str: ... - __call__: Callable[..., Any] @property def nin(self) -> int: ... @property @@ -3220,7 +3366,7 @@ class ufunc: @property def ntypes(self) -> int: ... @property - def types(self) -> list[str]: ... + def types(self) -> list[LiteralString]: ... # Broad return type because it has to encompass things like # # >>> np.logical_and.identity is True @@ -3235,18 +3381,20 @@ class ufunc: def identity(self) -> Any: ... # This is None for ufuncs and a string for gufuncs. @property - def signature(self) -> None | str: ... + def signature(self) -> None | LiteralString: ... + + def __call__(self, *args: Any, **kwargs: Any) -> Any: ... # The next four methods will always exist, but they will just # raise a ValueError ufuncs with that don't accept two input # arguments and return one output argument. Because of that we # can't type them very precisely. - reduce: Any - accumulate: Any - reduceat: Any - outer: Any + def reduce(self, /, *args: Any, **kwargs: Any) -> NoReturn | Any: ... + def accumulate(self, /, *args: Any, **kwargs: Any) -> NoReturn | NDArray[Any]: ... + def reduceat(self, /, *args: Any, **kwargs: Any) -> NoReturn | NDArray[Any]: ... + def outer(self, *args: Any, **kwargs: Any) -> NoReturn | Any: ... # Similarly at won't be defined for ufuncs that return multiple # outputs, so we can't type it very precisely. - at: Any + def at(self, /, *args: Any, **kwargs: Any) -> NoReturn | None: ... 
# Parameters: `__name__`, `ntypes` and `identity` absolute: _UFunc_Nin1_Nout1[L['absolute'], L[20], None] @@ -3311,7 +3459,7 @@ logical_and: _UFunc_Nin2_Nout1[L['logical_and'], L[20], L[True]] logical_not: _UFunc_Nin1_Nout1[L['logical_not'], L[20], None] logical_or: _UFunc_Nin2_Nout1[L['logical_or'], L[20], L[False]] logical_xor: _UFunc_Nin2_Nout1[L['logical_xor'], L[19], L[False]] -matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None] +matmul: _GUFunc_Nin2_Nout1[L['matmul'], L[19], None, L["(n?,k),(k,m?)->(n?,m?)"]] maximum: _UFunc_Nin2_Nout1[L['maximum'], L[21], None] minimum: _UFunc_Nin2_Nout1[L['minimum'], L[21], None] mod: _UFunc_Nin2_Nout1[L['remainder'], L[16], None] @@ -3340,7 +3488,7 @@ tan: _UFunc_Nin1_Nout1[L['tan'], L[8], None] tanh: _UFunc_Nin1_Nout1[L['tanh'], L[8], None] true_divide: _UFunc_Nin2_Nout1[L['true_divide'], L[11], None] trunc: _UFunc_Nin1_Nout1[L['trunc'], L[7], None] -vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None] +vecdot: _GUFunc_Nin2_Nout1[L['vecdot'], L[19], None, L["(n),(n)->()"]] abs = absolute acos = arccos @@ -3390,8 +3538,12 @@ def _no_nep50_warning() -> Generator[None, None, None]: ... def _get_promotion_state() -> str: ... def _set_promotion_state(state: str, /) -> None: ... -class ndenumerate(Generic[_ScalarType]): - iter: flatiter[NDArray[_ScalarType]] +_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True) + +class ndenumerate(Generic[_ScalarType_co]): + @property + def iter(self) -> flatiter[NDArray[_ScalarType_co]]: ... + @overload def __new__( cls, arr: _FiniteNestedSequence[_SupportsArray[dtype[_ScalarType]]], @@ -3408,7 +3560,20 @@ class ndenumerate(Generic[_ScalarType]): def __new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[float64]: ... @overload def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[complex128]: ... - def __next__(self: ndenumerate[_ScalarType]) -> tuple[_Shape, _ScalarType]: ... 
+ @overload + def __new__(cls, arr: object) -> ndenumerate[object_]: ... + + # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11) + @overload + def __next__( + self: ndenumerate[np.bool | datetime64 | timedelta64 | number[Any] | flexible], + /, + ) -> tuple[_Shape, _ScalarType_co]: ... + @overload + def __next__(self: ndenumerate[object_], /) -> tuple[_Shape, Any]: ... + @overload + def __next__(self, /) -> tuple[_Shape, _ScalarType_co]: ... + def __iter__(self: _T) -> _T: ... class ndindex: @@ -3491,9 +3656,9 @@ class finfo(Generic[_FloatType]): class iinfo(Generic[_IntType]): dtype: dtype[_IntType] - kind: str + kind: LiteralString bits: int - key: str + key: LiteralString @property def min(self) -> int: ... @property @@ -3506,7 +3671,7 @@ class iinfo(Generic[_IntType]): @overload def __new__(cls, dtype: str) -> iinfo[Any]: ... -_NDIterFlagsKind = L[ +_NDIterFlagsKind: TypeAlias = L[ "buffered", "c_index", "copy_if_overlap", @@ -3522,7 +3687,7 @@ _NDIterFlagsKind = L[ "zerosize_ok", ] -_NDIterOpFlagsKind = L[ +_NDIterOpFlagsKind: TypeAlias = L[ "aligned", "allocate", "arraymask", @@ -3613,14 +3778,14 @@ class nditer: @property def value(self) -> tuple[NDArray[Any], ...]: ... -_MemMapModeKind = L[ +_MemMapModeKind: TypeAlias = L[ "readonly", "r", "copyonwrite", "c", "readwrite", "r+", "write", "w+", ] -class memmap(ndarray[_ShapeType, _DType_co]): +class memmap(ndarray[_ShapeType_co, _DType_co]): __array_priority__: ClassVar[float] filename: str | None offset: int @@ -3658,7 +3823,7 @@ class memmap(ndarray[_ShapeType, _DType_co]): def __array_finalize__(self, obj: object) -> None: ... def __array_wrap__( self, - array: memmap[_ShapeType, _DType_co], + array: memmap[_ShapeType_co, _DType_co], context: None | tuple[ufunc, tuple[Any, ...], int] = ..., return_scalar: builtins.bool = ..., ) -> Any: ... 
@@ -3669,8 +3834,8 @@ class memmap(ndarray[_ShapeType, _DType_co]): class vectorize: pyfunc: Callable[..., Any] cache: builtins.bool - signature: None | str - otypes: None | str + signature: None | LiteralString + otypes: None | LiteralString excluded: set[int | str] __doc__: None | str def __init__( @@ -3686,7 +3851,7 @@ class vectorize: class poly1d: @property - def variable(self) -> str: ... + def variable(self) -> LiteralString: ... @property def order(self) -> int: ... @property @@ -3718,6 +3883,7 @@ class poly1d: __hash__: ClassVar[None] # type: ignore + # TODO: use `tuple[int]` as shape type once covariant (#26081) @overload def __array__(self, t: None = ..., copy: None | bool = ...) -> NDArray[Any]: ... @overload @@ -3739,19 +3905,19 @@ class poly1d: def __len__(self) -> int: ... def __neg__(self) -> poly1d: ... def __pos__(self) -> poly1d: ... - def __mul__(self, other: ArrayLike) -> poly1d: ... - def __rmul__(self, other: ArrayLike) -> poly1d: ... - def __add__(self, other: ArrayLike) -> poly1d: ... - def __radd__(self, other: ArrayLike) -> poly1d: ... - def __pow__(self, val: _FloatLike_co) -> poly1d: ... # Integral floats are accepted - def __sub__(self, other: ArrayLike) -> poly1d: ... - def __rsub__(self, other: ArrayLike) -> poly1d: ... - def __div__(self, other: ArrayLike) -> poly1d: ... - def __truediv__(self, other: ArrayLike) -> poly1d: ... - def __rdiv__(self, other: ArrayLike) -> poly1d: ... - def __rtruediv__(self, other: ArrayLike) -> poly1d: ... - def __getitem__(self, val: int) -> Any: ... - def __setitem__(self, key: int, val: Any) -> None: ... + def __mul__(self, other: ArrayLike, /) -> poly1d: ... + def __rmul__(self, other: ArrayLike, /) -> poly1d: ... + def __add__(self, other: ArrayLike, /) -> poly1d: ... + def __radd__(self, other: ArrayLike, /) -> poly1d: ... + def __pow__(self, val: _FloatLike_co, /) -> poly1d: ... # Integral floats are accepted + def __sub__(self, other: ArrayLike, /) -> poly1d: ... 
+ def __rsub__(self, other: ArrayLike, /) -> poly1d: ... + def __div__(self, other: ArrayLike, /) -> poly1d: ... + def __truediv__(self, other: ArrayLike, /) -> poly1d: ... + def __rdiv__(self, other: ArrayLike, /) -> poly1d: ... + def __rtruediv__(self, other: ArrayLike, /) -> poly1d: ... + def __getitem__(self, val: int, /) -> Any: ... + def __setitem__(self, key: int, val: Any, /) -> None: ... def __iter__(self) -> Iterator[Any]: ... def deriv(self, m: SupportsInt | SupportsIndex = ...) -> poly1d: ... def integ( @@ -3760,7 +3926,9 @@ class poly1d: k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ..., ) -> poly1d: ... -class matrix(ndarray[_ShapeType, _DType_co]): + + +class matrix(ndarray[_Shape2DType_co, _DType_co]): __array_priority__: ClassVar[float] def __new__( subtype, @@ -3771,30 +3939,38 @@ class matrix(ndarray[_ShapeType, _DType_co]): def __array_finalize__(self, obj: object) -> None: ... @overload - def __getitem__(self, key: ( - SupportsIndex - | _ArrayLikeInt_co - | tuple[SupportsIndex | _ArrayLikeInt_co, ...] - )) -> Any: ... + def __getitem__( + self, + key: ( + SupportsIndex + | _ArrayLikeInt_co + | tuple[SupportsIndex | _ArrayLikeInt_co, ...] + ), + /, + ) -> Any: ... @overload - def __getitem__(self, key: ( - None - | slice - | ellipsis - | SupportsIndex - | _ArrayLikeInt_co - | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] - )) -> matrix[Any, _DType_co]: ... + def __getitem__( + self, + key: ( + None + | slice + | ellipsis + | SupportsIndex + | _ArrayLikeInt_co + | tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...] + ), + /, + ) -> matrix[Any, _DType_co]: ... @overload - def __getitem__(self: NDArray[void], key: str) -> matrix[Any, dtype[Any]]: ... + def __getitem__(self: NDArray[void], key: str, /) -> matrix[Any, dtype[Any]]: ... @overload - def __getitem__(self: NDArray[void], key: list[str]) -> matrix[_ShapeType, dtype[void]]: ... 
+ def __getitem__(self: NDArray[void], key: list[str], /) -> matrix[_Shape2DType_co, dtype[void]]: ... - def __mul__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __rmul__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __imul__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... - def __pow__(self, other: ArrayLike) -> matrix[Any, Any]: ... - def __ipow__(self, other: ArrayLike) -> matrix[_ShapeType, _DType_co]: ... + def __mul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... + def __rmul__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... + def __imul__(self, other: ArrayLike, /) -> matrix[_Shape2DType_co, _DType_co]: ... + def __pow__(self, other: ArrayLike, /) -> matrix[Any, Any]: ... + def __ipow__(self, other: ArrayLike, /) -> matrix[_Shape2DType_co, _DType_co]: ... @overload def sum(self, axis: None = ..., dtype: DTypeLike = ..., out: None = ...) -> Any: ... @@ -3890,14 +4066,14 @@ class matrix(ndarray[_ShapeType, _DType_co]): @property def I(self) -> matrix[Any, Any]: ... @property - def A(self) -> ndarray[_ShapeType, _DType_co]: ... + def A(self) -> ndarray[_Shape2DType_co, _DType_co]: ... @property def A1(self) -> ndarray[Any, _DType_co]: ... @property def H(self) -> matrix[Any, _DType_co]: ... def getT(self) -> matrix[Any, _DType_co]: ... def getI(self) -> matrix[Any, Any]: ... - def getA(self) -> ndarray[_ShapeType, _DType_co]: ... + def getA(self) -> ndarray[_Shape2DType_co, _DType_co]: ... def getA1(self) -> ndarray[Any, _DType_co]: ... def getH(self) -> matrix[Any, _DType_co]: ... @@ -3910,4 +4086,10 @@ _CharDType = TypeVar("_CharDType", dtype[str_], dtype[bytes_]) class _SupportsDLPack(Protocol[_T_contra]): def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ... -def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ... +def from_dlpack( + obj: _SupportsDLPack[None], + /, + *, + device: L["cpu"] | None = ..., + copy: bool | None = ..., +) -> NDArray[Any]: ... 
diff --git a/numpy/_array_api_info.py b/numpy/_array_api_info.py new file mode 100644 index 000000000000..0167a2fe7985 --- /dev/null +++ b/numpy/_array_api_info.py @@ -0,0 +1,346 @@ +""" +Array API Inspection namespace + +This is the namespace for inspection functions as defined by the array API +standard. See +https://data-apis.org/array-api/latest/API_specification/inspection.html for +more details. + +""" +from numpy._core import ( + dtype, + bool, + intp, + int8, + int16, + int32, + int64, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + complex64, + complex128, +) + + +class __array_namespace_info__: + """ + Get the array API inspection namespace for NumPy. + + The array API inspection namespace defines the following functions: + + - capabilities() + - default_device() + - default_dtypes() + - dtypes() + - devices() + + See + https://data-apis.org/array-api/latest/API_specification/inspection.html + for more details. + + Returns + ------- + info : ModuleType + The array API inspection namespace for NumPy. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + + __module__ = 'numpy' + + def capabilities(self): + """ + Return a dictionary of array API library capabilities. + + The resulting dictionary has the following keys: + + - **"boolean indexing"**: boolean indicating whether an array library + supports boolean indexing. Always ``True`` for NumPy. + + - **"data-dependent shapes"**: boolean indicating whether an array + library supports data-dependent output shapes. Always ``True`` for + NumPy. + + See + https://data-apis.org/array-api/latest/API_specification/generated/array_api.info.capabilities.html + for more details. 
+ + See Also + -------- + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + capabilities : dict + A dictionary of array API library capabilities. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.capabilities() + {'boolean indexing': True, + 'data-dependent shapes': True} + + """ + return { + "boolean indexing": True, + "data-dependent shapes": True, + # 'max rank' will be part of the 2024.12 standard + # "max rank": 64, + } + + def default_device(self): + """ + The default device used for new NumPy arrays. + + For NumPy, this always returns ``'cpu'``. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Returns + ------- + device : str + The default device used for new NumPy arrays. + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_device() + 'cpu' + + """ + return "cpu" + + def default_dtypes(self, *, device=None): + """ + The default data types used for new NumPy arrays. + + For NumPy, this always returns the following dictionary: + + - **"real floating"**: ``numpy.float64`` + - **"complex floating"**: ``numpy.complex128`` + - **"integral"**: ``numpy.intp`` + - **"indexing"**: ``numpy.intp`` + + Parameters + ---------- + device : str, optional + The device to get the default data types for. For NumPy, only + ``'cpu'`` is allowed. + + Returns + ------- + dtypes : dict + A dictionary describing the default data types used for new NumPy + arrays. 
+ + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.default_dtypes() + {'real floating': numpy.float64, + 'complex floating': numpy.complex128, + 'integral': numpy.int64, + 'indexing': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + return { + "real floating": dtype(float64), + "complex floating": dtype(complex128), + "integral": dtype(intp), + "indexing": dtype(intp), + } + + def dtypes(self, *, device=None, kind=None): + """ + The array API data types supported by NumPy. + + Note that this function only returns data types that are defined by + the array API. + + Parameters + ---------- + device : str, optional + The device to get the data types for. For NumPy, only ``'cpu'`` is + allowed. + kind : str or tuple of str, optional + The kind of data types to return. If ``None``, all data types are + returned. If a string, only data types of that kind are returned. + If a tuple, a dictionary containing the union of the given kinds + is returned. The following kinds are supported: + + - ``'bool'``: boolean data types (i.e., ``bool``). + - ``'signed integer'``: signed integer data types (i.e., ``int8``, + ``int16``, ``int32``, ``int64``). + - ``'unsigned integer'``: unsigned integer data types (i.e., + ``uint8``, ``uint16``, ``uint32``, ``uint64``). + - ``'integral'``: integer data types. Shorthand for ``('signed + integer', 'unsigned integer')``. + - ``'real floating'``: real-valued floating-point data types + (i.e., ``float32``, ``float64``). + - ``'complex floating'``: complex floating-point data types (i.e., + ``complex64``, ``complex128``). + - ``'numeric'``: numeric data types. Shorthand for ``('integral', + 'real floating', 'complex floating')``. 
+ + Returns + ------- + dtypes : dict + A dictionary mapping the names of data types to the corresponding + NumPy data types. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.devices + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.dtypes(kind='signed integer') + {'int8': numpy.int8, + 'int16': numpy.int16, + 'int32': numpy.int32, + 'int64': numpy.int64} + + """ + if device not in ["cpu", None]: + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' + ) + if kind is None: + return { + "bool": dtype(bool), + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "bool": + return {"bool": bool} + if kind == "signed integer": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + } + if kind == "unsigned integer": + return { + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "integral": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + } + if kind == "real floating": + return { + "float32": dtype(float32), + "float64": dtype(float64), + } + if kind == "complex floating": + return { + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if kind == "numeric": + return { + "int8": dtype(int8), + "int16": dtype(int16), + "int32": dtype(int32), + "int64": dtype(int64), + "uint8": 
dtype(uint8), + "uint16": dtype(uint16), + "uint32": dtype(uint32), + "uint64": dtype(uint64), + "float32": dtype(float32), + "float64": dtype(float64), + "complex64": dtype(complex64), + "complex128": dtype(complex128), + } + if isinstance(kind, tuple): + res = {} + for k in kind: + res.update(self.dtypes(kind=k)) + return res + raise ValueError(f"unsupported kind: {kind!r}") + + def devices(self): + """ + The devices supported by NumPy. + + For NumPy, this always returns ``['cpu']``. + + Returns + ------- + devices : list of str + The devices supported by NumPy. + + See Also + -------- + __array_namespace_info__.capabilities, + __array_namespace_info__.default_device, + __array_namespace_info__.default_dtypes, + __array_namespace_info__.dtypes + + Examples + -------- + >>> info = np.__array_namespace_info__() + >>> info.devices() + ['cpu'] + + """ + return ["cpu"] diff --git a/numpy/_array_api_info.pyi b/numpy/_array_api_info.pyi new file mode 100644 index 000000000000..52b98fc0039b --- /dev/null +++ b/numpy/_array_api_info.pyi @@ -0,0 +1,213 @@ +import sys +from typing import ( + TYPE_CHECKING, + ClassVar, + Literal, + TypeAlias, + TypedDict, + TypeVar, + final, + overload, +) + +import numpy as np + +if sys.version_info >= (3, 11): + from typing import Never +elif TYPE_CHECKING: + from typing_extensions import Never +else: + # `NoReturn` and `Never` are equivalent (but not equal) for type-checkers, + # but are used in different places by convention + from typing import NoReturn as Never + +_Device: TypeAlias = Literal["cpu"] +_DeviceLike: TypeAlias = None | _Device + +_Capabilities = TypedDict( + "_Capabilities", + { + "boolean indexing": Literal[True], + "data-dependent shapes": Literal[True], + }, +) + +_DefaultDTypes = TypedDict( + "_DefaultDTypes", + { + "real floating": np.dtype[np.float64], + "complex floating": np.dtype[np.complex128], + "integral": np.dtype[np.intp], + "indexing": np.dtype[np.intp], + }, +) + + +_KindBool: TypeAlias = Literal["bool"] 
+_KindInt: TypeAlias = Literal["signed integer"] +_KindUInt: TypeAlias = Literal["unsigned integer"] +_KindInteger: TypeAlias = Literal["integral"] +_KindFloat: TypeAlias = Literal["real floating"] +_KindComplex: TypeAlias = Literal["complex floating"] +_KindNumber: TypeAlias = Literal["numeric"] +_Kind: TypeAlias = ( + _KindBool + | _KindInt + | _KindUInt + | _KindInteger + | _KindFloat + | _KindComplex + | _KindNumber +) + + +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_Permute1: TypeAlias = _T1 | tuple[_T1] +_Permute2: TypeAlias = tuple[_T1, _T2] | tuple[_T2, _T1] +_Permute3: TypeAlias = ( + tuple[_T1, _T2, _T3] | tuple[_T1, _T3, _T2] + | tuple[_T2, _T1, _T3] | tuple[_T2, _T3, _T1] + | tuple[_T3, _T1, _T2] | tuple[_T3, _T2, _T1] +) + +class _DTypesBool(TypedDict): + bool: np.dtype[np.bool] + +class _DTypesInt(TypedDict): + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + +class _DTypesUInt(TypedDict): + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + +class _DTypesInteger(_DTypesInt, _DTypesUInt): + ... + +class _DTypesFloat(TypedDict): + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + +class _DTypesComplex(TypedDict): + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +class _DTypesNumber(_DTypesInteger, _DTypesFloat, _DTypesComplex): + ... + +class _DTypes(_DTypesBool, _DTypesNumber): + ... 
+ +class _DTypesUnion(TypedDict, total=False): + bool: np.dtype[np.bool] + int8: np.dtype[np.int8] + int16: np.dtype[np.int16] + int32: np.dtype[np.int32] + int64: np.dtype[np.int64] + uint8: np.dtype[np.uint8] + uint16: np.dtype[np.uint16] + uint32: np.dtype[np.uint32] + uint64: np.dtype[np.uint64] + float32: np.dtype[np.float32] + float64: np.dtype[np.float64] + complex64: np.dtype[np.complex64] + complex128: np.dtype[np.complex128] + +_EmptyDict: TypeAlias = dict[Never, Never] + + +@final +class __array_namespace_info__: + __module__: ClassVar[Literal['numpy']] + + def capabilities(self) -> _Capabilities: ... + def default_device(self) -> _Device: ... + def default_dtypes( + self, + *, + device: _DeviceLike = ..., + ) -> _DefaultDTypes: ... + def devices(self) -> list[_Device]: ... + + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: None = ..., + ) -> _DTypes: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindBool], + ) -> _DTypesBool: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindInt], + ) -> _DTypesInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindUInt], + ) -> _DTypesUInt: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindFloat], + ) -> _DTypesFloat: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: _Permute1[_KindComplex], + ) -> _DTypesComplex: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindInteger] + | _Permute2[_KindInt, _KindUInt] + ), + ) -> _DTypesInteger: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: ( + _Permute1[_KindNumber] + | _Permute3[_KindInteger, _KindFloat, _KindComplex] + ), + ) -> _DTypesNumber: ... + @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[()], + ) -> _EmptyDict: ... 
+ @overload + def dtypes( + self, + *, + device: _DeviceLike = ..., + kind: tuple[_Kind, ...], + ) -> _DTypesUnion: ... diff --git a/numpy/_build_utils/gitversion.py b/numpy/_build_utils/gitversion.py index 4ee6e00bbd65..defc704c41eb 100644 --- a/numpy/_build_utils/gitversion.py +++ b/numpy/_build_utils/gitversion.py @@ -70,6 +70,9 @@ def git_version(version): # For NumPy 2.0, this should only have one field: `version` template = textwrap.dedent(f''' + """ + Module to expose more detailed version info for the installed `numpy` + """ version = "{version}" __version__ = version full_version = version diff --git a/numpy/_build_utils/tempita.py b/numpy/_build_utils/tempita.py index 0743b892436b..32e400f9c907 100644 --- a/numpy/_build_utils/tempita.py +++ b/numpy/_build_utils/tempita.py @@ -3,9 +3,7 @@ import os import argparse -from Cython import Tempita as tempita - -# XXX: If this import ever fails (does it really?), vendor cython.tempita +import tempita def process_tempita(fromfile, outfile=None): diff --git a/numpy/_build_utils/tempita/LICENSE.txt b/numpy/_build_utils/tempita/LICENSE.txt new file mode 100644 index 000000000000..0ba6f23c440f --- /dev/null +++ b/numpy/_build_utils/tempita/LICENSE.txt @@ -0,0 +1,20 @@ +Copyright (c) 2008 Ian Bicking and Contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/numpy/_build_utils/tempita/__init__.py b/numpy/_build_utils/tempita/__init__.py new file mode 100644 index 000000000000..41a0ce3d0efa --- /dev/null +++ b/numpy/_build_utils/tempita/__init__.py @@ -0,0 +1,4 @@ +# The original Tempita implements all of its templating code here. +# Moved it to _tempita.py to make the compilation portable. + +from ._tempita import * diff --git a/numpy/_build_utils/tempita/_looper.py b/numpy/_build_utils/tempita/_looper.py new file mode 100644 index 000000000000..4864f2949605 --- /dev/null +++ b/numpy/_build_utils/tempita/_looper.py @@ -0,0 +1,156 @@ +""" +Helper for looping over sequences, particular in templates. + +Often in a loop in a template it's handy to know what's next up, +previously up, if this is the first or last item in the sequence, etc. +These can be awkward to manage in a normal Python loop, but using the +looper you can get a better sense of the context. Use like:: + + >>> for loop, item in looper(['a', 'b', 'c']): + ... print loop.number, item + ... if not loop.last: + ... print '---' + 1 a + --- + 2 b + --- + 3 c + +""" + +basestring_ = (bytes, str) + +__all__ = ['looper'] + + +class looper: + """ + Helper for looping (particularly in templates) + + Use this like:: + + for loop, item in looper(seq): + if loop.first: + ... 
+ """ + + def __init__(self, seq): + self.seq = seq + + def __iter__(self): + return looper_iter(self.seq) + + def __repr__(self): + return '<%s for %r>' % ( + self.__class__.__name__, self.seq) + + +class looper_iter: + + def __init__(self, seq): + self.seq = list(seq) + self.pos = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.pos >= len(self.seq): + raise StopIteration + result = loop_pos(self.seq, self.pos), self.seq[self.pos] + self.pos += 1 + return result + + +class loop_pos: + + def __init__(self, seq, pos): + self.seq = seq + self.pos = pos + + def __repr__(self): + return '' % ( + self.seq[self.pos], self.pos) + + def index(self): + return self.pos + index = property(index) + + def number(self): + return self.pos + 1 + number = property(number) + + def item(self): + return self.seq[self.pos] + item = property(item) + + def __next__(self): + try: + return self.seq[self.pos + 1] + except IndexError: + return None + __next__ = property(__next__) + + def previous(self): + if self.pos == 0: + return None + return self.seq[self.pos - 1] + previous = property(previous) + + def odd(self): + return not self.pos % 2 + odd = property(odd) + + def even(self): + return self.pos % 2 + even = property(even) + + def first(self): + return self.pos == 0 + first = property(first) + + def last(self): + return self.pos == len(self.seq) - 1 + last = property(last) + + def length(self): + return len(self.seq) + length = property(length) + + def first_group(self, getter=None): + """ + Returns true if this item is the start of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. 
+ """ + if self.first: + return True + return self._compare_group(self.item, self.previous, getter) + + def last_group(self, getter=None): + """ + Returns true if this item is the end of a new group, + where groups mean that some attribute has changed. The getter + can be None (the item itself changes), an attribute name like + ``'.attr'``, a function, or a dict key or list index. + """ + if self.last: + return True + return self._compare_group(self.item, self.__next__, getter) + + def _compare_group(self, item, other, getter): + if getter is None: + return item != other + elif (isinstance(getter, basestring_) + and getter.startswith('.')): + getter = getter[1:] + if getter.endswith('()'): + getter = getter[:-2] + return getattr(item, getter)() != getattr(other, getter)() + else: + return getattr(item, getter) != getattr(other, getter) + elif hasattr(getter, '__call__'): + return getter(item) != getter(other) + else: + return item[getter] != other[getter] diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py new file mode 100644 index 000000000000..c30b6547ade6 --- /dev/null +++ b/numpy/_build_utils/tempita/_tempita.py @@ -0,0 +1,1132 @@ +""" +A small templating language + +This implements a small templating language. This language implements +if/elif/else, for/continue/break, expressions, and blocks of Python +code. The syntax is:: + + {{any expression (function calls etc)}} + {{any expression | filter}} + {{for x in y}}...{{endfor}} + {{if x}}x{{elif y}}y{{else}}z{{endif}} + {{py:x=1}} + {{py: + def foo(bar): + return 'baz' + }} + {{default var = default_value}} + {{# comment}} + +You use this with the ``Template`` class or the ``sub`` shortcut. +The ``Template`` class takes the template string and the name of +the template (for errors) and a default namespace. Then (like +``string.Template``) you can call the ``tmpl.substitute(**kw)`` +method to make a substitution (or ``tmpl.substitute(a_dict)``). 
+ +``sub(content, **kw)`` substitutes the template immediately. You +can use ``__name='tmpl.html'`` to set the name of the template. + +If there are syntax errors ``TemplateError`` will be raised. +""" + +import re +import sys +import os +import tokenize +from io import StringIO + +from ._looper import looper + +__all__ = ["TemplateError", "Template", "sub", "bunch"] + +in_re = re.compile(r"\s+in\s+") +var_re = re.compile(r"^[a-z_][a-z0-9_]*$", re.I) +basestring_ = (bytes, str) + + +def coerce_text(v): + if not isinstance(v, basestring_): + if hasattr(v, "__str__"): + return str(v) + else: + return bytes(v) + return v + + +class TemplateError(Exception): + """Exception raised while parsing a template""" + + def __init__(self, message, position, name=None): + Exception.__init__(self, message) + self.position = position + self.name = name + + def __str__(self): + msg = " ".join(self.args) + if self.position: + msg = "%s at line %s column %s" % (msg, self.position[0], self.position[1]) + if self.name: + msg += " in %s" % self.name + return msg + + +class _TemplateContinue(Exception): + pass + + +class _TemplateBreak(Exception): + pass + + +def get_file_template(name, from_template): + path = os.path.join(os.path.dirname(from_template.name), name) + return from_template.__class__.from_filename( + path, namespace=from_template.namespace, get_template=from_template.get_template + ) + + +class Template: + default_namespace = { + "start_braces": "{{", + "end_braces": "}}", + "looper": looper, + } + + default_encoding = "utf8" + default_inherit = None + + def __init__( + self, + content, + name=None, + namespace=None, + stacklevel=None, + get_template=None, + default_inherit=None, + line_offset=0, + delimiters=None, + ): + self.content = content + + # set delimiters + if delimiters is None: + delimiters = ( + self.default_namespace["start_braces"], + self.default_namespace["end_braces"], + ) + else: + # assert len(delimiters) == 2 and all([isinstance(delimiter, basestring) 
+ # for delimiter in delimiters]) + self.default_namespace = self.__class__.default_namespace.copy() + self.default_namespace["start_braces"] = delimiters[0] + self.default_namespace["end_braces"] = delimiters[1] + self.delimiters = delimiters + + self._unicode = isinstance(content, str) + if name is None and stacklevel is not None: + try: + caller = sys._getframe(stacklevel) + except ValueError: + pass + else: + globals = caller.f_globals + lineno = caller.f_lineno + if "__file__" in globals: + name = globals["__file__"] + if name.endswith(".pyc") or name.endswith(".pyo"): + name = name[:-1] + elif "__name__" in globals: + name = globals["__name__"] + else: + name = "" + if lineno: + name += ":%s" % lineno + self.name = name + self._parsed = parse( + content, name=name, line_offset=line_offset, delimiters=self.delimiters + ) + if namespace is None: + namespace = {} + self.namespace = namespace + self.get_template = get_template + if default_inherit is not None: + self.default_inherit = default_inherit + + def from_filename( + cls, + filename, + namespace=None, + encoding=None, + default_inherit=None, + get_template=get_file_template, + ): + with open(filename, "rb") as f: + c = f.read() + if encoding: + c = c.decode(encoding) + return cls( + content=c, + name=filename, + namespace=namespace, + default_inherit=default_inherit, + get_template=get_template, + ) + + from_filename = classmethod(from_filename) + + def __repr__(self): + return "<%s %s name=%r>" % ( + self.__class__.__name__, + hex(id(self))[2:], + self.name, + ) + + def substitute(self, *args, **kw): + if args: + if kw: + raise TypeError("You can only give positional *or* keyword arguments") + if len(args) > 1: + raise TypeError("You can only give one positional argument") + if not hasattr(args[0], "items"): + raise TypeError( + "If you pass in a single argument, you must pass in a " + "dictionary-like object (with a .items() method); you gave %r" + % (args[0],) + ) + kw = args[0] + ns = kw + 
ns["__template_name__"] = self.name + if self.namespace: + ns.update(self.namespace) + result, defs, inherit = self._interpret(ns) + if not inherit: + inherit = self.default_inherit + if inherit: + result = self._interpret_inherit(result, defs, inherit, ns) + return result + + def _interpret(self, ns): + __traceback_hide__ = True + parts = [] + defs = {} + self._interpret_codes(self._parsed, ns, out=parts, defs=defs) + if "__inherit__" in defs: + inherit = defs.pop("__inherit__") + else: + inherit = None + return "".join(parts), defs, inherit + + def _interpret_inherit(self, body, defs, inherit_template, ns): + __traceback_hide__ = True + if not self.get_template: + raise TemplateError( + "You cannot use inheritance without passing in get_template", + position=None, + name=self.name, + ) + templ = self.get_template(inherit_template, self) + self_ = TemplateObject(self.name) + for name, value in defs.items(): + setattr(self_, name, value) + self_.body = body + ns = ns.copy() + ns["self"] = self_ + return templ.substitute(ns) + + def _interpret_codes(self, codes, ns, out, defs): + __traceback_hide__ = True + for item in codes: + if isinstance(item, basestring_): + out.append(item) + else: + self._interpret_code(item, ns, out, defs) + + def _interpret_code(self, code, ns, out, defs): + __traceback_hide__ = True + name, pos = code[0], code[1] + if name == "py": + self._exec(code[2], ns, pos) + elif name == "continue": + raise _TemplateContinue() + elif name == "break": + raise _TemplateBreak() + elif name == "for": + vars, expr, content = code[2], code[3], code[4] + expr = self._eval(expr, ns, pos) + self._interpret_for(vars, expr, content, ns, out, defs) + elif name == "cond": + parts = code[2:] + self._interpret_if(parts, ns, out, defs) + elif name == "expr": + parts = code[2].split("|") + base = self._eval(parts[0], ns, pos) + for part in parts[1:]: + func = self._eval(part, ns, pos) + base = func(base) + out.append(self._repr(base, pos)) + elif name == "default": + 
var, expr = code[2], code[3] + if var not in ns: + result = self._eval(expr, ns, pos) + ns[var] = result + elif name == "inherit": + expr = code[2] + value = self._eval(expr, ns, pos) + defs["__inherit__"] = value + elif name == "def": + name = code[2] + signature = code[3] + parts = code[4] + ns[name] = defs[name] = TemplateDef( + self, name, signature, body=parts, ns=ns, pos=pos + ) + elif name == "comment": + return + else: + assert 0, "Unknown code: %r" % name + + def _interpret_for(self, vars, expr, content, ns, out, defs): + __traceback_hide__ = True + for item in expr: + if len(vars) == 1: + ns[vars[0]] = item + else: + if len(vars) != len(item): + raise ValueError( + "Need %i items to unpack (got %i items)" + % (len(vars), len(item)) + ) + for name, value in zip(vars, item): + ns[name] = value + try: + self._interpret_codes(content, ns, out, defs) + except _TemplateContinue: + continue + except _TemplateBreak: + break + + def _interpret_if(self, parts, ns, out, defs): + __traceback_hide__ = True + # @@: if/else/else gets through + for part in parts: + assert not isinstance(part, basestring_) + name, pos = part[0], part[1] + if name == "else": + result = True + else: + result = self._eval(part[2], ns, pos) + if result: + self._interpret_codes(part[3], ns, out, defs) + break + + def _eval(self, code, ns, pos): + __traceback_hide__ = True + try: + try: + value = eval(code, self.default_namespace, ns) + except SyntaxError as e: + raise SyntaxError("invalid syntax in expression: %s" % code) + return value + except Exception as e: + if getattr(e, "args", None): + arg0 = e.args[0] + else: + arg0 = coerce_text(e) + e.args = (self._add_line_info(arg0, pos),) + raise + + def _exec(self, code, ns, pos): + __traceback_hide__ = True + try: + exec(code, self.default_namespace, ns) + except Exception as e: + if e.args: + e.args = (self._add_line_info(e.args[0], pos),) + else: + e.args = (self._add_line_info(None, pos),) + raise + + def _repr(self, value, pos): + 
__traceback_hide__ = True + try: + if value is None: + return "" + if self._unicode: + try: + value = str(value) + except UnicodeDecodeError: + value = bytes(value) + else: + if not isinstance(value, basestring_): + value = coerce_text(value) + if isinstance(value, str) and self.default_encoding: + value = value.encode(self.default_encoding) + except Exception as e: + e.args = (self._add_line_info(e.args[0], pos),) + raise + else: + if self._unicode and isinstance(value, bytes): + if not self.default_encoding: + raise UnicodeDecodeError( + "Cannot decode bytes value %r into unicode " + "(no default_encoding provided)" % value + ) + try: + value = value.decode(self.default_encoding) + except UnicodeDecodeError as e: + raise UnicodeDecodeError( + e.encoding, + e.object, + e.start, + e.end, + e.reason + " in string %r" % value, + ) + elif not self._unicode and isinstance(value, str): + if not self.default_encoding: + raise UnicodeEncodeError( + "Cannot encode unicode value %r into bytes " + "(no default_encoding provided)" % value + ) + value = value.encode(self.default_encoding) + return value + + def _add_line_info(self, msg, pos): + msg = "%s at line %s column %s" % (msg, pos[0], pos[1]) + if self.name: + msg += " in file %s" % self.name + return msg + + +def sub(content, delimiters=None, **kw): + name = kw.get("__name") + tmpl = Template(content, name=name, delimiters=delimiters) + return tmpl.substitute(kw) + + +def paste_script_template_renderer(content, vars, filename=None): + tmpl = Template(content, name=filename) + return tmpl.substitute(vars) + + +class bunch(dict): + def __init__(self, **kw): + for name, value in kw.items(): + setattr(self, name, value) + + def __setattr__(self, name, value): + self[name] = value + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + raise AttributeError(name) + + def __getitem__(self, key): + if "default" in self: + try: + return dict.__getitem__(self, key) + except KeyError: + return 
dict.__getitem__(self, "default")
+        else:
+            return dict.__getitem__(self, key)
+
+    def __repr__(self):
+        return "<%s %s>" % (
+            self.__class__.__name__,
+            " ".join(["%s=%r" % (k, v) for k, v in sorted(self.items())]),
+        )
+
+
+class TemplateDef:
+    def __init__(
+        self, template, func_name, func_signature, body, ns, pos, bound_self=None
+    ):
+        self._template = template
+        self._func_name = func_name
+        self._func_signature = func_signature
+        self._body = body
+        self._ns = ns
+        self._pos = pos
+        self._bound_self = bound_self
+
+    def __repr__(self):
+        return "<tempita function %s(%s) at %s:%s>" % (
+            self._func_name,
+            self._func_signature,
+            self._template.name,
+            self._pos,
+        )
+
+    def __str__(self):
+        return self()
+
+    def __call__(self, *args, **kw):
+        values = self._parse_signature(args, kw)
+        ns = self._ns.copy()
+        ns.update(values)
+        if self._bound_self is not None:
+            ns["self"] = self._bound_self
+        out = []
+        subdefs = {}
+        self._template._interpret_codes(self._body, ns, out, subdefs)
+        return "".join(out)
+
+    def __get__(self, obj, type=None):
+        if obj is None:
+            return self
+        return self.__class__(
+            self._template,
+            self._func_name,
+            self._func_signature,
+            self._body,
+            self._ns,
+            self._pos,
+            bound_self=obj,
+        )
+
+    def _parse_signature(self, args, kw):
+        values = {}
+        sig_args, var_args, var_kw, defaults = self._func_signature
+        extra_kw = {}
+        for name, value in kw.items():
+            if not var_kw and name not in sig_args:
+                raise TypeError("Unexpected argument %s" % name)
+            if name in sig_args:
+                values[name] = value
+            else:
+                extra_kw[name] = value
+        args = list(args)
+        sig_args = list(sig_args)
+        while args:
+            while sig_args and sig_args[0] in values:
+                sig_args.pop(0)
+            if sig_args:
+                name = sig_args.pop(0)
+                values[name] = args.pop(0)
+            elif var_args:
+                values[var_args] = tuple(args)
+                break
+            else:
+                raise TypeError(
+                    "Extra position arguments: %s" % ", ".join([repr(v) for v in args])
+                )
+        for name, value_expr in defaults.items():
+            if name not in values:
+                values[name] = 
self._template._eval(value_expr, self._ns, self._pos) + for name in sig_args: + if name not in values: + raise TypeError("Missing argument: %s" % name) + if var_kw: + values[var_kw] = extra_kw + return values + + +class TemplateObject: + def __init__(self, name): + self.__name = name + self.get = TemplateObjectGetter(self) + + def __repr__(self): + return "<%s %s>" % (self.__class__.__name__, self.__name) + + +class TemplateObjectGetter: + def __init__(self, template_obj): + self.__template_obj = template_obj + + def __getattr__(self, attr): + return getattr(self.__template_obj, attr, Empty) + + def __repr__(self): + return "<%s around %r>" % (self.__class__.__name__, self.__template_obj) + + +class _Empty: + def __call__(self, *args, **kw): + return self + + def __str__(self): + return "" + + def __repr__(self): + return "Empty" + + def __unicode__(self): + return "" + + def __iter__(self): + return iter(()) + + def __bool__(self): + return False + + +Empty = _Empty() +del _Empty + +############################################################ +## Lexing and Parsing +############################################################ + + +def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None): + """ + Lex a string into chunks: + + >>> lex('hey') + ['hey'] + >>> lex('hey {{you}}') + ['hey ', ('you', (1, 7))] + >>> lex('hey {{') + Traceback (most recent call last): + ... + TemplateError: No }} to finish last expression at line 1 column 7 + >>> lex('hey }}') + Traceback (most recent call last): + ... + TemplateError: }} outside expression at line 1 column 7 + >>> lex('hey {{ {{') + Traceback (most recent call last): + ... 
+ TemplateError: {{ inside expression at line 1 column 10 + + """ + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + in_expr = False + chunks = [] + last = 0 + last_pos = (line_offset + 1, 1) + + token_re = re.compile( + r"%s|%s" % (re.escape(delimiters[0]), re.escape(delimiters[1])) + ) + for match in token_re.finditer(s): + expr = match.group(0) + pos = find_position(s, match.end(), last, last_pos) + if expr == delimiters[0] and in_expr: + raise TemplateError( + "%s inside expression" % delimiters[0], position=pos, name=name + ) + elif expr == delimiters[1] and not in_expr: + raise TemplateError( + "%s outside expression" % delimiters[1], position=pos, name=name + ) + if expr == delimiters[0]: + part = s[last:match.start()] + if part: + chunks.append(part) + in_expr = True + else: + chunks.append((s[last: match.start()], last_pos)) + in_expr = False + last = match.end() + last_pos = pos + if in_expr: + raise TemplateError( + "No %s to finish last expression" % delimiters[1], + name=name, + position=last_pos, + ) + part = s[last:] + if part: + chunks.append(part) + if trim_whitespace: + chunks = trim_lex(chunks) + return chunks + + +statement_re = re.compile(r"^(?:if |elif |for |def |inherit |default |py:)") +single_statements = ["else", "endif", "endfor", "enddef", "continue", "break"] +trail_whitespace_re = re.compile(r"\n\r?[\t ]*$") +lead_whitespace_re = re.compile(r"^[\t ]*\n") + + +def trim_lex(tokens): + r""" + Takes a lexed set of tokens, and removes whitespace when there is + a directive on a line by itself: + + >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) + >>> tokens + [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] + >>> trim_lex(tokens) + [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] + """ + last_trim = None + for i, current in enumerate(tokens): + if isinstance(current, basestring_): + # we don't trim this + continue + item = 
current[0] + if not statement_re.search(item) and item not in single_statements: + continue + if not i: + prev = "" + else: + prev = tokens[i - 1] + if i + 1 >= len(tokens): + next_chunk = "" + else: + next_chunk = tokens[i + 1] + if not isinstance(next_chunk, basestring_) or not isinstance(prev, basestring_): + continue + prev_ok = not prev or trail_whitespace_re.search(prev) + if i == 1 and not prev.strip(): + prev_ok = True + if last_trim is not None and last_trim + 2 == i and not prev.strip(): + prev_ok = "last" + if prev_ok and ( + not next_chunk + or lead_whitespace_re.search(next_chunk) + or (i == len(tokens) - 2 and not next_chunk.strip()) + ): + if prev: + if (i == 1 and not prev.strip()) or prev_ok == "last": + tokens[i - 1] = "" + else: + m = trail_whitespace_re.search(prev) + # +1 to leave the leading \n on: + prev = prev[: m.start() + 1] + tokens[i - 1] = prev + if next_chunk: + last_trim = i + if i == len(tokens) - 2 and not next_chunk.strip(): + tokens[i + 1] = "" + else: + m = lead_whitespace_re.search(next_chunk) + next_chunk = next_chunk[m.end():] + tokens[i + 1] = next_chunk + return tokens + + +def find_position(string, index, last_index, last_pos): + """Given a string and index, return (line, column)""" + lines = string.count("\n", last_index, index) + if lines > 0: + column = index - string.rfind("\n", last_index, index) + else: + column = last_pos[1] + (index - last_index) + return (last_pos[0] + lines, column) + + +def parse(s, name=None, line_offset=0, delimiters=None): + r""" + Parses a string into a kind of AST + + >>> parse('{{x}}') + [('expr', (1, 3), 'x')] + >>> parse('foo') + ['foo'] + >>> parse('{{if x}}test{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] + >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') + ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] + >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') + [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] + >>> 
parse('{{py:x=1}}') + [('py', (1, 3), 'x=1')] + >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') + [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] # noqa: E501 + + Some exceptions:: + + >>> parse('{{continue}}') + Traceback (most recent call last): + ... + TemplateError: continue outside of for loop at line 1 column 3 + >>> parse('{{if x}}foo') + Traceback (most recent call last): + ... + TemplateError: No {{endif}} at line 1 column 3 + >>> parse('{{else}}') + Traceback (most recent call last): + ... + TemplateError: else outside of an if block at line 1 column 3 + >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Unexpected endif at line 1 column 25 + >>> parse('{{if}}{{endif}}') + Traceback (most recent call last): + ... + TemplateError: if with no expression at line 1 column 3 + >>> parse('{{for x y}}{{endfor}}') + Traceback (most recent call last): + ... + TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 + >>> parse('{{py:x=1\ny=2}}') + Traceback (most recent call last): + ... 
+ TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 + """ + if delimiters is None: + delimiters = ( + Template.default_namespace["start_braces"], + Template.default_namespace["end_braces"], + ) + tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters) + result = [] + while tokens: + next_chunk, tokens = parse_expr(tokens, name) + result.append(next_chunk) + return result + + +def parse_expr(tokens, name, context=()): + if isinstance(tokens[0], basestring_): + return tokens[0], tokens[1:] + expr, pos = tokens[0] + expr = expr.strip() + if expr.startswith("py:"): + expr = expr[3:].lstrip(" \t") + if expr.startswith("\n") or expr.startswith("\r"): + expr = expr.lstrip("\r\n") + if "\r" in expr: + expr = expr.replace("\r\n", "\n") + expr = expr.replace("\r", "") + expr += "\n" + else: + if "\n" in expr: + raise TemplateError( + "Multi-line py blocks must start with a newline", + position=pos, + name=name, + ) + return ("py", pos, expr), tokens[1:] + elif expr in ("continue", "break"): + if "for" not in context: + raise TemplateError("continue outside of for loop", position=pos, name=name) + return (expr, pos), tokens[1:] + elif expr.startswith("if "): + return parse_cond(tokens, name, context) + elif expr.startswith("elif ") or expr == "else": + raise TemplateError( + "%s outside of an if block" % expr.split()[0], position=pos, name=name + ) + elif expr in ("if", "elif", "for"): + raise TemplateError("%s with no expression" % expr, position=pos, name=name) + elif expr in ("endif", "endfor", "enddef"): + raise TemplateError("Unexpected %s" % expr, position=pos, name=name) + elif expr.startswith("for "): + return parse_for(tokens, name, context) + elif expr.startswith("default "): + return parse_default(tokens, name, context) + elif expr.startswith("inherit "): + return parse_inherit(tokens, name, context) + elif expr.startswith("def "): + return parse_def(tokens, name, context) + elif expr.startswith("#"): + return 
("comment", pos, tokens[0][0]), tokens[1:] + return ("expr", pos, tokens[0][0]), tokens[1:] + + +def parse_cond(tokens, name, context): + start = tokens[0][1] + pieces = [] + context = context + ("if",) + while 1: + if not tokens: + raise TemplateError("Missing {{endif}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endif": + return ("cond", start) + tuple(pieces), tokens[1:] + next_chunk, tokens = parse_one_cond(tokens, name, context) + pieces.append(next_chunk) + + +def parse_one_cond(tokens, name, context): + (first, pos), tokens = tokens[0], tokens[1:] + content = [] + if first.endswith(":"): + first = first[:-1] + if first.startswith("if "): + part = ("if", pos, first[3:].lstrip(), content) + elif first.startswith("elif "): + part = ("elif", pos, first[5:].lstrip(), content) + elif first == "else": + part = ("else", pos, None, content) + else: + assert 0, "Unexpected token %r at %s" % (first, pos) + while 1: + if not tokens: + raise TemplateError("No {{endif}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and ( + tokens[0][0] == "endif" + or tokens[0][0].startswith("elif ") + or tokens[0][0] == "else" + ): + return part, tokens + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_for(tokens, name, context): + first, pos = tokens[0] + tokens = tokens[1:] + context = ("for",) + context + content = [] + assert first.startswith("for "), first + if first.endswith(":"): + first = first[:-1] + first = first[3:].strip() + match = in_re.search(first) + if not match: + raise TemplateError('Bad for (no "in") in %r' % first, position=pos, name=name) + vars = first[: match.start()] + if "(" in vars: + raise TemplateError( + "You cannot have () in the variable section of a for loop (%r)" % vars, + position=pos, + name=name, + ) + vars = tuple([v.strip() for v in first[: match.start()].split(",") if v.strip()]) + expr = first[match.end():] + while 1: + if not tokens: + raise 
TemplateError("No {{endfor}}", position=pos, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "endfor": + return ("for", pos, vars, expr, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) + content.append(next_chunk) + + +def parse_default(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("default ") + first = first.split(None, 1)[1] + parts = first.split("=", 1) + if len(parts) == 1: + raise TemplateError( + "Expression must be {{default var=value}}; no = found in %r" % first, + position=pos, + name=name, + ) + var = parts[0].strip() + if "," in var: + raise TemplateError( + "{{default x, y = ...}} is not supported", position=pos, name=name + ) + if not var_re.search(var): + raise TemplateError( + "Not a valid variable name for {{default}}: %r" % var, + position=pos, + name=name, + ) + expr = parts[1].strip() + return ("default", pos, var, expr), tokens[1:] + + +def parse_inherit(tokens, name, context): + first, pos = tokens[0] + assert first.startswith("inherit ") + expr = first.split(None, 1)[1] + return ("inherit", pos, expr), tokens[1:] + + +def parse_def(tokens, name, context): + first, start = tokens[0] + tokens = tokens[1:] + assert first.startswith("def ") + first = first.split(None, 1)[1] + if first.endswith(":"): + first = first[:-1] + if "(" not in first: + func_name = first + sig = ((), None, None, {}) + elif not first.endswith(")"): + raise TemplateError( + "Function definition doesn't end with ): %s" % first, + position=start, + name=name, + ) + else: + first = first[:-1] + func_name, sig_text = first.split("(", 1) + sig = parse_signature(sig_text, name, start) + context = context + ("def",) + content = [] + while 1: + if not tokens: + raise TemplateError("Missing {{enddef}}", position=start, name=name) + if isinstance(tokens[0], tuple) and tokens[0][0] == "enddef": + return ("def", start, func_name, sig, content), tokens[1:] + next_chunk, tokens = parse_expr(tokens, name, context) 
+ content.append(next_chunk) + + +def parse_signature(sig_text, name, pos): + tokens = tokenize.generate_tokens(StringIO(sig_text).readline) + sig_args = [] + var_arg = None + var_kw = None + defaults = {} + + def get_token(pos=False): + try: + tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) + except StopIteration: + return tokenize.ENDMARKER, "" + if pos: + return tok_type, tok_string, (srow, scol), (erow, ecol) + else: + return tok_type, tok_string + + while 1: + var_arg_type = None + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER: + break + if tok_type == tokenize.OP and (tok_string == "*" or tok_string == "**"): + var_arg_type = tok_string + tok_type, tok_string = get_token() + if tok_type != tokenize.NAME: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + var_name = tok_string + tok_type, tok_string = get_token() + if tok_type == tokenize.ENDMARKER or ( + tok_type == tokenize.OP and tok_string == "," + ): + if var_arg_type == "*": + var_arg = var_name + elif var_arg_type == "**": + var_kw = var_name + else: + sig_args.append(var_name) + if tok_type == tokenize.ENDMARKER: + break + continue + if var_arg_type is not None: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if tok_type == tokenize.OP and tok_string == "=": + nest_type = None + unnest_type = None + nest_count = 0 + start_pos = end_pos = None + parts = [] + while 1: + tok_type, tok_string, s, e = get_token(True) + if start_pos is None: + start_pos = s + end_pos = e + if tok_type == tokenize.ENDMARKER and nest_count: + raise TemplateError( + "Invalid signature: (%s)" % sig_text, position=pos, name=name + ) + if not nest_count and ( + tok_type == tokenize.ENDMARKER + or (tok_type == tokenize.OP and tok_string == ",") + ): + default_expr = isolate_expression(sig_text, start_pos, end_pos) + defaults[var_name] = default_expr + sig_args.append(var_name) + break + 
parts.append((tok_type, tok_string)) + if nest_count and tok_type == tokenize.OP and tok_string == nest_type: + nest_count += 1 + elif ( + nest_count and tok_type == tokenize.OP and tok_string == unnest_type + ): + nest_count -= 1 + if not nest_count: + nest_type = unnest_type = None + elif ( + not nest_count + and tok_type == tokenize.OP + and tok_string in ("(", "[", "{") + ): + nest_type = tok_string + nest_count = 1 + unnest_type = {"(": ")", "[": "]", "{": "}"}[nest_type] + return sig_args, var_arg, var_kw, defaults + + +def isolate_expression(string, start_pos, end_pos): + srow, scol = start_pos + srow -= 1 + erow, ecol = end_pos + erow -= 1 + lines = string.splitlines(True) + if srow == erow: + return lines[srow][scol:ecol] + parts = [lines[srow][scol:]] + parts.extend(lines[srow + 1:erow]) + if erow < len(lines): + # It'll sometimes give (end_row_past_finish, 0) + parts.append(lines[erow][:ecol]) + return "".join(parts) + + +_fill_command_usage = """\ +%prog [OPTIONS] TEMPLATE arg=value + +Use py:arg=value to set a Python value; otherwise all values are +strings. 
+"""
+
+
+def fill_command(args=None):
+    import sys
+    import optparse
+    import pkg_resources
+    import os
+
+    if args is None:
+        args = sys.argv[1:]
+    dist = pkg_resources.get_distribution("Paste")
+    parser = optparse.OptionParser(version=coerce_text(dist), usage=_fill_command_usage)
+    parser.add_option(
+        "-o",
+        "--output",
+        dest="output",
+        metavar="FILENAME",
+        help="File to write output to (default stdout)",
+    )
+    parser.add_option(
+        "--env",
+        dest="use_env",
+        action="store_true",
+        help="Put the environment in as top-level variables",
+    )
+    options, args = parser.parse_args(args)
+    if len(args) < 1:
+        print("You must give a template filename")
+        sys.exit(2)
+    template_name = args[0]
+    args = args[1:]
+    vars = {}
+    if options.use_env:
+        vars.update(os.environ)
+    for value in args:
+        if "=" not in value:
+            print("Bad argument: %r" % value)
+            sys.exit(2)
+        name, value = value.split("=", 1)
+        if name.startswith("py:"):
+            name = name[3:]
+            value = eval(value)
+        vars[name] = value
+    if template_name == "-":
+        template_content = sys.stdin.read()
+        template_name = ""
+    else:
+        with open(template_name, "rb") as f:
+            template_content = f.read()
+    template = Template(template_content, name=template_name)
+    result = template.substitute(vars)
+    if options.output:
+        with open(options.output, "wb") as f:
+            f.write(result)
+    else:
+        sys.stdout.write(result)
+
+
+if __name__ == "__main__":
+    fill_command()
diff --git a/numpy/_core/_add_newdocs.py b/numpy/_core/_add_newdocs.py
index 0a0322ae292a..3a2bf40d0565 100644
--- a/numpy/_core/_add_newdocs.py
+++ b/numpy/_core/_add_newdocs.py
@@ -45,6 +45,7 @@
 
     Examples
     --------
+    >>> import numpy as np
     >>> x = np.arange(6).reshape(2, 3)
     >>> fl = x.flat
     >>> type(fl)
@@ -72,6 +73,7 @@
 
     Examples
     --------
+    >>> import numpy as np
     >>> x = np.arange(5)
     >>> fl = x.flat
     >>> fl.base is x
@@ -86,6 +88,7 @@
 
     Examples
     --------
+    >>> import numpy as np
     >>> x = np.arange(6).reshape(2, 3)
     >>> fl = x.flat
     >>> fl.coords
@@ -104,6 +107,7 @@
 
     Examples
-------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> fl.index @@ -131,6 +135,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> x array([[0, 1, 2], @@ -321,6 +326,8 @@ Here is how we might write an ``iter_add`` function, using the Python iterator protocol: + >>> import numpy as np + >>> def iter_add_py(x, y, out=None): ... addop = np.add ... it = np.nditer([x, y, out], [], @@ -426,6 +433,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> y = x + 1 >>> it = np.nditer([x, y]) @@ -542,6 +550,7 @@ [a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified the first iter's axes as [1] + >>> import numpy as np >>> a = np.arange(12).reshape(2, 3, 2) >>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) >>> for x in i: @@ -616,6 +625,7 @@ Manually adding two vectors, using broadcasting: + >>> import numpy as np >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) @@ -644,6 +654,8 @@ Examples -------- + + >>> import numpy as np >>> x = np.array([[1], [2], [3]]) >>> y = np.array([4, 5, 6]) >>> b = np.broadcast(x, y) @@ -669,6 +681,8 @@ Examples -------- + + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -686,6 +700,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -701,6 +716,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -715,6 +731,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -729,6 +746,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -743,6 +761,7 @@ Examples -------- + >>> import numpy as np >>> x = 
np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -767,6 +786,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) @@ -870,6 +890,7 @@ Examples -------- + >>> import numpy as np >>> np.array([1, 2, 3]) array([1, 2, 3]) @@ -937,7 +958,7 @@ 'K' (keep) preserve input order Defaults to 'K'. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 @@ -948,6 +969,8 @@ the other requirements (``dtype``, ``order``, etc.). For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. Default: ``None``. + + .. versionadded:: 2.0.0 ${ARRAY_FUNCTION_LIKE} .. versionadded:: 1.20.0 @@ -975,6 +998,7 @@ Convert a list into an array: >>> a = [1, 2] + >>> import numpy as np >>> np.asarray(a) array([1, 2]) @@ -1028,6 +1052,22 @@ 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise 'K' (keep) preserve input order Defaults to 'C'. + device : str, optional + The device on which to place the created array. Default: ``None``. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 + + copy : bool, optional + If ``True``, then the object is copied. If ``None`` then the object is + copied only if needed, i.e. if ``__array__`` returns a copy, if obj + is a nested sequence, or if a copy is needed to satisfy any of + the other requirements (``dtype``, ``order``, etc.). + For ``False`` it raises a ``ValueError`` if a copy cannot be avoided. + Default: ``None``. + + .. versionadded:: 2.1.0 + ${ARRAY_FUNCTION_LIKE} .. 
versionadded:: 1.20.0 @@ -1055,6 +1095,7 @@ Convert a list into an array: >>> a = [1, 2] + >>> import numpy as np >>> np.asanyarray(a) array([1, 2]) @@ -1102,6 +1143,7 @@ -------- Starting with a Fortran-contiguous array: + >>> import numpy as np >>> x = np.ones((2, 3), order='F') >>> x.flags['F_CONTIGUOUS'] True @@ -1167,6 +1209,7 @@ -------- Starting with a C-contiguous array: + >>> import numpy as np >>> x = np.ones((2, 3), order='C') >>> x.flags['C_CONTIGUOUS'] True @@ -1217,7 +1260,7 @@ (C-style) or column-major (Fortran-style) order in memory. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. versionadded:: 2.0.0 @@ -1248,6 +1291,7 @@ Examples -------- + >>> import numpy as np >>> np.empty([2, 2]) array([[ -9.74499359e+001, 6.69583040e-309], [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized @@ -1310,6 +1354,7 @@ Examples -------- + >>> import numpy as np >>> np.zeros(5) array([ 0., 0., 0., 0., 0.]) @@ -1398,6 +1443,7 @@ Examples -------- + >>> import numpy as np >>> np.fromstring('1 2', dtype=int, sep=' ') array([1, 2]) >>> np.fromstring('1, 2', dtype=int, sep=',') @@ -1438,6 +1484,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "cde"]) >>> b = np.array(["a", "a", "dec"]) >>> np.char.compare_chararrays(a, b, ">", True) @@ -1481,6 +1528,7 @@ Examples -------- + >>> import numpy as np >>> iterable = (x*x for x in range(5)) >>> np.fromiter(iterable, float) array([ 0., 1., 4., 9., 16.]) @@ -1565,6 +1613,7 @@ -------- Construct an ndarray: + >>> import numpy as np >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), ... 
('temp', float)]) >>> x = np.zeros((1,), dtype=dt) @@ -1645,6 +1694,7 @@ Examples -------- + >>> import numpy as np >>> s = b'hello world' >>> np.frombuffer(s, dtype='S1', count=5, offset=6) array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') @@ -1661,7 +1711,7 @@ add_newdoc('numpy._core.multiarray', 'from_dlpack', """ - from_dlpack(x, /) + from_dlpack(x, /, *, device=None, copy=None) Create a NumPy array from an object implementing the ``__dlpack__`` protocol. Generally, the returned NumPy array is a read-only view @@ -1672,6 +1722,19 @@ x : object A Python object that implements the ``__dlpack__`` and ``__dlpack_device__`` methods. + device : device, optional + Device on which to place the created array. Default: ``None``. + Must be ``"cpu"`` if passed which may allow importing an array + that is not already CPU available. + copy : bool, optional + Boolean indicating whether or not to copy the input. If ``True``, + the copy will be made. If ``False``, the function will never copy, + and will raise ``BufferError`` in case a copy is deemed necessary. + Passing it requests a copy from the exporter who may or may not + implement the capability. + If ``None``, the function will reuse the existing memory buffer if + possible and copy otherwise. Default: ``None``. + Returns ------- @@ -1740,7 +1803,7 @@ The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. device : str, optional - The device on which to place the created array. Default: None. + The device on which to place the created array. Default: ``None``. For Array-API interoperability only, so must be ``"cpu"`` if passed. .. 
versionadded:: 2.0.0 @@ -1800,6 +1863,7 @@ Examples -------- + >>> import numpy as np >>> np.arange(3) array([0, 1, 2]) >>> np.arange(3.0) @@ -1828,15 +1892,6 @@ """) - -add_newdoc('numpy._core.multiarray', 'set_string_function', - """ - set_string_function(f, repr=1) - - Internal method to set a function to be used when pretty printing arrays. - - """) - add_newdoc('numpy._core.multiarray', 'promote_types', """ promote_types(type1, type2) @@ -1884,6 +1939,7 @@ Examples -------- + >>> import numpy as np >>> np.promote_types('f4', 'f8') dtype('float64') @@ -2032,7 +2088,7 @@ To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. - ``np.einsum('...i->...', a)`` is like + ``np.einsum('...i->...', a)`` is like :py:func:`np.sum(a, axis=-1) ` for array ``a`` of any shape. To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix @@ -2061,6 +2117,7 @@ Examples -------- + >>> import numpy as np >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) @@ -2338,6 +2395,7 @@ First mode, `buffer` is None: + >>> import numpy as np >>> np.ndarray(shape=(2,2), dtype=float, order='F') array([[0.0e+000, 0.0e+000], # random [ nan, 2.5e-323]]) @@ -2371,14 +2429,20 @@ """Array protocol: C-struct side.""")) add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack__', - """a.__dlpack__(*, stream=None) + """ + a.__dlpack__(*, stream=None, max_version=None, dl_device=None, copy=None) - DLPack Protocol: Part of the Array API.""")) + DLPack Protocol: Part of the Array API. + + """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('__dlpack_device__', - """a.__dlpack_device__() + """ + a.__dlpack_device__() + + DLPack Protocol: Part of the Array API. 
- DLPack Protocol: Part of the Array API.""")) + """)) add_newdoc('numpy._core.multiarray', 'ndarray', ('base', """ @@ -2388,6 +2452,7 @@ -------- The base of an array that owns its memory is None: + >>> import numpy as np >>> x = np.array([1,2,3,4]) >>> x.base is None True @@ -2457,6 +2522,7 @@ Examples -------- + >>> import numpy as np >>> import ctypes >>> x = np.array([[0, 1], [2, 3]], dtype=np.int32) >>> x @@ -2525,6 +2591,7 @@ Examples -------- + >>> import numpy as np >>> x = np.sqrt([1+0j, 0+1j]) >>> x.imag array([ 0. , 0.70710678]) @@ -2540,6 +2607,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1,2,3], dtype=np.float64) >>> x.itemsize 8 @@ -2636,6 +2704,7 @@ Examples -------- + >>> import numpy as np >>> x = np.arange(1, 7).reshape(2, 3) >>> x array([[1, 2, 3], @@ -2680,6 +2749,7 @@ Examples -------- + >>> import numpy as np >>> x = np.zeros((3,5,2), dtype=np.complex128) >>> x.nbytes 480 @@ -2695,6 +2765,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> x.ndim 1 @@ -2711,6 +2782,7 @@ Examples -------- + >>> import numpy as np >>> x = np.sqrt([1+0j, 0+1j]) >>> x.real array([ 1. 
, 0.70710678]) @@ -2742,6 +2814,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 4]) >>> x.shape (4,) @@ -2789,6 +2862,7 @@ Examples -------- + >>> import numpy as np >>> x = np.zeros((3, 5, 2), dtype=np.complex128) >>> x.size 30 @@ -2837,6 +2911,7 @@ Examples -------- + >>> import numpy as np >>> y = np.reshape(np.arange(2*3*4), (2,3,4)) >>> y array([[[ 0, 1, 2, 3], @@ -2874,6 +2949,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -2911,6 +2987,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -2943,16 +3020,20 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('__array__', """ - a.__array__([dtype], /, *, copy=None) + a.__array__([dtype], *, copy=None) - For ``dtype`` parameter it returns either a new reference to self if - ``dtype`` is not given or a new array of provided data type if ``dtype`` + For ``dtype`` parameter it returns a new reference to self if + ``dtype`` is not given or it matches array's data type. + A new array of provided data type is returned if ``dtype`` is different from the current data type of the array. For ``copy`` parameter it returns a new reference to self if ``copy=False`` or ``copy=None`` and copying isn't enforced by ``dtype`` parameter. The method returns a new array for ``copy=True``, regardless of ``dtype`` parameter. + A more detailed explanation of the ``__array__`` interface + can be found in :ref:`dunder_array.interface`. + """)) @@ -3211,6 +3292,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 2.5]) >>> x array([1. , 2. 
, 2.5]) @@ -3245,6 +3327,7 @@ Examples -------- + >>> import numpy as np >>> A = np.array([1, 256, 8755], dtype=np.int16) >>> list(map(hex, A)) ['0x1', '0x100', '0x2233'] @@ -3380,6 +3463,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3],[4,5,6]], order='F') >>> y = x.copy() @@ -3518,6 +3602,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([1, 2]) >>> a.fill(0) >>> a @@ -3577,6 +3662,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1,2], [3,4]]) >>> a.flatten() array([1, 2, 3, 4]) @@ -3609,6 +3695,7 @@ Examples -------- + >>> import numpy as np >>> x = np.diag([1.+1.j]*2) >>> x[1, 1] = 2 + 4.j >>> x @@ -3669,6 +3756,7 @@ Examples -------- + >>> import numpy as np >>> np.random.seed(123) >>> x = np.random.randint(9, size=(3, 3)) >>> x @@ -3818,7 +3906,7 @@ add_newdoc('numpy._core.multiarray', 'ndarray', ('reshape', """ - a.reshape(shape, /, *, order='C') + a.reshape(shape, /, *, order='C', copy=None) Returns an array containing the same data with a new shape. @@ -3891,6 +3979,8 @@ Shrinking an array: array is flattened (in the order that the data are stored in memory), resized, and reshaped: + >>> import numpy as np + >>> a = np.array([[0, 1], [2, 3]], order='C') >>> a.resize((2, 1)) >>> a @@ -3988,6 +4078,7 @@ Examples -------- + >>> import numpy as np >>> x = np.eye(3) >>> x.getfield(np.float64) array([[1., 0., 0.], @@ -4057,6 +4148,7 @@ Examples -------- + >>> import numpy as np >>> y = np.array([[3, 1, 7], ... [2, 0, 0], ... 
[8, 5, 9]]) @@ -4128,6 +4220,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1,4], [3,1]]) >>> a.sort(axis=1) >>> a @@ -4199,6 +4292,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([3, 4, 2, 1]) >>> a.partition(3) >>> a @@ -4361,6 +4455,7 @@ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``, except that ``tolist`` changes numpy scalars to Python scalars: + >>> import numpy as np >>> a = np.uint32([1, 2]) >>> a_list = list(a) >>> a_list @@ -4424,6 +4519,7 @@ Examples -------- + >>> import numpy as np >>> x = np.array([[0, 1], [2, 3]], dtype='>> x.tobytes() b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' @@ -4495,6 +4591,7 @@ Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -4580,15 +4677,17 @@ Examples -------- - >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) + >>> import numpy as np + >>> x = np.array([(-1, 2)], dtype=[('a', np.int8), ('b', np.int8)]) Viewing array data using a different type and dtype: - >>> y = x.view(dtype=np.int16, type=np.matrix) - >>> y - matrix([[513]], dtype=int16) - >>> print(type(y)) - + >>> nonneg = np.dtype([("a", np.uint8), ("b", np.uint8)]) + >>> y = x.view(dtype=nonneg, type=np.recarray) + >>> x["a"] + array([-1], dtype=int8) + >>> y.a + array([255], dtype=uint8) Creating a view on a structured array so it can be used in calculations @@ -4700,6 +4799,7 @@ -------- Use frompyfunc to add broadcasting to the Python function ``oct``: + >>> import numpy as np >>> oct_array = np.frompyfunc(oct, 1, 1) >>> oct_array(np.array((10, 30, 100))) array(['0o12', '0o36', '0o144'], dtype=object) @@ -4743,7 +4843,7 @@ Notes ----- This method allocates memory for new_docstring on - the heap. Technically this creates a mempory leak, since this + the heap. Technically this creates a memory leak, since this memory will not be reclaimed until the end of the program even if the ufunc itself is removed. 
However this will only be a problem if the user is repeatedly creating ufuncs with @@ -4865,37 +4965,6 @@ See `global_state` for more information. """) -add_newdoc('numpy._core._multiarray_tests', 'format_float_OSprintf_g', - """ - format_float_OSprintf_g(val, precision) - - Print a floating point scalar using the system's printf function, - equivalent to: - - printf("%.*g", precision, val); - - for half/float/double, or replacing 'g' by 'Lg' for longdouble. This - method is designed to help cross-validate the format_float_* methods. - - Parameters - ---------- - val : python float or numpy floating scalar - Value to format. - - precision : non-negative integer, optional - Precision given to printf. - - Returns - ------- - rep : string - The string representation of the floating point value - - See Also - -------- - format_float_scientific - format_float_positional - """) - ############################################################################## # @@ -4977,6 +5046,7 @@ Examples -------- + >>> import numpy as np >>> np.add.identity 0 >>> np.multiply.identity @@ -5001,6 +5071,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nargs 3 >>> np.multiply.nargs @@ -5019,6 +5090,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nin 2 >>> np.multiply.nin @@ -5041,6 +5113,7 @@ Examples -------- + >>> import numpy as np >>> np.add.nout 1 >>> np.multiply.nout @@ -5065,6 +5138,7 @@ Examples -------- + >>> import numpy as np >>> np.add.ntypes 18 >>> np.multiply.ntypes @@ -5091,6 +5165,7 @@ Examples -------- + >>> import numpy as np >>> np.add.types ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', @@ -5138,6 +5213,7 @@ Examples -------- + >>> import numpy as np >>> np.linalg._umath_linalg.det.signature '(m,m)->()' >>> np.matmul.signature @@ -5235,6 +5311,7 @@ Examples -------- + >>> import numpy as np >>> np.multiply.reduce([2,3,5]) 30 @@ -5333,6 +5410,7 @@ 
-------- 1-D array examples: + >>> import numpy as np >>> np.add.accumulate([2, 3, 5]) array([ 2, 5, 10]) >>> np.multiply.accumulate([2, 3, 5]) @@ -5430,6 +5508,7 @@ -------- To take the running sum of four successive values: + >>> import numpy as np >>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] array([ 6, 10, 14, 18]) @@ -5571,6 +5650,7 @@ -------- Set items 0 and 1 to their negative values: + >>> import numpy as np >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) >>> a @@ -5642,6 +5722,7 @@ -------- This API requires passing dtypes, define them for convenience: + >>> import numpy as np >>> int32 = np.dtype("int32") >>> float32 = np.dtype("float32") @@ -5791,6 +5872,7 @@ -------- Using array-scalar type: + >>> import numpy as np >>> np.dtype(np.int16) dtype('int16') @@ -5860,6 +5942,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype('i4') >>> x.alignment 4 @@ -5888,6 +5971,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype('i2') >>> dt.byteorder '=' @@ -5919,6 +6003,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.char 'd' @@ -5939,6 +6024,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.descr [('', '>> import numpy as np >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) >>> print(dt.fields) {'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)} @@ -5993,6 +6081,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)]) >>> x.flags 16 @@ -6031,6 +6120,8 @@ Examples -------- + + >>> import numpy as np >>> dt = np.dtype('i2') >>> dt.isbuiltin 1 @@ -6069,6 +6160,7 @@ Examples -------- + >>> import numpy as np >>> arr = np.array([[1, 2], [3, 4]]) >>> arr.dtype dtype('int64') @@ -6102,6 +6194,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype('i4') >>> dt.kind 'i' @@ -6132,6 +6225,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(float, 
metadata={"key": "value"}) >>> dt.metadata["key"] 'value' @@ -6162,6 +6256,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.name 'float64' @@ -6195,6 +6290,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(str) >>> dt.num 19 @@ -6213,6 +6309,7 @@ Examples -------- + >>> import numpy as np >>> dt = np.dtype(('i4', 4)) >>> dt.shape (4,) @@ -6232,6 +6329,7 @@ Examples -------- + >>> import numpy as np >>> x = np.dtype(float) >>> x.ndim 0 @@ -6267,6 +6365,7 @@ Examples -------- + >>> import numpy as np >>> x = numpy.dtype('8f') >>> x.subdtype (dtype('float32'), (8,)) @@ -6288,6 +6387,7 @@ Examples -------- + >>> import numpy as np >>> x = numpy.dtype('8f') >>> x.base dtype('float32') @@ -6343,6 +6443,7 @@ >>> sys_is_le = sys.byteorder == 'little' >>> native_code = '<' if sys_is_le else '>' >>> swapped_code = '>' if sys_is_le else '<' + >>> import numpy as np >>> native_dt = np.dtype(native_code+'i2') >>> swapped_dt = np.dtype(swapped_code+'i2') >>> native_dt.newbyteorder('S') == swapped_dt @@ -6518,6 +6619,7 @@ Examples -------- + >>> import numpy as np >>> # Some important days in July ... bdd = np.busdaycalendar( ... 
holidays=['2011-07-01', '2011-07-04', '2011-07-17']) @@ -6569,6 +6671,7 @@ Examples -------- + >>> import numpy as np >>> from numpy.lib.array_utils import normalize_axis_index >>> normalize_axis_index(0, ndim=3) 0 @@ -6611,6 +6714,7 @@ Examples -------- + >>> import numpy as np >>> dt_25s = np.dtype('timedelta64[25s]') >>> np.datetime_data(dt_25s) ('s', 25) @@ -6621,7 +6725,7 @@ as a timedelta >>> np.datetime64('2010', np.datetime_data(dt_25s)) - numpy.datetime64('2010-01-01T00:00:00','25s') + np.datetime64('2010-01-01T00:00:00','25s') """) @@ -6957,24 +7061,26 @@ def refer_to_array_attribute(attr, method=True): Examples -------- + >>> import numpy as np + >>> from numpy.dtypes import StringDType >>> np.array(["hello", "world"], dtype=StringDType()) array(["hello", "world"], dtype=StringDType()) >>> arr = np.array(["hello", None, "world"], - dtype=StringDType(na_object=None)) + ... dtype=StringDType(na_object=None)) >>> arr - array(["hello", None, "world", dtype=StringDType(na_object=None)) + array(["hello", None, "world"], dtype=StringDType(na_object=None)) >>> arr[1] is None True >>> arr = np.array(["hello", np.nan, "world"], - dtype=StringDType(na_object=np.nan)) + ... dtype=StringDType(na_object=np.nan)) >>> np.isnan(arr) array([False, True, False]) >>> np.array([1.2, object(), "hello world"], - dtype=StringDType(coerce=True)) + ... dtype=StringDType(coerce=True)) ValueError: StringDType only allows string data when string coercion is disabled. diff --git a/numpy/_core/_add_newdocs_scalars.py b/numpy/_core/_add_newdocs_scalars.py index 2ad1d22ee8f1..d7f2853e94ca 100644 --- a/numpy/_core/_add_newdocs_scalars.py +++ b/numpy/_core/_add_newdocs_scalars.py @@ -301,11 +301,11 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): offset of +0000. 
>>> np.datetime64(10, 'Y') - numpy.datetime64('1980') + np.datetime64('1980') >>> np.datetime64('1980', 'Y') - numpy.datetime64('1980') + np.datetime64('1980') >>> np.datetime64(10, 'D') - numpy.datetime64('1970-01-11') + np.datetime64('1970-01-11') See :ref:`arrays.datetime` for more information. """) @@ -327,6 +327,7 @@ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc): Examples -------- + >>> import numpy as np >>> np.int64(-2).is_integer() True >>> np.uint32(5).is_integer() diff --git a/numpy/_core/_asarray.py b/numpy/_core/_asarray.py index 75eabb21f996..2908813e7747 100644 --- a/numpy/_core/_asarray.py +++ b/numpy/_core/_asarray.py @@ -75,6 +75,7 @@ def require(a, dtype=None, requirements=None, *, like=None): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x.flags C_CONTIGUOUS : True diff --git a/numpy/_core/_dtype.py b/numpy/_core/_dtype.py index 328a0e3959f3..ee9b96590263 100644 --- a/numpy/_core/_dtype.py +++ b/numpy/_core/_dtype.py @@ -277,9 +277,7 @@ def _is_packed(dtype): if align: total_offset = _aligned_offset(total_offset, max_alignment) - if total_offset != dtype.itemsize: - return False - return True + return total_offset == dtype.itemsize def _struct_list_str(dtype): diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index 8d6dc04851b5..c0142bf44f03 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -5,6 +5,7 @@ """ import ast +import math import re import sys import warnings @@ -560,7 +561,7 @@ def _view_is_safe(oldtype, newtype): return if newtype.hasobject or oldtype.hasobject: - raise TypeError("Cannot change data-type for object array.") + raise TypeError("Cannot change data-type for array of references.") return @@ -860,6 +861,9 @@ def _prod(a): def _gcd(a, b): """Calculate the greatest common divisor of a and b""" + if not (math.isfinite(a) and math.isfinite(b)): + raise ValueError('Can only find greatest common divisor of ' + f'finite arguments, found "{a}" and 
"{b}"') while b: a, b = b, a % b return a diff --git a/numpy/_core/_methods.py b/numpy/_core/_methods.py index f214ff957370..388854e664a5 100644 --- a/numpy/_core/_methods.py +++ b/numpy/_core/_methods.py @@ -8,6 +8,7 @@ import warnings from contextlib import nullcontext +import numpy as np from numpy._core import multiarray as mu from numpy._core import umath as um from numpy._core.multiarray import asanyarray @@ -97,10 +98,18 @@ def _count_reduce_items(arr, axis, keepdims=False, where=True): return items def _clip(a, min=None, max=None, out=None, **kwargs): - if min is None and max is None: - raise ValueError("One of max or min must be given") + if a.dtype.kind in "iu": + # If min/max is a Python integer, deal with out-of-bound values here. + # (This enforces NEP 50 rules as no value based promotion is done.) + if type(min) is int and min <= np.iinfo(a.dtype).min: + min = None + if type(max) is int and max >= np.iinfo(a.dtype).max: + max = None - if min is None: + if min is None and max is None: + # return identity + return um.positive(a, out=out, **kwargs) + elif min is None: return um.minimum(a, max, out=out, **kwargs) elif max is None: return um.maximum(a, min, out=out, **kwargs) diff --git a/numpy/_core/_type_aliases.py b/numpy/_core/_type_aliases.py index 13f39a11cb9b..80a59e7b3f52 100644 --- a/numpy/_core/_type_aliases.py +++ b/numpy/_core/_type_aliases.py @@ -93,9 +93,10 @@ # Building `sctypes` #################### -sctypes = {"int": [], "uint": [], "float": [], "complex": [], "others": []} +sctypes = {"int": set(), "uint": set(), "float": set(), + "complex": set(), "others": set()} -for type_info in set(typeinfo.values()): +for type_info in typeinfo.values(): if type_info.kind in ["M", "m"]: # exclude timedelta and datetime continue @@ -108,9 +109,11 @@ ("others", ma.generic) ]: if issubclass(concrete_type, abstract_type): - sctypes[type_group].append(concrete_type) + sctypes[type_group].add(concrete_type) break # sort sctype groups by bitsize -for 
sctype_list in sctypes.values(): +for sctype_key in sctypes.keys(): + sctype_list = list(sctypes[sctype_key]) sctype_list.sort(key=lambda x: dtype(x).itemsize) + sctypes[sctype_key] = sctype_list diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index 1dee8a84a23d..d60e7cbbda97 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -74,6 +74,7 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): Examples -------- + >>> import numpy as np >>> orig_settings = np.seterr(all='ignore') # seterr to known value >>> np.int16(32000) * np.int16(3) 30464 @@ -130,6 +131,7 @@ def geterr(): Examples -------- + >>> import numpy as np >>> np.geterr() {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'} >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP @@ -167,6 +169,25 @@ def setbufsize(size): size : int Size of buffer. + Returns + ------- + bufsize : int + Previous size of ufunc buffer in bytes. + + Examples + -------- + When exiting a `numpy.errstate` context manager the bufsize is restored: + + >>> import numpy as np + >>> with np.errstate(): + ... np.setbufsize(4096) + ... print(np.getbufsize()) + ... + 8192 + 4096 + >>> np.getbufsize() + 8192 + """ old = _get_extobj_dict()["bufsize"] extobj = _make_extobj(bufsize=size) @@ -184,6 +205,12 @@ def getbufsize(): getbufsize : int Size of ufunc buffer in bytes. + Examples + -------- + >>> import numpy as np + >>> np.getbufsize() + 8192 + """ return _get_extobj_dict()["bufsize"] @@ -237,6 +264,8 @@ def seterrcall(func): ... print("Floating point error (%s), with flag %s" % (type, flag)) ... 
+ >>> import numpy as np + >>> orig_handler = np.seterrcall(err_handler) >>> orig_err = np.seterr(all='call') @@ -304,6 +333,7 @@ def geterrcall(): Examples -------- + >>> import numpy as np >>> np.geterrcall() # we did not yet set a handler, returns None >>> orig_settings = np.seterr(all='call') @@ -371,6 +401,7 @@ class errstate: Examples -------- + >>> import numpy as np >>> olderr = np.seterr(all='ignore') # Set error handling to known state. >>> np.arange(3) / 0. diff --git a/numpy/_core/arrayprint.py b/numpy/_core/arrayprint.py index ec11beae3f58..fde0d7d4a162 100644 --- a/numpy/_core/arrayprint.py +++ b/numpy/_core/arrayprint.py @@ -35,35 +35,22 @@ from .umath import absolute, isinf, isfinite, isnat from . import multiarray from .multiarray import (array, dragon4_positional, dragon4_scientific, - datetime_as_string, datetime_data, ndarray, - set_legacy_print_mode) + datetime_as_string, datetime_data, ndarray) from .fromnumeric import any from .numeric import concatenate, asarray, errstate from .numerictypes import (longlong, intc, int_, float64, complex128, flexible) from .overrides import array_function_dispatch, set_module +from .printoptions import format_options import operator import warnings import contextlib -_format_options = { - 'edgeitems': 3, # repr N leading and trailing items of each dimension - 'threshold': 1000, # total items > triggers array summarization - 'floatmode': 'maxprec', - 'precision': 8, # precision of floating point representations - 'suppress': False, # suppress printing small floating values in exp format - 'linewidth': 75, - 'nanstr': 'nan', - 'infstr': 'inf', - 'sign': '-', - 'formatter': None, - # Internally stored as an int to simplify comparisons; converted from/to - # str/False on the way in/out. 
- 'legacy': sys.maxsize} def _make_options_dict(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, - sign=None, formatter=None, floatmode=None, legacy=None): + sign=None, formatter=None, floatmode=None, legacy=None, + override_repr=None): """ Make a dictionary out of the non-None arguments, plus conversion of *legacy* and sanity checks. @@ -119,7 +106,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None, def set_printoptions(precision=None, threshold=None, edgeitems=None, linewidth=None, suppress=None, nanstr=None, infstr=None, formatter=None, sign=None, floatmode=None, - *, legacy=None): + *, legacy=None, override_repr=None): """ Set printing options. @@ -217,6 +204,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, by not inserting spaces after commas that separate fields and after colons. + If set to ``'1.25'`` approximates printing of 1.25 which mainly means + that numeric scalars are printed without their type information, e.g. + as ``3.0`` rather than ``np.float64(3.0)``. + If set to `False`, disables legacy mode. Unrecognized strings will be ignored with a warning for forward @@ -224,6 +215,11 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, .. versionadded:: 1.14.0 .. versionchanged:: 1.22.0 + .. versionchanged:: 2.0 + + override_repr: callable, optional + If set a passed function will be used for generating arrays' repr. + Other options will be ignored. See Also -------- @@ -239,6 +235,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, -------- Floating point precision can be set: + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.array([1.123456789]) [1.1235] @@ -283,24 +280,29 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. 
]) """ - opt = _make_options_dict(precision, threshold, edgeitems, linewidth, - suppress, nanstr, infstr, sign, formatter, - floatmode, legacy) - # formatter is always reset - opt['formatter'] = formatter - _format_options.update(opt) - - # set the C variable for legacy mode - if _format_options['legacy'] == 113: - set_legacy_print_mode(113) - # reset the sign option in legacy mode to avoid confusion - _format_options['sign'] = '-' - elif _format_options['legacy'] == 121: - set_legacy_print_mode(121) - elif _format_options['legacy'] == 125: - set_legacy_print_mode(125) - elif _format_options['legacy'] == sys.maxsize: - set_legacy_print_mode(0) + _set_printoptions(precision, threshold, edgeitems, linewidth, suppress, + nanstr, infstr, formatter, sign, floatmode, + legacy=legacy, override_repr=override_repr) + + +def _set_printoptions(precision=None, threshold=None, edgeitems=None, + linewidth=None, suppress=None, nanstr=None, + infstr=None, formatter=None, sign=None, floatmode=None, + *, legacy=None, override_repr=None): + new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth, + suppress, nanstr, infstr, sign, formatter, + floatmode, legacy) + # formatter and override_repr are always reset + new_opt['formatter'] = formatter + new_opt['override_repr'] = override_repr + + updated_opt = format_options.get() | new_opt + updated_opt.update(new_opt) + + if updated_opt['legacy'] == 113: + updated_opt['sign'] = '-' + + return format_options.set(updated_opt) @set_module('numpy') @@ -320,8 +322,10 @@ def get_printoptions(): - suppress : bool - nanstr : str - infstr : str - - formatter : dict of callables - sign : str + - formatter : dict of callables + - floatmode : str + - legacy : str or False For a full description of these options, see `set_printoptions`. 
@@ -329,8 +333,21 @@ def get_printoptions(): -------- set_printoptions, printoptions + Examples + -------- + >>> import numpy as np + + >>> np.get_printoptions() + {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} + + >>> np.get_printoptions()['linewidth'] + 75 + >>> np.set_printoptions(linewidth=100) + >>> np.get_printoptions()['linewidth'] + 100 + """ - opts = _format_options.copy() + opts = format_options.get().copy() opts['legacy'] = { 113: '1.13', 121: '1.21', 125: '1.25', sys.maxsize: False, }[opts['legacy']] @@ -339,7 +356,7 @@ def get_printoptions(): def _get_legacy_print_mode(): """Return the legacy print mode as an int.""" - return _format_options['legacy'] + return format_options.get()['legacy'] @set_module('numpy') @@ -353,6 +370,7 @@ def printoptions(*args, **kwargs): Examples -------- + >>> import numpy as np >>> from numpy.testing import assert_equal >>> with np.printoptions(precision=2): @@ -369,12 +387,12 @@ def printoptions(*args, **kwargs): set_printoptions, get_printoptions """ - opts = np.get_printoptions() + token = _set_printoptions(*args, **kwargs) + try: - np.set_printoptions(*args, **kwargs) - yield np.get_printoptions() + yield get_printoptions() finally: - np.set_printoptions(**opts) + format_options.reset(token) def _leading_trailing(a, edgeitems, index=()): @@ -714,6 +732,7 @@ def array2string(a, max_line_width=None, precision=None, Examples -------- + >>> import numpy as np >>> x = np.array([1e-16,1,2,3]) >>> np.array2string(x, precision=2, separator=',', ... 
suppress_small=True) @@ -732,7 +751,7 @@ def array2string(a, max_line_width=None, precision=None, overrides = _make_options_dict(precision, threshold, edgeitems, max_line_width, suppress_small, None, None, sign, formatter, floatmode, legacy) - options = _format_options.copy() + options = format_options.get().copy() options.update(overrides) if options['legacy'] <= 113: @@ -955,7 +974,6 @@ def __init__(self, data, precision, floatmode, suppress_small, sign=False, self.sign = sign self.exp_format = False self.large_exponent = False - self.fillFormat(data) def fillFormat(self, data): @@ -1037,22 +1055,23 @@ def fillFormat(self, data): # if there are non-finite values, may need to increase pad_left if data.size != finite_vals.size: neginf = self.sign != '-' or any(data[isinf(data)] < 0) - nanlen = len(_format_options['nanstr']) - inflen = len(_format_options['infstr']) + neginf offset = self.pad_right + 1 # +1 for decimal pt + current_options = format_options.get() self.pad_left = max( - self.pad_left, nanlen - offset, inflen - offset + self.pad_left, len(current_options['nanstr']) - offset, + len(current_options['infstr']) + neginf - offset ) def __call__(self, x): if not np.isfinite(x): with errstate(invalid='ignore'): + current_options = format_options.get() if np.isnan(x): sign = '+' if self.sign == '+' else '' - ret = sign + _format_options['nanstr'] + ret = sign + current_options['nanstr'] else: # isinf sign = '-' if x < 0 else '+' if self.sign == '+' else '' - ret = sign + _format_options['infstr'] + ret = sign + current_options['infstr'] return ' '*( self.pad_left + self.pad_right + 1 - len(ret) ) + ret @@ -1139,6 +1158,7 @@ def format_float_scientific(x, precision=None, unique=True, trim='k', Examples -------- + >>> import numpy as np >>> np.format_float_scientific(np.float32(np.pi)) '3.1415927e+00' >>> s = np.float32(1.23e24) @@ -1226,6 +1246,7 @@ def format_float_positional(x, precision=None, unique=True, Examples -------- + >>> import numpy as np >>> 
np.format_float_positional(np.float32(np.pi)) '3.1415927' >>> np.format_float_positional(np.float16(np.pi)) @@ -1443,10 +1464,10 @@ def _void_scalar_to_string(x, is_repr=True): scalartypes.c.src code, and is placed here because it uses the elementwise formatters defined above. """ - options = _format_options.copy() + options = format_options.get().copy() if options["legacy"] <= 125: - return StructuredVoidFormat.from_data(array(x), **_format_options)(x) + return StructuredVoidFormat.from_data(array(x), **options)(x) if options.get('formatter') is None: options['formatter'] = {} @@ -1480,6 +1501,7 @@ def dtype_is_implied(dtype): Examples -------- + >>> import numpy as np >>> np._core.arrayprint.dtype_is_implied(int) True >>> np.array([1, 2, 3], int) @@ -1490,7 +1512,7 @@ def dtype_is_implied(dtype): array([1, 2, 3], dtype=int8) """ dtype = np.dtype(dtype) - if _format_options['legacy'] <= 113 and dtype.type == np.bool: + if format_options.get()['legacy'] <= 113 and dtype.type == np.bool: return False # not just void types can be structured, and names are not part of the repr @@ -1540,8 +1562,13 @@ def _array_repr_implementation( arr, max_line_width=None, precision=None, suppress_small=None, array2string=array2string): """Internal version of array_repr() that allows overriding array2string.""" + current_options = format_options.get() + override_repr = current_options["override_repr"] + if override_repr is not None: + return override_repr(arr) + if max_line_width is None: - max_line_width = _format_options['linewidth'] + max_line_width = current_options['linewidth'] if type(arr) is not ndarray: class_name = type(arr).__name__ @@ -1553,7 +1580,7 @@ def _array_repr_implementation( prefix = class_name + "(" suffix = ")" if skipdtype else "," - if (_format_options['legacy'] <= 113 and + if (current_options['legacy'] <= 113 and arr.shape == () and not arr.dtype.names): lst = repr(arr.item()) elif arr.size > 0 or arr.shape == (0,): @@ -1574,7 +1601,7 @@ def 
_array_repr_implementation( # Note: This line gives the correct result even when rfind returns -1. last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1) spacer = " " - if _format_options['legacy'] <= 113: + if current_options['legacy'] <= 113: if issubclass(arr.dtype.type, flexible): spacer = '\n' + ' '*len(class_name + "(") elif last_line_len + len(dtype_str) + 1 > max_line_width: @@ -1621,6 +1648,7 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): Examples -------- + >>> import numpy as np >>> np.array_repr(np.array([1,2])) 'array([1, 2])' >>> np.array_repr(np.ma.array([0.])) @@ -1648,7 +1676,7 @@ def _array_str_implementation( a, max_line_width=None, precision=None, suppress_small=None, array2string=array2string): """Internal version of array_str() that allows overriding array2string.""" - if (_format_options['legacy'] <= 113 and + if (format_options.get()['legacy'] <= 113 and a.shape == () and not a.dtype.names): return str(a.item()) @@ -1701,6 +1729,7 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): Examples -------- + >>> import numpy as np >>> np.array_str(np.arange(3)) '[0 1 2]' @@ -1715,78 +1744,3 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None): array2string=_array2string_impl) _default_array_repr = functools.partial(_array_repr_implementation, array2string=_array2string_impl) - - -def set_string_function(f, repr=True): - """ - Set a Python function to be used when pretty printing arrays. - - .. deprecated:: 2.0 - Use `np.set_printoptions` instead with a formatter for custom - printing of NumPy objects. - - Parameters - ---------- - f : function or None - Function to be used to pretty print arrays. The function should expect - a single array argument and return a string of the representation of - the array. If None, the function is reset to the default NumPy function - to print arrays. 
- repr : bool, optional - If True (default), the function for pretty printing (``__repr__``) - is set, if False the function that returns the default string - representation (``__str__``) is set. - - See Also - -------- - set_printoptions, get_printoptions - - Examples - -------- - >>> from numpy._core.arrayprint import set_string_function - >>> def pprint(arr): - ... return 'HA! - What are you going to do now?' - ... - >>> set_string_function(pprint) - >>> a = np.arange(10) - >>> a - HA! - What are you going to do now? - >>> _ = a - >>> # [0 1 2 3 4 5 6 7 8 9] - - We can reset the function to the default: - - >>> set_string_function(None) - >>> a - array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) - - `repr` affects either pretty printing or normal string representation. - Note that ``__repr__`` is still affected by setting ``__str__`` - because the width of each array element in the returned string becomes - equal to the length of the result of ``__str__()``. - - >>> x = np.arange(4) - >>> set_string_function(lambda x:'random', repr=False) - >>> x.__str__() - 'random' - >>> x.__repr__() - 'array([0, 1, 2, 3])' - - """ - - # Deprecated in NumPy 2.0, 2023-07-11 - warnings.warn( - "`set_string_function` is deprecated. Use `np.set_printoptions` " - "with a formatter for custom printing NumPy objects. " - "(deprecated in NumPy 2.0)", - DeprecationWarning, - stacklevel=2 - ) - - if f is None: - if repr: - return multiarray.set_string_function(_default_array_repr, 1) - else: - return multiarray.set_string_function(_default_array_str, 0) - else: - return multiarray.set_string_function(f, repr) diff --git a/numpy/_core/arrayprint.pyi b/numpy/_core/arrayprint.pyi index 50f10ec694f0..44d77083cd63 100644 --- a/numpy/_core/arrayprint.pyi +++ b/numpy/_core/arrayprint.pyi @@ -63,7 +63,8 @@ def set_printoptions( sign: Literal[None, "-", "+", " "] = ..., floatmode: None | _FloatMode = ..., *, - legacy: Literal[None, False, "1.13", "1.21"] = ... 
+ legacy: Literal[None, False, "1.13", "1.21"] = ..., + override_repr: None | Callable[[NDArray[Any]], str] = ..., ) -> None: ... def get_printoptions() -> _FormatOptions: ... def array2string( diff --git a/numpy/_core/code_generators/cversions.txt b/numpy/_core/code_generators/cversions.txt index f91f616585a3..4ce44ada45bf 100644 --- a/numpy/_core/code_generators/cversions.txt +++ b/numpy/_core/code_generators/cversions.txt @@ -70,7 +70,10 @@ 0x00000010 = 04a7bf1e65350926a0e528798da263c0 # Version 17 (NumPy 1.25) No actual change. +# Version 17 (NumPy 1.26) No change 0x00000011 = ca1aebdad799358149567d9d93cbca09 # Version 18 (NumPy 2.0.0) 0x00000012 = 2b8f1f4da822491ff030b2b37dff07e3 +# Version 19 (NumPy 2.1.0) Only header additions +0x00000013 = 2b8f1f4da822491ff030b2b37dff07e3 diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 27e42bcb4c14..da2f8f636e59 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -41,6 +41,7 @@ def get_processor(): API_FILES = [join('multiarray', 'alloc.c'), join('multiarray', 'abstractdtypes.c'), join('multiarray', 'arrayfunction_override.c'), + join('multiarray', 'array_api_standard.c'), join('multiarray', 'array_assign_array.c'), join('multiarray', 'array_assign_scalar.c'), join('multiarray', 'array_coercion.c'), @@ -159,7 +160,7 @@ def __str__(self): return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr) def api_hash(self): - m = hashlib.md5() + m = hashlib.md5(usedforsecurity=False) m.update(remove_whitespace(self.return_type)) m.update('\000') m.update(self.name) @@ -532,7 +533,9 @@ def fullapi_hash(api_dicts): a.extend(name) a.extend(','.join(map(str, data))) - return hashlib.md5(''.join(a).encode('ascii')).hexdigest() + return hashlib.md5( + ''.join(a).encode('ascii'), usedforsecurity=False + ).hexdigest() # To parse strings like 'hex = checksum' where hex is e.g. 
0x1234567F and # checksum a 128 bits md5 checksum (hex format as well) @@ -554,7 +557,7 @@ def main(): tagname = sys.argv[1] order_file = sys.argv[2] functions = get_api_functions(tagname, order_file) - m = hashlib.md5(tagname) + m = hashlib.md5(tagname, usedforsecurity=False) for func in functions: print(func) ah = func.api_hash() diff --git a/numpy/_core/code_generators/generate_numpy_api.py b/numpy/_core/code_generators/generate_numpy_api.py index d69725e581aa..7fc6ad1aaf89 100644 --- a/numpy/_core/code_generators/generate_numpy_api.py +++ b/numpy/_core/code_generators/generate_numpy_api.py @@ -33,13 +33,18 @@ _NPY_VERSION_CONCAT_HELPER(PY_ARRAY_UNIQUE_SYMBOL) #endif +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + #if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) -extern void **PyArray_API; -extern int PyArray_RUNTIME_VERSION; +extern NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +extern NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; #else #if defined(PY_ARRAY_UNIQUE_SYMBOL) -void **PyArray_API; -int PyArray_RUNTIME_VERSION; +NPY_API_SYMBOL_ATTRIBUTE void **PyArray_API; +NPY_API_SYMBOL_ATTRIBUTE int PyArray_RUNTIME_VERSION; #else static void **PyArray_API = NULL; static int PyArray_RUNTIME_VERSION = 0; @@ -227,6 +232,7 @@ def do_generate_api(targets, sources): # Check multiarray api indexes multiarray_api_index = genapi.merge_api_dicts(multiarray_api) + unused_index_max = max(multiarray_api_index.get("__unused_indices__", 0)) genapi.check_api_dict(multiarray_api_index) numpyapi_list = genapi.get_api_functions('NUMPY_API', @@ -278,6 +284,10 @@ def do_generate_api(targets, sources): init_list.append(api_item.array_api_define()) module_list.append(api_item.internal_define()) + # In case we end with a "hole", append more NULLs + while len(init_list) <= unused_index_max: + init_list.append(" NULL") + # Write to header s = 
h_template % ('\n'.join(module_list), '\n'.join(extension_list)) genapi.write_file(header_file, s) diff --git a/numpy/_core/code_generators/generate_ufunc_api.py b/numpy/_core/code_generators/generate_ufunc_api.py index 2acced5d5619..ef34b95d9fb2 100644 --- a/numpy/_core/code_generators/generate_ufunc_api.py +++ b/numpy/_core/code_generators/generate_ufunc_api.py @@ -18,11 +18,16 @@ #define PyUFunc_API PY_UFUNC_UNIQUE_SYMBOL #endif +/* By default do not export API in an .so (was never the case on windows) */ +#ifndef NPY_API_SYMBOL_ATTRIBUTE + #define NPY_API_SYMBOL_ATTRIBUTE NPY_VISIBILITY_HIDDEN +#endif + #if defined(NO_IMPORT) || defined(NO_IMPORT_UFUNC) -extern void **PyUFunc_API; +extern NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; #else #if defined(PY_UFUNC_UNIQUE_SYMBOL) -void **PyUFunc_API; +NPY_API_SYMBOL_ATTRIBUTE void **PyUFunc_API; #else static void **PyUFunc_API=NULL; #endif @@ -36,11 +41,7 @@ PyObject *numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { PyErr_Clear(); - numpy = PyImport_ImportModule("numpy._core._multiarray_umath"); - if (numpy == NULL && PyErr_ExceptionMatches(PyExc_ModuleNotFoundError)) { - PyErr_Clear(); - numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); - } + numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); } if (numpy == NULL) { diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index ec5a153dd439..64d6a19a871d 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -249,6 +249,7 @@ def english_upper(s): Examples -------- + >>> import numpy as np >>> from numpy.lib.utils import english_upper >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_' >>> english_upper(s) @@ -953,6 +954,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.ceil'), None, + TD(bints), 
TD('e', f='ceil', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='ceil'), @@ -962,6 +964,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.trunc'), None, + TD(bints), TD('e', f='trunc', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='trunc'), @@ -978,6 +981,7 @@ def english_upper(s): Ufunc(1, 1, None, docstrings.get('numpy._core.umath.floor'), None, + TD(bints), TD('e', f='floor', astype={'e': 'f'}), TD(inexactvec, dispatch=[('loops_unary_fp', 'fd')]), TD('fdg', f='floor'), @@ -1280,7 +1284,46 @@ def english_upper(s): docstrings.get('numpy._core.umath._expandtabs'), None, ), - +'_center': + Ufunc(3, 1, None, + docstrings.get('numpy._core.umath._center'), + None, + ), +'_ljust': + Ufunc(3, 1, None, + docstrings.get('numpy._core.umath._ljust'), + None, + ), +'_rjust': + Ufunc(3, 1, None, + docstrings.get('numpy._core.umath._rjust'), + None, + ), +'_zfill': + Ufunc(2, 1, None, + docstrings.get('numpy._core.umath._zfill'), + None, + ), +'_partition_index': + Ufunc(3, 3, None, + docstrings.get('numpy._core.umath._partition_index'), + None, + ), +'_rpartition_index': + Ufunc(3, 3, None, + docstrings.get('numpy._core.umath._rpartition_index'), + None, + ), +'_partition': + Ufunc(2, 3, None, + docstrings.get('numpy._core.umath._partition'), + None, + ), +'_rpartition': + Ufunc(2, 3, None, + docstrings.get('numpy._core.umath._rpartition'), + None, + ), } def indent(st, spaces): @@ -1337,10 +1380,9 @@ def make_arrays(funcdict): funclist = [] datalist = [] siglist = [] - k = 0 sub = 0 - for t in uf.type_descriptions: + for k, t in enumerate(uf.type_descriptions): cfunc_alias = t.cfunc_alias if t.cfunc_alias else name cfunc_fname = None if t.func_data is FullTypeDescr: @@ -1400,8 +1442,6 @@ def make_arrays(funcdict): for x in t.in_ + t.out: siglist.append('NPY_%s' % (english_upper(chartoname[x]),)) - k += 1 - if funclist or siglist or datalist: funcnames = ', 
'.join(funclist) signames = ', '.join(siglist) @@ -1411,7 +1451,7 @@ def make_arrays(funcdict): % (name, funcnames)) code1list.append("static void * %s_data[] = {%s};" % (name, datanames)) - code1list.append("static char %s_signatures[] = {%s};" + code1list.append("static const char %s_signatures[] = {%s};" % (name, signames)) uf.empty = False else: diff --git a/numpy/_core/code_generators/numpy_api.py b/numpy/_core/code_generators/numpy_api.py index 7dbaeff4940b..ffdd70b6fe00 100644 --- a/numpy/_core/code_generators/numpy_api.py +++ b/numpy/_core/code_generators/numpy_api.py @@ -94,6 +94,7 @@ def get_annotations(): # NOTE: The Slots 320-360 are defined in `_experimental_dtype_api.h` # and filled explicitly outside the code generator as the metaclass # makes them tricky to expose. (This may be refactored.) + # Slot 366, 367, 368 are the abstract DTypes # End 2.0 API } @@ -107,13 +108,16 @@ def get_annotations(): 103, 115, 117, 122, 163, 164, 171, 173, 197, 201, 202, 208, 219, 220, 221, 222, 223, 278, 291, 293, 294, 295, 301] - + list(range(320, 361)) # range reserved DType class slots + # range/slots reserved DType classes (see _public_dtype_api_table.h): + + list(range(320, 361)) + [366, 367, 368] ), 'PyArray_GetNDArrayCVersion': (0,), # Unused slot 40, was `PyArray_SetNumericOps` # Unused slot 41, was `PyArray_GetNumericOps`, 'PyArray_INCREF': (42,), 'PyArray_XDECREF': (43,), + # `PyArray_SetStringFunction` was stubbed out + # and should be removed in the future. 
'PyArray_SetStringFunction': (44,), 'PyArray_DescrFromType': (45,), 'PyArray_TypeObjectFromType': (46,), diff --git a/numpy/_core/code_generators/ufunc_docstrings.py b/numpy/_core/code_generators/ufunc_docstrings.py index 6a8946be3dee..cf000506e096 100644 --- a/numpy/_core/code_generators/ufunc_docstrings.py +++ b/numpy/_core/code_generators/ufunc_docstrings.py @@ -84,6 +84,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.array([-1.2, 1.2]) >>> np.absolute(x) array([ 1.2, 1.2]) @@ -136,6 +137,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.add(1.0, 4.0) 5.0 >>> x1 = np.arange(9.0).reshape((3, 3)) @@ -203,6 +205,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + We expect the arccos of 1 to be 0, and of -1 to be pi: >>> np.arccos([1, -1]) @@ -263,6 +267,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arccosh([np.e, 10.0]) array([ 1.65745445, 2.99322285]) >>> np.arccosh(1) @@ -315,6 +320,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arcsin(1) # pi/2 1.5707963267948966 >>> np.arcsin(-1) # -pi/2 @@ -366,6 +372,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arcsinh(np.array([np.e, 10.0])) array([ 1.72538256, 2.99822295]) @@ -421,6 +428,8 @@ def add_newdoc(place, name, doc): -------- We expect the arctan of 0 to be 0, and of 1 to be pi/4: + >>> import numpy as np + >>> np.arctan([0, 1]) array([ 0. , 0.78539816]) @@ -500,6 +509,8 @@ def add_newdoc(place, name, doc): -------- Consider four points in different quadrants: + >>> import numpy as np + >>> x = np.array([-1, +1, +1, -1]) >>> y = np.array([-1, -1, +1, +1]) >>> np.arctan2(y, x) * 180 / np.pi @@ -567,6 +578,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.arctanh([0, -0.5]) array([ 0. 
, -0.54930614]) @@ -603,6 +615,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. The bit-wise AND of 13 and 17 is therefore ``000000001``, or 1: @@ -665,6 +679,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 has the binary representation ``00001101``. Likewise, 16 is represented by ``00010000``. The bit-wise OR of 13 and 16 is then ``00011101``, or 29: @@ -732,6 +748,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + The number 13 is represented by ``00001101``. Likewise, 17 is represented by ``00010001``. The bit-wise XOR of 13 and 17 is therefore ``00011100``, or 28: @@ -777,7 +795,7 @@ def add_newdoc(place, name, doc): Returns ------- y : ndarray or scalar - The ceiling of each element in `x`, with `float` dtype. + The ceiling of each element in `x`. $OUT_SCALAR_1 See Also @@ -786,6 +804,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.ceil(a) array([-1., -1., -0., 1., 2., 2., 2.]) @@ -822,6 +842,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.trunc(a) array([-1., -1., -0., 0., 1., 1., 2.]) @@ -856,6 +877,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.conjugate(1+2j) (1-2j) @@ -894,6 +916,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cos(np.array([0, np.pi/2, np.pi])) array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> @@ -931,6 +954,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cosh(0) 1.0 @@ -968,6 +992,8 @@ def add_newdoc(place, name, doc): -------- Convert a radian array to degrees + >>> import numpy as np + >>> rad = 
np.arange(12.)*np.pi/6 >>> np.degrees(rad) array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., @@ -1009,6 +1035,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.rad2deg(np.pi/2) 90.0 @@ -1052,6 +1079,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.heaviside([-1.5, 0, 2.0], 0.5) array([ 0. , 0.5, 1. ]) >>> np.heaviside([-1.5, 0, 2.0], 1) @@ -1091,6 +1119,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.divide(2.0, 4.0) 0.5 >>> x1 = np.arange(9.0).reshape((3, 3)) @@ -1136,6 +1165,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.equal([0, 1, 3], np.arange(3)) array([ True, True, False]) @@ -1201,6 +1231,8 @@ def add_newdoc(place, name, doc): -------- Plot the magnitude and phase of ``exp(x)`` in the complex plane: + >>> import numpy as np + >>> import matplotlib.pyplot as plt >>> x = np.linspace(-2*np.pi, 2*np.pi, 100) @@ -1248,6 +1280,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.exp2([2, 3]) array([ 4., 8.]) @@ -1285,6 +1318,8 @@ def add_newdoc(place, name, doc): about 32 significant digits. This example shows the superiority of expm1 in this case. 
+ >>> import numpy as np + >>> np.expm1(1e-10) 1.00000000005e-10 >>> np.exp(1e-10) - 1 @@ -1319,6 +1354,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fabs(-1) 1.0 >>> np.fabs([-1.2, 1.2]) @@ -1358,6 +1394,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.floor(a) array([-2., -2., -1., 0., 1., 1., 2.]) @@ -1396,6 +1433,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.floor_divide(7,3) 2 >>> np.floor_divide([1., 2., 3., 4.], 2.5) @@ -1449,6 +1487,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fmod([-3, -2, -1, 1, 2, 3], 2) array([-1, 0, -1, 1, 0, 1]) >>> np.remainder([-3, -2, -1, 1, 2, 3], 2) @@ -1493,6 +1532,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.greater([4,2],[2,2]) array([ True, False]) @@ -1530,6 +1570,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.greater_equal([4, 2, 1], [2, 2, 2]) array([ True, True, False]) @@ -1567,6 +1608,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))) array([[ 5., 5., 5.], [ 5., 5., 5.], @@ -1589,12 +1631,13 @@ def add_newdoc(place, name, doc): the integers in the input arrays. This ufunc implements the C/Python operator ``~``. - For signed integer inputs, the two's complement is returned. In a - two's-complement system negative numbers are represented by the two's - complement of the absolute value. This is the most common method of - representing signed integers on computers [1]_. A N-bit - two's-complement system can represent every integer in the range - :math:`-2^{N-1}` to :math:`+2^{N-1}-1`. + For signed integer inputs, the bit-wise NOT of the absolute value is + returned. 
In a two's-complement system, this operation effectively flips + all the bits, resulting in a representation that corresponds to the + negative of the input plus one. This is the most common method of + representing signed integers on computers [1]_. A N-bit two's-complement + system can represent every integer in the range :math:`-2^{N-1}` to + :math:`+2^{N-1}-1`. Parameters ---------- @@ -1629,6 +1672,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + We've seen that 13 is represented by ``00001101``. The invert or bit-wise NOT of 13 is then: @@ -1646,8 +1691,8 @@ def add_newdoc(place, name, doc): >>> np.binary_repr(x, width=16) '1111111111110010' - When using signed integer types the result is the two's complement of - the result for the unsigned type: + When using signed integer types, the result is the bit-wise NOT of + the unsigned type, interpreted as a signed integer: >>> np.invert(np.array([13], dtype=np.int8)) array([-14], dtype=int8) @@ -1705,6 +1750,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isfinite(1) True >>> np.isfinite(0) @@ -1761,6 +1807,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isinf(np.inf) True >>> np.isinf(np.nan) @@ -1806,6 +1853,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isnan(np.nan) True >>> np.isnan(np.inf) @@ -1839,6 +1887,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.isnat(np.datetime64("NaT")) True >>> np.isnat(np.datetime64("2016-01-01")) @@ -1879,6 +1928,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.binary_repr(5) '101' >>> np.left_shift(5, 2) @@ -1934,6 +1984,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.less([1, 2], [2, 2]) array([ True, False]) @@ -1970,6 +2021,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy 
as np >>> np.less_equal([4, 2, 1], [2, 2, 2]) array([False, True, True]) @@ -2035,6 +2087,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log([1, np.e, np.e**2, 0]) array([ 0., 1., 2., -inf]) @@ -2089,6 +2142,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log10([1e-15, -3.]) array([-15., nan]) @@ -2137,6 +2191,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.array([0, 1, 2, 2**4]) >>> np.log2(x) array([-inf, 0., 1., 4.]) @@ -2180,6 +2235,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> prob1 = np.log(1e-50) >>> prob2 = np.log(2.5e-50) >>> prob12 = np.logaddexp(prob1, prob2) @@ -2223,6 +2279,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> prob1 = np.log2(1e-50) >>> prob2 = np.log2(2.5e-50) >>> prob12 = np.logaddexp2(prob1, prob2) @@ -2282,6 +2339,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.log1p(1e-99) 1e-99 >>> np.log(1 + 1e-99) @@ -2314,6 +2372,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_and(True, False) False >>> np.logical_and([True, False], [False, False]) @@ -2357,6 +2416,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_not(3) False >>> np.logical_not([True, False, 0, 1]) @@ -2393,6 +2453,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_or(True, False) True >>> np.logical_or([True, False], [False, False]) @@ -2436,6 +2497,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.logical_xor(True, False) True >>> np.logical_xor([True, True, False, False], [True, False, True, False]) @@ -2498,6 +2560,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.maximum([2, 3, 4], [1, 5, 2]) array([2, 5, 4]) @@ 
-2557,6 +2620,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.minimum([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) @@ -2617,6 +2681,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fmax([2, 3, 4], [1, 5, 2]) array([ 2., 5., 4.]) @@ -2675,6 +2740,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.fmin([2, 3, 4], [1, 5, 2]) array([1, 3, 2]) @@ -2806,6 +2872,8 @@ def add_newdoc(place, name, doc): -------- For 2-D arrays it is the matrix product: + >>> import numpy as np + >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([[4, 1], @@ -2907,6 +2975,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Get the projected size along a given normal for an array of vectors. >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) @@ -2950,6 +3020,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.modf([0, 3.5]) (array([ 0. , 0.5]), array([ 0., 3.])) >>> np.modf(-0.5) @@ -2980,6 +3051,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.multiply(2.0, 4.0) 8.0 @@ -3020,6 +3092,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.negative([1.,-1.]) array([-1., 1.]) @@ -3056,6 +3129,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x1 = np.array(([1., -1.])) >>> np.positive(x1) @@ -3094,6 +3168,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.not_equal([1.,2.], [1., 3.]) array([False, True]) >>> np.not_equal([1, 2], [[1, 3],[1, 4]]) @@ -3158,6 +3233,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Cube each element in an array. >>> x1 = np.arange(6) @@ -3245,6 +3322,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Cube each element in a list. 
>>> x1 = range(6) @@ -3308,6 +3387,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Convert a degree array to radians >>> deg = np.arange(12.) * 30. @@ -3352,6 +3433,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.deg2rad(180) 3.1415926535897931 @@ -3386,6 +3468,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.reciprocal(2.) 0.5 >>> np.reciprocal([1, 2., 3.33]) @@ -3442,6 +3525,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.remainder([4, 7], [2, 3]) array([0, 1]) >>> np.remainder(np.arange(7), 5) @@ -3493,6 +3577,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.divmod(np.arange(5), 3) (array([0, 0, 0, 1, 1]), array([0, 1, 2, 0, 1])) @@ -3536,6 +3621,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.binary_repr(10) '1010' >>> np.right_shift(10, 1) @@ -3584,6 +3670,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.rint(a) array([-2., -2., -0., 0., 2., 2., 2.]) @@ -3618,11 +3705,14 @@ def add_newdoc(place, name, doc): Notes ----- There is more than one definition of sign in common use for complex - numbers. The definition used here is equivalent to :math:`x/\\sqrt{x*x}` - which is different from a common alternative, :math:`x/|x|`. + numbers. The definition used here, :math:`x/|x|`, is the more common + and useful one, but is different from the one used in numpy prior to + version 2.0, :math:`x/\\sqrt{x*x}`, which is equivalent to + ``sign(x.real) + 0j if x.real != 0 else sign(x.imag) + 0j``. 
Examples -------- + >>> import numpy as np >>> np.sign([-5., 4.5]) array([-1., 1.]) >>> np.sign(0) @@ -3650,6 +3740,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.signbit(-1.2) True >>> np.signbit(np.array([1, -2.3, 2.1])) @@ -3680,6 +3771,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.copysign(1.3, -1) -1.3 >>> 1/np.copysign(0, 1) @@ -3715,6 +3807,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> eps = np.finfo(np.float64).eps >>> np.nextafter(1, 2) == eps + 1 True @@ -3750,6 +3843,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.spacing(1) == np.finfo(np.float64).eps True @@ -3791,6 +3885,8 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np + Print sine of one angle: >>> np.sin(np.pi/2.) @@ -3844,6 +3940,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.sinh(0) 0.0 >>> np.sinh(np.pi*1j/2) @@ -3902,6 +3999,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.sqrt([1,4,9]) array([ 1., 2., 3.]) @@ -3936,6 +4034,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.cbrt([1,8,27]) array([ 1., 2., 3.]) @@ -3965,6 +4064,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.square([-1j, 1]) array([-1.-0.j, 1.+0.j]) @@ -3993,6 +4093,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.subtract(1.0, 4.0) -3.0 @@ -4045,6 +4146,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> from math import pi >>> np.tan(np.array([-pi,pi/2,pi])) array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16]) @@ -4098,6 +4200,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.tanh((0, np.pi*1j, np.pi*1j/2)) array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. 
+1.63317787e+16j]) @@ -4153,6 +4256,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> x = np.arange(9) >>> y1, y2 = np.frexp(x) >>> y1 @@ -4200,6 +4304,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.ldexp(5, np.arange(4)) array([ 5., 10., 20., 40.], dtype=float16) @@ -4231,6 +4336,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.gcd(12, 20) 4 >>> np.gcd.reduce([15, 25, 35]) @@ -4262,6 +4368,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.lcm(12, 20) 60 >>> np.lcm.reduce([3, 12, 20]) @@ -4302,6 +4409,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.bitwise_count(1023) 10 >>> a = np.array([2**i - 1 for i in range(16)]) @@ -4334,6 +4442,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['Grace Hopper Conference', 'Open Source Day']) >>> np.strings.str_len(a) array([23, 15]) @@ -4373,6 +4482,17 @@ def add_newdoc(place, name, doc): -------- str.isalpha + Examples + -------- + >>> import numpy as np + >>> a = np.array(['a', 'b', '0']) + >>> np.strings.isalpha(a) + array([ True, True, False]) + + >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']]) + >>> np.strings.isalpha(a) + array([[ True, True, False], [ True, False, False]]) + """) add_newdoc('numpy._core.umath', 'isdigit', @@ -4403,6 +4523,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['a', 'b', '0']) >>> np.strings.isdigit(a) array([False, False, True]) @@ -4448,7 +4569,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4463,10 +4584,11 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(['a', '1', 'a1', '(', '']) >>> 
np.strings.isalnum(a) array([ True, True, True, False, False]) - + """) add_newdoc('numpy._core.umath', 'islower', @@ -4477,7 +4599,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4492,6 +4614,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.islower("GHC") array(False) >>> np.strings.islower("ghc") @@ -4507,7 +4630,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4522,8 +4645,9 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isupper("GHC") - array(True) + array(True) >>> a = np.array(["hello", "HELLO", "Hello"]) >>> np.strings.isupper(a) array([False, True, False]) @@ -4537,7 +4661,7 @@ def add_newdoc(place, name, doc): Parameters ---------- - x : array_like, with `np.bytes_` or `np.str_` dtype + x : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype $PARAMS Returns @@ -4552,12 +4676,13 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.istitle("Numpy Is Great") array(True) >>> np.strings.istitle("Numpy is great") array(False) - + """) add_newdoc('numpy._core.umath', 'isdecimal', @@ -4586,6 +4711,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isdecimal(['12345', '4.99', '123ABC', '']) array([ True, False, False, False]) @@ -4617,6 +4743,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> np.strings.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII']) array([ True, False, False, False, False]) @@ -4653,6 +4780,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(["NumPy is a Python 
library"]) >>> np.strings.find(a, "Python", 0, None) array([11]) @@ -4721,6 +4849,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.index(a, "Science") array([9]) @@ -4793,6 +4923,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.rindex(a, "Science") array([9]) @@ -4866,6 +4997,7 @@ def add_newdoc(place, name, doc): Examples -------- + >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> import numpy as np + >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) + array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], dtype='>> np.strings.center(c, width=9, fillchar='*') + array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) + array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> import numpy as np + >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.ljust(c, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.ljust(c, width=9) + array(['aAaAaA ', ' aA ', 'abBABba '], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rjust(a, width=3) + array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.rjust(a, width=9) + array([' aAaAaA', ' aA ', ' abBABba'], dtype='>> import numpy as np + >>> np.strings.zfill(['1', '-1', '+1'], 3) + array(['001', '-01', '+01'], dtype='>> import numpy as np + + The ufunc is used most easily via ``np.strings.partition``, + which calls it after calculating the indices:: + + >>> x = np.array(["Numpy is nice!"]) + >>> np.strings.partition(x, " ") + (array(['Numpy'], dtype='>> import numpy as np + + The ufunc is used most easily 
via ``np.strings.rpartition``, + which calls it after calculating the indices:: + + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.strings.rpartition(a, 'A') + (array(['aAaAa', ' a', 'abB'], dtype='>> import numpy as np + + The ufunc is used most easily via ``np.strings.partition``, + which calls it under the hood:: + + >>> x = np.array(["Numpy is nice!"], dtype="T") + >>> np.strings.partition(x, " ") + (array(['Numpy'], dtype=StringDType()), + array([' '], dtype=StringDType()), + array(['is nice!'], dtype=StringDType())) + + """) + +add_newdoc('numpy._core.umath', '_rpartition', + """ + Partition each element in ``x1`` around the right-most separator, + ``x2``. + + For each element in ``x1``, split the element at the last + occurrence of ``x2`` at location ``x3``, and return a 3-tuple + containing the part before the separator, the separator itself, + and the part after the separator. If the separator is not found, + the third item of the tuple will contain the whole string, and + the first and second ones will be the empty string. + + Parameters + ---------- + x1 : array-like, with ``StringDType`` dtype + Input array + x2 : array-like, with ``StringDType`` dtype + Separator to split each string element in ``x1``. 
+ + Returns + ------- + out : 3-tuple: + - ``StringDType`` array with the part before the separator + - ``StringDType`` array with the separator + - ``StringDType`` array with the part after the separator + + See Also + -------- + str.rpartition + + Examples + -------- + >>> import numpy as np + + The ufunc is used most easily via ``np.strings.rpartition``, + which calls it after calculating the indices:: + + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'], dtype="T") + >>> np.strings.rpartition(a, 'A') + (array(['aAaAa', ' a', 'abB'], dtype=StringDType()), + array(['A', 'A', 'A'], dtype=StringDType()), + array(['', ' ', 'Bba'], dtype=StringDType())) + + """) diff --git a/numpy/_core/config.h.in b/numpy/_core/config.h.in index 7ef169c44427..7625615270a2 100644 --- a/numpy/_core/config.h.in +++ b/numpy/_core/config.h.in @@ -8,6 +8,8 @@ #mesondefine HAVE_FSEEKO #mesondefine HAVE_FALLOCATE #mesondefine HAVE_STRTOLD_L +#mesondefine HAVE_THREAD_LOCAL +#mesondefine HAVE__THREAD_LOCAL #mesondefine HAVE__THREAD #mesondefine HAVE___DECLSPEC_THREAD_ @@ -106,6 +108,8 @@ #mesondefine HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_LE #mesondefine HAVE_LDOUBLE_IBM_DOUBLE_DOUBLE_BE +#mesondefine HAVE_EXTERNAL_LAPACK + #ifndef __cplusplus /* #undef inline */ #endif diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index 96dec7543101..6301556aaaa9 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -17,13 +17,24 @@ """ import functools +import numpy as np from .._utils import set_module from .numerictypes import bytes_, str_, character from .numeric import ndarray, array as narray, asarray as asnarray from numpy._core.multiarray import compare_chararrays from numpy._core import overrides from numpy.strings import * -from numpy.strings import multiply as strings_multiply +from numpy.strings import ( + multiply as strings_multiply, + partition as strings_partition, + rpartition as strings_rpartition, +) +from numpy._core.strings import ( + _split as 
split, + _rsplit as rsplit, + _splitlines as splitlines, + _join as join, +) __all__ = [ 'equal', 'not_equal', 'greater_equal', 'less_equal', @@ -67,10 +78,11 @@ def equal(x1, x2): Examples -------- + >>> import numpy as np >>> y = "aa " >>> x = "aa" >>> np.char.equal(x, y) - array(True) + array(True) See Also -------- @@ -104,10 +116,11 @@ def not_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.not_equal(x1, 'b') array([ True, False, True]) - + """ return compare_chararrays(x1, x2, '!=', True) @@ -138,10 +151,11 @@ def greater_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater_equal(x1, 'b') array([False, True, True]) - + """ return compare_chararrays(x1, x2, '>=', True) @@ -171,10 +185,11 @@ def less_equal(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less_equal(x1, 'b') array([ True, True, False]) - + """ return compare_chararrays(x1, x2, '<=', True) @@ -204,10 +219,11 @@ def greater(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.greater(x1, 'b') array([False, False, True]) - + """ return compare_chararrays(x1, x2, '>', True) @@ -237,10 +253,11 @@ def less(x1, x2): Examples -------- + >>> import numpy as np >>> x1 = np.array(['a', 'b', 'c']) >>> np.char.less(x1, 'b') array([True, False, False]) - + """ return compare_chararrays(x1, x2, '<', True) @@ -272,6 +289,7 @@ def multiply(a, i): Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "c"]) >>> np.strings.multiply(a, 3) array(['aaa', 'bbb', 'ccc'], dtype='>> import numpy as np + >>> x = np.array(["Numpy is nice!"]) + >>> np.char.partition(x, " ") + array([['Numpy', ' ', 'is nice!']], dtype='>> import numpy as np + >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) + >>> np.char.rpartition(a, 'A') + array([['aAaAa', 'A', ''], + [' a', 'A', ' '], + ['abB', 'A', 'Bba']], 
dtype='>> import numpy as np >>> charar = np.char.chararray((3, 3)) >>> charar[:] = 'a' >>> charar @@ -479,19 +582,13 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): def __array_finalize__(self, obj): # The b is a special case because it is used for reconstructing. - if self.dtype.char not in 'SUbc': + if self.dtype.char not in 'VSUbc': raise ValueError("Can only create a chararray from string data.") def __getitem__(self, obj): val = ndarray.__getitem__(self, obj) - if isinstance(val, character): - temp = val.rstrip() - if len(temp) == 0: - val = '' - else: - val = temp - + return val.rstrip() return val # IMPLEMENTATION NOTE: Most of the methods of this class are @@ -1296,9 +1393,10 @@ class adds the following functionality: Examples -------- + >>> import numpy as np >>> np.char.asarray(['hello', 'world']) chararray(['hello', 'world'], dtype=' _CharArray[bytes_]: ... def zfill(self, width: _ArrayLikeInt_co) -> chararray[Any, _CharDType]: ... - def capitalize(self) -> chararray[_ShapeType, _CharDType]: ... - def title(self) -> chararray[_ShapeType, _CharDType]: ... - def swapcase(self) -> chararray[_ShapeType, _CharDType]: ... - def lower(self) -> chararray[_ShapeType, _CharDType]: ... - def upper(self) -> chararray[_ShapeType, _CharDType]: ... - def isalnum(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isalpha(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isdigit(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def islower(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isspace(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def istitle(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isupper(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isnumeric(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... - def isdecimal(self) -> ndarray[_ShapeType, dtype[np.bool]]: ... + def capitalize(self) -> chararray[_ShapeType_co, _CharDType]: ... + def title(self) -> chararray[_ShapeType_co, _CharDType]: ... 
+ def swapcase(self) -> chararray[_ShapeType_co, _CharDType]: ... + def lower(self) -> chararray[_ShapeType_co, _CharDType]: ... + def upper(self) -> chararray[_ShapeType_co, _CharDType]: ... + def isalnum(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isalpha(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isdigit(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def islower(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isspace(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def istitle(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isupper(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isnumeric(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... + def isdecimal(self) -> ndarray[_ShapeType_co, dtype[np.bool]]: ... __all__: list[str] diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 0ef50471b9c4..a3d8712764e0 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -21,8 +21,8 @@ __all__ = [ 'all', 'amax', 'amin', 'any', 'argmax', 'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip', - 'compress', 'cumprod', 'cumsum', 'diagonal', 'mean', - 'max', 'min', 'matrix_transpose', + 'compress', 'cumprod', 'cumsum', 'cumulative_prod', 'cumulative_sum', + 'diagonal', 'mean', 'max', 'min', 'matrix_transpose', 'ndim', 'nonzero', 'partition', 'prod', 'ptp', 'put', 'ravel', 'repeat', 'reshape', 'resize', 'round', 'searchsorted', 'shape', 'size', 'sort', 'squeeze', @@ -186,6 +186,7 @@ def take(a, indices, axis=None, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> a = [4, 3, 5, 7, 6, 8] >>> indices = [0, 1, 4] >>> np.take(a, indices) @@ -206,13 +207,13 @@ def take(a, indices, axis=None, out=None, mode='raise'): return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode) -def _reshape_dispatcher(a, newshape, order=None): +def _reshape_dispatcher(a, /, shape=None, order=None, *, newshape=None, + copy=None): return (a,) -# 
not deprecated --- copy if necessary, view otherwise @array_function_dispatch(_reshape_dispatcher) -def reshape(a, newshape, order='C'): +def reshape(a, /, shape=None, order='C', *, newshape=None, copy=None): """ Gives a new shape to an array without changing its data. @@ -220,14 +221,14 @@ def reshape(a, newshape, order='C'): ---------- a : array_like Array to be reshaped. - newshape : int or tuple of ints + shape : int or tuple of ints The new shape should be compatible with the original shape. If an integer, then the result will be a 1-D array of that length. One shape dimension can be -1. In this case, the value is inferred from the length of the array and remaining dimensions. order : {'C', 'F', 'A'}, optional - Read the elements of `a` using this index order, and place the - elements into the reshaped array using this index order. 'C' + Read the elements of ``a`` using this index order, and place the + elements into the reshaped array using this index order. 'C' means to read / write the elements using C-like index order, with the last axis index changing fastest, back to the first axis index changing slowest. 'F' means to read / write the @@ -236,8 +237,16 @@ def reshape(a, newshape, order='C'): the 'C' and 'F' options take no account of the memory layout of the underlying array, and only refer to the order of indexing. 'A' means to read / write the elements in Fortran-like index - order if `a` is Fortran *contiguous* in memory, C-like order + order if ``a`` is Fortran *contiguous* in memory, C-like order otherwise. + newshape : int or tuple of ints + .. deprecated:: 2.1 + Replaced by ``shape`` argument. Retained for backward + compatibility. + copy : bool, optional + If ``True``, then the array data is copied. If ``None``, a copy will + only be made if it's required by ``order``. For ``False`` it raises + a ``ValueError`` if a copy cannot be avoided. Default: ``None``. 
Returns ------- @@ -255,9 +264,9 @@ def reshape(a, newshape, order='C'): It is not always possible to change the shape of an array without copying the data. - The `order` keyword gives the index ordering both for *fetching* the values - from `a`, and then *placing* the values into the output array. - For example, let's say you have an array: + The ``order`` keyword gives the index ordering both for *fetching* + the values from ``a``, and then *placing* the values into the output + array. For example, let's say you have an array: >>> a = np.arange(6).reshape((3, 2)) >>> a @@ -285,6 +294,7 @@ def reshape(a, newshape, order='C'): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3], [4,5,6]]) >>> np.reshape(a, 6) array([1, 2, 3, 4, 5, 6]) @@ -296,7 +306,26 @@ def reshape(a, newshape, order='C'): [3, 4], [5, 6]]) """ - return _wrapfunc(a, 'reshape', newshape, order=order) + if newshape is None and shape is None: + raise TypeError( + "reshape() missing 1 required positional argument: 'shape'") + if newshape is not None: + if shape is not None: + raise TypeError( + "You cannot specify 'newshape' and 'shape' arguments " + "at the same time.") + # Deprecated in NumPy 2.1, 2024-04-18 + warnings.warn( + "`newshape` keyword argument is deprecated, " + "use `shape=...` or pass shape positionally instead. " + "(deprecated in NumPy 2.1)", + DeprecationWarning, + stacklevel=2, + ) + shape = newshape + if copy is not None: + return _wrapfunc(a, 'reshape', shape, order=order, copy=copy) + return _wrapfunc(a, 'reshape', shape, order=order) def _choose_dispatcher(a, choices, out=None, mode=None): @@ -389,6 +418,7 @@ def choose(a, choices, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13], ... 
[20, 21, 22, 23], [30, 31, 32, 33]] >>> np.choose([2, 3, 1, 0], choices @@ -463,6 +493,7 @@ def repeat(a, repeats, axis=None): Examples -------- + >>> import numpy as np >>> np.repeat(3, 4) array([3, 3, 3, 3]) >>> x = np.array([[1,2],[3,4]]) @@ -524,6 +555,7 @@ def put(a, ind, v, mode='raise'): Examples -------- + >>> import numpy as np >>> a = np.arange(5) >>> np.put(a, [0, 2], [-44, -55]) >>> a @@ -572,6 +604,7 @@ def swapaxes(a, axis1, axis2): Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> np.swapaxes(x,0,1) array([[1], @@ -643,6 +676,7 @@ def transpose(a, axes=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> a array([[1, 2], @@ -695,6 +729,19 @@ def matrix_transpose(x, /): -------- transpose : Generic transpose method. + Examples + -------- + >>> import numpy as np + >>> np.matrix_transpose([[1, 2], [3, 4]]) + array([[1, 3], + [2, 4]]) + + >>> np.matrix_transpose([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]) + array([[[1, 3], + [2, 4]], + [[5, 7], + [6, 8]]]) + """ x = asanyarray(x) if x.ndim < 2: @@ -784,8 +831,11 @@ def partition(a, kth, axis=-1, kind='introselect', order=None): the real parts except when they are equal, in which case the order is determined by the imaginary parts. + The sort order of ``np.nan`` is bigger than ``np.inf``. + Examples -------- + >>> import numpy as np >>> a = np.array([7, 1, 7, 7, 1, 5, 7, 2, 3, 2, 6, 2, 3, 0]) >>> p = np.partition(a, 4) >>> p @@ -879,12 +929,20 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None): Notes ----- - See `partition` for notes on the different selection algorithms. + The returned indices are not guaranteed to be sorted according to + the values. Furthermore, the default selection algorithm ``introselect`` + is unstable, and hence the returned indices are not guaranteed + to be the earliest/latest occurrence of the element. 
+ + `argpartition` works for real/complex inputs with nan values, + see `partition` for notes on the enhanced sort order and + different selection algorithms. Examples -------- One dimensional array: + >>> import numpy as np >>> x = np.array([3, 4, 2, 1]) >>> x[np.argpartition(x, 3)] array([2, 1, 3, 4]) # may vary @@ -1038,6 +1096,7 @@ def sort(a, axis=-1, kind=None, order=None, *, stable=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,4],[3,1]]) >>> np.sort(a) # sort along the last axis array([[1, 4], @@ -1148,6 +1207,7 @@ def argsort(a, axis=-1, kind=None, order=None, *, stable=None): -------- One dimensional array: + >>> import numpy as np >>> x = np.array([3, 1, 2]) >>> np.argsort(x) array([1, 2, 0]) @@ -1250,6 +1310,7 @@ def argmax(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10, 11, 12], @@ -1347,6 +1408,7 @@ def argmin(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10, 11, 12], @@ -1460,6 +1522,7 @@ def searchsorted(a, v, side='left', sorter=None): Examples -------- + >>> import numpy as np >>> np.searchsorted([11,12,13,14,15], 13) 2 >>> np.searchsorted([11,12,13,14,15], 13, side='right') @@ -1523,7 +1586,8 @@ def resize(a, new_shape): Examples -------- - >>> a=np.array([[0,1],[2,3]]) + >>> import numpy as np + >>> a = np.array([[0,1],[2,3]]) >>> np.resize(a,(2,3)) array([[0, 1, 2], [3, 0, 1]]) @@ -1597,6 +1661,7 @@ def squeeze(a, axis=None): Examples -------- + >>> import numpy as np >>> x = np.array([[[0], [1], [2]]]) >>> x.shape (1, 3, 1) @@ -1710,6 +1775,7 @@ def diagonal(a, offset=0, axis1=0, axis2=1): Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape(2,2) >>> a array([[0, 1], @@ -1817,6 +1883,7 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> 
np.trace(np.eye(3)) 3.0 >>> a = np.arange(8).reshape((2,2,2)) @@ -1908,6 +1975,7 @@ def ravel(a, order='C'): -------- It is equivalent to ``reshape(-1, order=order)``. + >>> import numpy as np >>> x = np.array([[1, 2, 3], [4, 5, 6]]) >>> np.ravel(x) array([1, 2, 3, 4, 5, 6]) @@ -2006,6 +2074,7 @@ def nonzero(a): Examples -------- + >>> import numpy as np >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) >>> x array([[3, 0, 0], @@ -2079,6 +2148,7 @@ def shape(a): Examples -------- + >>> import numpy as np >>> np.shape(np.eye(3)) (3, 3) >>> np.shape([[1, 3]]) @@ -2146,6 +2216,7 @@ def compress(condition, a, axis=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4], [5, 6]]) >>> a array([[1, 2], @@ -2171,12 +2242,14 @@ def compress(condition, a, axis=None, out=None): return _wrapfunc(a, 'compress', condition, axis=axis, out=out) -def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs): - return (a, a_min, a_max) +def _clip_dispatcher(a, a_min=None, a_max=None, out=None, *, min=None, + max=None, **kwargs): + return (a, a_min, a_max, out, min, max) @array_function_dispatch(_clip_dispatcher) -def clip(a, a_min, a_max, out=None, **kwargs): +def clip(a, a_min=np._NoValue, a_max=np._NoValue, out=None, *, + min=np._NoValue, max=np._NoValue, **kwargs): """ Clip (limit) the values in an array. @@ -2195,12 +2268,19 @@ def clip(a, a_min, a_max, out=None, **kwargs): Array containing elements to clip. a_min, a_max : array_like or None Minimum and maximum value. If ``None``, clipping is not performed on - the corresponding edge. Only one of `a_min` and `a_max` may be - ``None``. Both are broadcast against `a`. + the corresponding edge. If both ``a_min`` and ``a_max`` are ``None``, + the elements of the returned array stay the same. Both are broadcasted + against ``a``. out : ndarray, optional The results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. 
Its type is preserved. + min, max : array_like or None + Array API compatible alternatives for ``a_min`` and ``a_max`` + arguments. Either ``a_min`` and ``a_max`` or ``min`` and ``max`` + can be passed at the same time. Default: ``None``. + + .. versionadded:: 2.1.0 **kwargs For other keyword-only arguments, see the :ref:`ufunc docs `. @@ -2226,6 +2306,7 @@ def clip(a, a_min, a_max, out=None, **kwargs): Examples -------- + >>> import numpy as np >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -2244,6 +2325,19 @@ def clip(a, a_min, a_max, out=None, **kwargs): array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8]) """ + if a_min is np._NoValue and a_max is np._NoValue: + a_min = None if min is np._NoValue else min + a_max = None if max is np._NoValue else max + elif a_min is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_min'") + elif a_max is np._NoValue: + raise TypeError("clip() missing 1 required positional " + "argument: 'a_max'") + elif min is not np._NoValue or max is not np._NoValue: + raise ValueError("Passing `min` or `max` keyword argument when " + "`a_min` and `a_max` are provided is forbidden.") + return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs) @@ -2316,6 +2410,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, ndarray.sum : Equivalent method. add: ``numpy.add.reduce`` equivalent function. cumsum : Cumulative sum of array elements. + trapezoid : Integration of array values using composite trapezoidal rule. 
mean, average @@ -2348,6 +2443,7 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.sum([0.5, 1.5]) 2.0 >>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32) @@ -2466,11 +2562,13 @@ def any(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Examples -------- + >>> import numpy as np >>> np.any([[True, False], [True, True]]) True - >>> np.any([[True, False], [False, False]], axis=0) - array([ True, False]) + >>> np.any([[True, False, True ], + ... [False, False, False]], axis=0) + array([ True, False, True]) >>> np.any([-1, 0, 5]) True @@ -2577,6 +2675,7 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): Examples -------- + >>> import numpy as np >>> np.all([[True,False],[True,True]]) False @@ -2602,6 +2701,202 @@ def all(a, axis=None, out=None, keepdims=np._NoValue, *, where=np._NoValue): keepdims=keepdims, where=where) +def _cumulative_func(x, func, axis, dtype, out, include_initial): + x = np.atleast_1d(x) + x_ndim = x.ndim + if axis is None: + if x_ndim >= 2: + raise ValueError("For arrays which have more than one dimension " + "``axis`` argument is required.") + axis = 0 + + if out is not None and include_initial: + item = [slice(None)] * x_ndim + item[axis] = slice(1, None) + func.accumulate(x, axis=axis, dtype=dtype, out=out[tuple(item)]) + item[axis] = 0 + out[tuple(item)] = func.identity + return out + + res = func.accumulate(x, axis=axis, dtype=dtype, out=out) + if include_initial: + initial_shape = list(x.shape) + initial_shape[axis] = 1 + res = np.concat( + [np.full_like(res, func.identity, shape=initial_shape), res], + axis=axis, + ) + + return res + + +def _cumulative_prod_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_prod_dispatcher) +def cumulative_prod(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative 
product of elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumprod`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative product is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array, as well as of the accumulator in which + the elements are multiplied. If ``dtype`` is not specified, it + defaults to the dtype of ``x``, unless ``x`` has an integer dtype + with a precision less than that of the default platform integer. + In that case, the default platform integer is used instead. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type of the resulting values will be cast if necessary. + See :ref:`ufuncs-output-type` for more details. + include_initial : bool, optional + Boolean indicating whether to include the initial value (ones) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_prod_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + Examples + -------- + >>> a = np.array([1, 2, 3]) + >>> np.cumulative_prod(a) # intermediate results 1, 1*2 + ... 
# total product 1*2*3 = 6 + array([1, 2, 6]) + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_prod(a, dtype=float) # specify type of output + array([ 1., 2., 6., 24., 120., 720.]) + + The cumulative product for each column (i.e., over the rows) of ``b``: + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_prod(b, axis=0) + array([[ 1, 2, 3], + [ 4, 10, 18]]) + + The cumulative product for each row (i.e. over the columns) of ``b``: + + >>> np.cumulative_prod(b, axis=1) + array([[ 1, 2, 6], + [ 4, 20, 120]]) + + """ + return _cumulative_func(x, um.multiply, axis, dtype, out, include_initial) + + +def _cumulative_sum_dispatcher(x, /, *, axis=None, dtype=None, out=None, + include_initial=None): + return (x, out) + + +@array_function_dispatch(_cumulative_sum_dispatcher) +def cumulative_sum(x, /, *, axis=None, dtype=None, out=None, + include_initial=False): + """ + Return the cumulative sum of the elements along a given axis. + + This function is an Array API compatible alternative to `numpy.cumsum`. + + Parameters + ---------- + x : array_like + Input array. + axis : int, optional + Axis along which the cumulative sum is computed. The default + (None) is only allowed for one-dimensional arrays. For arrays + with more than one dimension ``axis`` is required. + dtype : dtype, optional + Type of the returned array and of the accumulator in which the + elements are summed. If ``dtype`` is not specified, it defaults + to the dtype of ``x``, unless ``x`` has an integer dtype with + a precision less than that of the default platform integer. + In that case, the default platform integer is used. + out : ndarray, optional + Alternative output array in which to place the result. It must + have the same shape and buffer length as the expected output + but the type will be cast if necessary. See :ref:`ufuncs-output-type` + for more details. 
+ include_initial : bool, optional + Boolean indicating whether to include the initial value (zeros) as + the first value in the output. With ``include_initial=True`` + the shape of the output is different than the shape of the input. + Default: ``False``. + + Returns + ------- + cumulative_sum_along_axis : ndarray + A new array holding the result is returned unless ``out`` is + specified, in which case a reference to ``out`` is returned. The + result has the same shape as ``x`` if ``include_initial=False``. + + See Also + -------- + sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. + diff : Calculate the n-th discrete difference along given axis. + + Notes + ----- + Arithmetic is modular when using integer types, and no error is + raised on overflow. + + ``cumulative_sum(a)[-1]`` may not be equal to ``sum(a)`` for + floating-point values since ``sum`` may use a pairwise summation routine, + reducing the roundoff-error. See `sum` for more information. 
+ + Examples + -------- + >>> a = np.array([1, 2, 3, 4, 5, 6]) + >>> a + array([1, 2, 3, 4, 5, 6]) + >>> np.cumulative_sum(a) + array([ 1, 3, 6, 10, 15, 21]) + >>> np.cumulative_sum(a, dtype=float) # specifies type of output value(s) + array([ 1., 3., 6., 10., 15., 21.]) + + >>> b = np.array([[1, 2, 3], [4, 5, 6]]) + >>> np.cumulative_sum(b,axis=0) # sum over rows for each of the 3 columns + array([[1, 2, 3], + [5, 7, 9]]) + >>> np.cumulative_sum(b,axis=1) # sum over columns for each of the 2 rows + array([[ 1, 3, 6], + [ 4, 9, 15]]) + + ``cumulative_sum(c)[-1]`` may not be equal to ``sum(c)`` + + >>> c = np.array([1, 2e-9, 3e-9] * 1000000) + >>> np.cumulative_sum(c)[-1] + 1000000.0050045159 + >>> c.sum() + 1000000.0050000029 + + """ + return _cumulative_func(x, um.add, axis, dtype, out, include_initial) + + def _cumsum_dispatcher(a, axis=None, dtype=None, out=None): return (a, out) @@ -2640,7 +2935,9 @@ def cumsum(a, axis=None, dtype=None, out=None): See Also -------- + cumulative_sum : Array API compatible alternative for ``cumsum``. sum : Sum array elements. + trapezoid : Integration of array values using composite trapezoidal rule. diff : Calculate the n-th discrete difference along given axis. Notes @@ -2654,6 +2951,7 @@ def cumsum(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3], [4,5,6]]) >>> a array([[1, 2, 3], @@ -2738,6 +3036,7 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.array([[4, 9, 2, 10], ... 
[6, 9, 7, 12]]) @@ -2860,6 +3159,7 @@ def max(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], @@ -3003,6 +3303,7 @@ def min(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.arange(4).reshape((2,2)) >>> a array([[0, 1], @@ -3148,6 +3449,7 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, -------- By default, calculate the product of all elements: + >>> import numpy as np >>> np.prod([1.,2.]) 2.0 @@ -3227,6 +3529,7 @@ def cumprod(a, axis=None, dtype=None, out=None): See Also -------- + cumulative_prod : Array API compatible alternative for ``cumprod``. :ref:`ufuncs-output-type` Notes @@ -3236,6 +3539,7 @@ def cumprod(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.array([1,2,3]) >>> np.cumprod(a) # intermediate results 1, 1*2 ... # total product 1*2*3 = 6 @@ -3288,6 +3592,7 @@ def ndim(a): Examples -------- + >>> import numpy as np >>> np.ndim([[1,2,3],[4,5,6]]) 2 >>> np.ndim(np.array([[1,2,3],[4,5,6]])) @@ -3332,6 +3637,7 @@ def size(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1,2,3],[4,5,6]]) >>> np.size(a) 6 @@ -3436,6 +3742,7 @@ def round(a, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> np.round([0.37, 1.64]) array([0., 2.]) >>> np.round([0.37, 1.64], decimals=1) @@ -3550,6 +3857,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.mean(a) 2.5 @@ -3730,6 +4038,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) 1.1180339887498949 # may vary @@ -3932,6 +4241,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, 
keepdims=np._NoValue, *, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.var(a) 1.25 diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 440c0a046890..08e791789c82 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -92,14 +92,22 @@ def take( @overload def reshape( a: _ArrayLike[_SCT], - newshape: _ShapeLike, + /, + shape: _ShapeLike = ..., order: _OrderACF = ..., + *, + newshape: _ShapeLike = ..., + copy: None | bool = ..., ) -> NDArray[_SCT]: ... @overload def reshape( a: ArrayLike, - newshape: _ShapeLike, + /, + shape: _ShapeLike = ..., order: _OrderACF = ..., + *, + newshape: _ShapeLike = ..., + copy: None | bool = ..., ) -> NDArray[Any]: ... @overload @@ -397,6 +405,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -411,6 +421,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -425,6 +437,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -439,6 +453,8 @@ def clip( a_max: None | ArrayLike, out: None = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: None = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -453,6 +469,8 @@ def clip( a_max: None | ArrayLike, out: _ArrayType = ..., *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., dtype: DTypeLike, where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -467,6 +485,8 @@ def clip( a_max: None | ArrayLike, out: _ArrayType, *, + min: None | ArrayLike = ..., + max: None | ArrayLike = ..., 
dtype: DTypeLike = ..., where: None | _ArrayLikeBool_co = ..., order: _OrderKACF = ..., @@ -598,6 +618,57 @@ def cumsum( out: _ArrayType = ..., ) -> _ArrayType: ... +@overload +def cumulative_sum( + x: _ArrayLike[_SCT], + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_sum( + x: ArrayLike, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + include_initial: bool = ..., +) -> _ArrayType: ... + @overload def ptp( a: _ArrayLike[_SCT], @@ -838,6 +909,97 @@ def cumprod( out: _ArrayType = ..., ) -> _ArrayType: ... +@overload +def cumulative_prod( + x: _ArrayLikeBool_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeUInt_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[uint64]: ... +@overload +def cumulative_prod( + x: _ArrayLikeInt_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[int64]: ... 
+@overload +def cumulative_prod( + x: _ArrayLikeFloat_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[floating[Any]]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[complexfloating[Any, Any]]: ... +@overload +def cumulative_prod( + x: _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: None = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[object_]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: _DTypeLike[_SCT] = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[_SCT]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: None = ..., + include_initial: bool = ..., +) -> NDArray[Any]: ... +@overload +def cumulative_prod( + x: _ArrayLikeComplex_co | _ArrayLikeObject_co, + /, + *, + axis: None | SupportsIndex = ..., + dtype: DTypeLike = ..., + out: _ArrayType = ..., + include_initial: bool = ..., +) -> _ArrayType: ... + def ndim(a: ArrayLike) -> int: ... def size(a: ArrayLike, axis: None | int = ...) -> int: ... diff --git a/numpy/_core/function_base.py b/numpy/_core/function_base.py index 898bc0e309ce..0e98196f2922 100644 --- a/numpy/_core/function_base.py +++ b/numpy/_core/function_base.py @@ -101,6 +101,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, Examples -------- + >>> import numpy as np >>> np.linspace(2.0, 3.0, num=5) array([2. , 2.25, 2.5 , 2.75, 3. 
]) >>> np.linspace(2.0, 3.0, num=5, endpoint=False) @@ -272,6 +273,7 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, Examples -------- + >>> import numpy as np >>> np.logspace(2.0, 3.0, num=4) array([ 100. , 215.443469 , 464.15888336, 1000. ]) >>> np.logspace(2.0, 3.0, num=4, endpoint=False) @@ -378,6 +380,7 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Examples -------- + >>> import numpy as np >>> np.geomspace(1, 1000, num=4) array([ 1., 10., 100., 1000.]) >>> np.geomspace(1, 1000, num=3, endpoint=False) diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index c582a79e5fb2..669dfc71e298 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -146,10 +146,12 @@ def _float_to_str(self, value): title = _title_fmt.format('half'))} # Key to identify the floating point type. Key is result of -# ftype('-0.1').newbyteorder('<').tobytes() # -# 20230201 - use (ftype(-1.0) / ftype(10.0)).newbyteorder('<').tobytes() -# instead because stold may have deficiencies on some platforms. +# ftype = np.longdouble # or float64, float32, etc. +# v = (ftype(-1.0) / ftype(10.0)) +# v.view(v.dtype.newbyteorder('<')).tobytes() +# +# Uses division to work around deficiencies in strtold on some platforms. 
# See: # https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure @@ -475,6 +477,7 @@ class finfo: Examples -------- + >>> import numpy as np >>> np.finfo(np.float64).dtype dtype('float64') >>> np.finfo(np.complex64).dtype @@ -661,6 +664,7 @@ class iinfo: -------- With types: + >>> import numpy as np >>> ii16 = np.iinfo(np.int16) >>> ii16.min -32768 diff --git a/numpy/_core/include/numpy/_numpyconfig.h.in b/numpy/_core/include/numpy/_numpyconfig.h.in index 0491877e3164..79b2ee3449a5 100644 --- a/numpy/_core/include/numpy/_numpyconfig.h.in +++ b/numpy/_core/include/numpy/_numpyconfig.h.in @@ -17,9 +17,11 @@ #mesondefine NPY_SIZEOF_PY_LONG_LONG #mesondefine NPY_SIZEOF_LONGLONG -#mesondefine NPY_USE_C99_FORMATS - -#mesondefine NPY_NO_SIGNAL +/* + * Defined to 1 or 0. Note that Pyodide hardcodes NPY_NO_SMP (and other defines + * in this header) for better cross-compilation, so don't rename them without a + * good reason. + */ #mesondefine NPY_NO_SMP #mesondefine NPY_VISIBILITY_HIDDEN diff --git a/numpy/_core/include/numpy/_public_dtype_api_table.h b/numpy/_core/include/numpy/_public_dtype_api_table.h index 5fbbdd785e4e..51f390540627 100644 --- a/numpy/_core/include/numpy/_public_dtype_api_table.h +++ b/numpy/_core/include/numpy/_public_dtype_api_table.h @@ -4,6 +4,9 @@ * * These definitions are only relevant for the public API and we reserve * the slots 320-360 in the API table generation for this (currently). + * + * TODO: This file should be consolidated with the API table generation + * (although not sure the current generation is worth preserving). 
*/ #ifndef NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ #define NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ @@ -61,17 +64,21 @@ /* Object/Void */ #define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33]) #define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34]) -/* Abstract */ -#define PyArray_PyIntAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) -#define PyArray_PyFloatAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) -#define PyArray_PyComplexAbstractDType \ - (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Python types (used as markers for scalars) */ +#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35]) +#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36]) +#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37]) +/* Default integer type */ #define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38]) /* New non-legacy DTypes follow in the order they were added */ #define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39]) -/* NOTE: offset 40 is free, after that a new range will need to be used */ + +/* NOTE: offset 40 is free */ + +/* Need to start with a larger offset again for the abstract classes: */ +#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366]) +#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367]) +#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368]) #endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */ diff --git a/numpy/_core/include/numpy/dtype_api.h b/numpy/_core/include/numpy/dtype_api.h index f21d0e6558f3..9dd3effa3a80 100644 --- a/numpy/_core/include/numpy/dtype_api.h +++ b/numpy/_core/include/numpy/dtype_api.h @@ -106,7 +106,7 @@ typedef struct PyArrayMethod_Context_tag { struct PyArrayMethodObject_tag *method; /* Operand descriptors, filled in by resolve_descriptors */ - PyArray_Descr 
**descriptors; + PyArray_Descr *const *descriptors; /* Structure may grow (this is harmless for DType authors) */ } PyArrayMethod_Context; @@ -159,9 +159,9 @@ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( /* "method" is currently opaque (necessary e.g. to wrap Python) */ struct PyArrayMethodObject_tag *method, /* DTypes the method was created for */ - PyArray_DTypeMeta **dtypes, + PyArray_DTypeMeta *const *dtypes, /* Input descriptors (instances). Outputs may be NULL. */ - PyArray_Descr **given_descrs, + PyArray_Descr *const *given_descrs, /* Exact loop descriptors to use, must not hold references on error */ PyArray_Descr **loop_descrs, npy_intp *view_offset); @@ -177,9 +177,9 @@ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)( */ typedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)( struct PyArrayMethodObject_tag *method, - PyArray_DTypeMeta **dtypes, + PyArray_DTypeMeta *const *dtypes, /* Unlike above, these can have any DType and we may allow NULL. */ - PyArray_Descr **given_descrs, + PyArray_Descr *const *given_descrs, /* * Input scalars or NULL. Only ever passed for python scalars. * WARNING: In some cases, a loop may be explicitly selected and the @@ -227,7 +227,7 @@ typedef int (PyArrayMethod_GetLoop)( */ typedef int (PyArrayMethod_GetReductionInitial)( PyArrayMethod_Context *context, npy_bool reduction_is_empty, - char *initial); + void *initial); /* * The following functions are only used by the wrapping array method defined @@ -256,8 +256,8 @@ typedef int (PyArrayMethod_GetReductionInitial)( * `resolve_descriptors`, so that it can be filled there if not NULL.) 
*/ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, - PyArray_DTypeMeta *wrapped_dtypes[], - PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]); + PyArray_DTypeMeta *const wrapped_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]); /** * The function to convert the actual loop descriptors (as returned by the @@ -278,7 +278,7 @@ typedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout, * @returns 0 on success, -1 on failure. */ typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, - PyArray_DTypeMeta *new_dtypes[], PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[], PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]); @@ -303,7 +303,7 @@ typedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout, * */ typedef int (PyArrayMethod_TraverseLoop)( - void *traverse_context, PyArray_Descr *descr, char *data, + void *traverse_context, const PyArray_Descr *descr, char *data, npy_intp size, npy_intp stride, NpyAuxData *auxdata); @@ -317,7 +317,7 @@ typedef int (PyArrayMethod_TraverseLoop)( * */ typedef int (PyArrayMethod_GetTraverseLoop)( - void *traverse_context, PyArray_Descr *descr, + void *traverse_context, const PyArray_Descr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -334,7 +334,7 @@ typedef int (PyArrayMethod_GetTraverseLoop)( * (There are potential use-cases, these are currently unsupported.) 
*/ typedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc, - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]); /* @@ -449,7 +449,7 @@ typedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)( static inline PyArray_DTypeMeta * NPY_DT_NewRef(PyArray_DTypeMeta *o) { - Py_INCREF(o); + Py_INCREF((PyObject *)o); return o; } diff --git a/numpy/_core/include/numpy/ndarraytypes.h b/numpy/_core/include/numpy/ndarraytypes.h index 31aa3e4d330e..573f26938d87 100644 --- a/numpy/_core/include/numpy/ndarraytypes.h +++ b/numpy/_core/include/numpy/ndarraytypes.h @@ -8,8 +8,8 @@ #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN -/* Only use thread if configured in config and python supports it */ -#if defined WITH_THREAD && !NPY_NO_SMP +/* Always allow threading unless it was explicitly disabled at build time */ +#if !NPY_NO_SMP #define NPY_ALLOW_THREADS 1 #else #define NPY_ALLOW_THREADS 0 @@ -1298,9 +1298,16 @@ typedef struct { * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS * to be runtime dependent. */ -#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD - PyArrayIterObject *iters[64]; /* 64 is NPY_MAXARGS */ -#else /* not internal build */ +#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) + PyArrayIterObject *iters[64]; +#elif defined(__cplusplus) + /* + * C++ doesn't strictly support flexible members and gives compilers + * warnings (pedantic only), so we lie. We can't make it 64 because + * then Cython is unhappy (larger struct at runtime is OK smaller not). 
+ */ + PyArrayIterObject *iters[32]; +#else PyArrayIterObject *iters[]; #endif } PyArrayMultiIterObject; diff --git a/numpy/_core/include/numpy/npy_2_compat.h b/numpy/_core/include/numpy/npy_2_compat.h index 1d6d512f95b5..e39e65aedea7 100644 --- a/numpy/_core/include/numpy/npy_2_compat.h +++ b/numpy/_core/include/numpy/npy_2_compat.h @@ -46,14 +46,14 @@ #error "The NumPy 2 compat header requires `import_array()` for which " \ "the `ndarraytypes.h` header include is not sufficient. Please " \ "include it after `numpy/ndarrayobject.h` or similar.\n" \ - "To simplify includsion, you may use `PyArray_ImportNumPy()` " \ + "To simplify inclusion, you may use `PyArray_ImportNumPy()` " \ "which is defined in the compat header and is lightweight (can be)." #endif #if NPY_ABI_VERSION < 0x02000000 /* * Define 2.0 feature version as it is needed below to decide whether we - * compile for both 1.x and 2.x (defining it gaurantees 1.x only). + * compile for both 1.x and 2.x (defining it guarantees 1.x only). */ #define NPY_2_0_API_VERSION 0x00000012 /* @@ -74,7 +74,7 @@ #ifdef import_array1 static inline int -PyArray_ImportNumPyAPI() +PyArray_ImportNumPyAPI(void) { if (NPY_UNLIKELY(PyArray_API == NULL)) { import_array1(-1); @@ -125,7 +125,7 @@ PyArray_ImportNumPyAPI() #define NPY_DEFAULT_INT \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG) #define NPY_RAVEL_AXIS \ - (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? -1 : 32) + (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32) #define NPY_MAXARGS \ (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 
64 : 32) #endif @@ -220,19 +220,19 @@ DESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1) #if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return _PyDataType_GetArrFuncs(descr); } #elif NPY_ABI_VERSION < 0x02000000 static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return descr->f; } #else static inline PyArray_ArrFuncs * - PyDataType_GetArrFuncs(PyArray_Descr *descr) + PyDataType_GetArrFuncs(const PyArray_Descr *descr) { if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { return _PyDataType_GetArrFuncs(descr); diff --git a/numpy/_core/include/numpy/npy_3kcompat.h b/numpy/_core/include/numpy/npy_3kcompat.h index 62fde943aacc..c2bf74faf09d 100644 --- a/numpy/_core/include/numpy/npy_3kcompat.h +++ b/numpy/_core/include/numpy/npy_3kcompat.h @@ -5,8 +5,7 @@ * hence the "3k" naming. * * If you want to use this for your own projects, it's recommended to make a - * copy of it. Although the stuff below is unlikely to change, we don't provide - * strong backwards compatibility guarantees at the moment. + * copy of it. We don't provide backwards compatibility guarantees. */ #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ @@ -15,27 +14,13 @@ #include #include -#ifndef NPY_PY3K -#define NPY_PY3K 1 -#endif - -#include "numpy/npy_common.h" -#include "numpy/ndarrayobject.h" +#include "npy_common.h" #ifdef __cplusplus extern "C" { #endif -/* - * PyInt -> PyLong - */ - - -/* - * This is a renamed copy of the Python non-limited API function _PyLong_AsInt. It is - * included here because it is missing from the PyPy API. It completes the PyLong_As* - * group of functions and can be useful in replacing PyInt_Check. 
- */ +/* Python13 removes _PyLong_AsInt */ static inline int Npy__PyLong_AsInt(PyObject *obj) { @@ -53,128 +38,14 @@ Npy__PyLong_AsInt(PyObject *obj) return (int)result; } +#if defined _MSC_VER && _MSC_VER >= 1900 -#if defined(NPY_PY3K) -/* Return True only if the long fits in a C long */ -static inline int PyInt_Check(PyObject *op) { - int overflow = 0; - if (!PyLong_Check(op)) { - return 0; - } - PyLong_AsLongAndOverflow(op, &overflow); - return (overflow == 0); -} - - -#define PyInt_FromLong PyLong_FromLong -#define PyInt_AsLong PyLong_AsLong -#define PyInt_AS_LONG PyLong_AsLong -#define PyInt_AsSsize_t PyLong_AsSsize_t -#define PyNumber_Int PyNumber_Long - -/* NOTE: - * - * Since the PyLong type is very different from the fixed-range PyInt, - * we don't define PyInt_Type -> PyLong_Type. - */ -#endif /* NPY_PY3K */ - -/* Py3 changes PySlice_GetIndicesEx' first argument's type to PyObject* */ -#ifdef NPY_PY3K -# define NpySlice_GetIndicesEx PySlice_GetIndicesEx -#else -# define NpySlice_GetIndicesEx(op, nop, start, end, step, slicelength) \ - PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength) -#endif - -#if PY_VERSION_HEX < 0x030900a4 - /* Introduced in https://github.com/python/cpython/commit/d2ec81a8c99796b51fb8c49b77a7fe369863226f */ - #define Py_SET_TYPE(obj, type) ((Py_TYPE(obj) = (type)), (void)0) - /* Introduced in https://github.com/python/cpython/commit/b10dc3e7a11fcdb97e285882eba6da92594f90f9 */ - #define Py_SET_SIZE(obj, size) ((Py_SIZE(obj) = (size)), (void)0) - /* Introduced in https://github.com/python/cpython/commit/c86a11221df7e37da389f9c6ce6e47ea22dc44ff */ - #define Py_SET_REFCNT(obj, refcnt) ((Py_REFCNT(obj) = (refcnt)), (void)0) -#endif - - -#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x) - -/* - * PyString -> PyBytes - */ - -#if defined(NPY_PY3K) - -#define PyString_Type PyBytes_Type -#define PyString_Check PyBytes_Check -#define PyStringObject PyBytesObject -#define PyString_FromString PyBytes_FromString 
-#define PyString_FromStringAndSize PyBytes_FromStringAndSize -#define PyString_AS_STRING PyBytes_AS_STRING -#define PyString_AsStringAndSize PyBytes_AsStringAndSize -#define PyString_FromFormat PyBytes_FromFormat -#define PyString_Concat PyBytes_Concat -#define PyString_ConcatAndDel PyBytes_ConcatAndDel -#define PyString_AsString PyBytes_AsString -#define PyString_GET_SIZE PyBytes_GET_SIZE -#define PyString_Size PyBytes_Size - -#define PyUString_Type PyUnicode_Type -#define PyUString_Check PyUnicode_Check -#define PyUStringObject PyUnicodeObject -#define PyUString_FromString PyUnicode_FromString -#define PyUString_FromStringAndSize PyUnicode_FromStringAndSize -#define PyUString_FromFormat PyUnicode_FromFormat -#define PyUString_Concat PyUnicode_Concat2 -#define PyUString_ConcatAndDel PyUnicode_ConcatAndDel -#define PyUString_GET_SIZE PyUnicode_GET_SIZE -#define PyUString_Size PyUnicode_Size -#define PyUString_InternFromString PyUnicode_InternFromString -#define PyUString_Format PyUnicode_Format - -#define PyBaseString_Check(obj) (PyUnicode_Check(obj)) - -#else - -#define PyBytes_Type PyString_Type -#define PyBytes_Check PyString_Check -#define PyBytesObject PyStringObject -#define PyBytes_FromString PyString_FromString -#define PyBytes_FromStringAndSize PyString_FromStringAndSize -#define PyBytes_AS_STRING PyString_AS_STRING -#define PyBytes_AsStringAndSize PyString_AsStringAndSize -#define PyBytes_FromFormat PyString_FromFormat -#define PyBytes_Concat PyString_Concat -#define PyBytes_ConcatAndDel PyString_ConcatAndDel -#define PyBytes_AsString PyString_AsString -#define PyBytes_GET_SIZE PyString_GET_SIZE -#define PyBytes_Size PyString_Size - -#define PyUString_Type PyString_Type -#define PyUString_Check PyString_Check -#define PyUStringObject PyStringObject -#define PyUString_FromString PyString_FromString -#define PyUString_FromStringAndSize PyString_FromStringAndSize -#define PyUString_FromFormat PyString_FromFormat -#define PyUString_Concat PyString_Concat 
-#define PyUString_ConcatAndDel PyString_ConcatAndDel -#define PyUString_GET_SIZE PyString_GET_SIZE -#define PyUString_Size PyString_Size -#define PyUString_InternFromString PyString_InternFromString -#define PyUString_Format PyString_Format - -#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj)) - -#endif /* NPY_PY3K */ +#include /* * Macros to protect CRT calls against instant termination when passed an * invalid parameter (https://bugs.python.org/issue23524). */ -#if defined _MSC_VER && _MSC_VER >= 1900 - -#include - extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; #define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \ _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler); @@ -187,20 +58,6 @@ extern _invalid_parameter_handler _Py_silent_invalid_parameter_handler; #endif /* _MSC_VER >= 1900 */ - -static inline void -PyUnicode_ConcatAndDel(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); - Py_DECREF(right); -} - -static inline void -PyUnicode_Concat2(PyObject **left, PyObject *right) -{ - Py_SETREF(*left, PyUnicode_Concat(*left, right)); -} - /* * PyFile_* compatibility */ @@ -217,13 +74,6 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos) npy_off_t pos; FILE *handle; - /* For Python 2 PyFileObject, use PyFile_AsFile */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return PyFile_AsFile(file); - } -#endif - /* Flush first to ensure things end up in the file in the correct order */ ret = PyObject_CallMethod(file, "flush", ""); if (ret == NULL) { @@ -335,13 +185,6 @@ npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) PyObject *ret, *io, *io_raw; npy_off_t position; - /* For Python 2 PyFileObject, do nothing */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 0; - } -#endif - position = npy_ftell(handle); /* Close the FILE* handle */ @@ -395,24 +238,6 @@ 
npy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos) return 0; } -static inline int -npy_PyFile_Check(PyObject *file) -{ - int fd; - /* For Python 2, check if it is a PyFileObject */ -#if !defined(NPY_PY3K) - if (PyFile_Check(file)) { - return 1; - } -#endif - fd = PyObject_AsFileDescriptor(file); - if (fd == -1) { - PyErr_Clear(); - return 0; - } - return 1; -} - static inline PyObject* npy_PyFile_OpenFile(PyObject *filename, const char *mode) { @@ -437,8 +262,8 @@ npy_PyFile_CloseFile(PyObject *file) return 0; } - -/* This is a copy of _PyErr_ChainExceptions +/* This is a copy of _PyErr_ChainExceptions, which + * is no longer exported from Python3.12 */ static inline void npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) @@ -447,30 +272,25 @@ npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb) return; if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetContext(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetContext(val2, val); + PyErr_Restore(exc2, val2, tb2); } else { PyErr_Restore(exc, val, tb); } } - /* This is a copy of _PyErr_ChainExceptions, with: - * - a minimal implementation for python 2 - * - __cause__ used instead of __context__ + * __cause__ used instead of __context__ */ static inline void npy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject *tb) @@ -479,64 +299,23 @@ npy_PyErr_ChainExceptionsCause(PyObject 
*exc, PyObject *val, PyObject *tb) return; if (PyErr_Occurred()) { - /* only py3 supports this anyway */ - #ifdef NPY_PY3K - PyObject *exc2, *val2, *tb2; - PyErr_Fetch(&exc2, &val2, &tb2); - PyErr_NormalizeException(&exc, &val, &tb); - if (tb != NULL) { - PyException_SetTraceback(val, tb); - Py_DECREF(tb); - } - Py_DECREF(exc); - PyErr_NormalizeException(&exc2, &val2, &tb2); - PyException_SetCause(val2, val); - PyErr_Restore(exc2, val2, tb2); - #endif + PyObject *exc2, *val2, *tb2; + PyErr_Fetch(&exc2, &val2, &tb2); + PyErr_NormalizeException(&exc, &val, &tb); + if (tb != NULL) { + PyException_SetTraceback(val, tb); + Py_DECREF(tb); + } + Py_DECREF(exc); + PyErr_NormalizeException(&exc2, &val2, &tb2); + PyException_SetCause(val2, val); + PyErr_Restore(exc2, val2, tb2); } else { PyErr_Restore(exc, val, tb); } } -/* - * PyObject_Cmp - */ -#if defined(NPY_PY3K) -static inline int -PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp) -{ - int v; - v = PyObject_RichCompareBool(i1, i2, Py_LT); - if (v == 1) { - *cmp = -1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_GT); - if (v == 1) { - *cmp = 1; - return 1; - } - else if (v == -1) { - return -1; - } - - v = PyObject_RichCompareBool(i1, i2, Py_EQ); - if (v == 1) { - *cmp = 0; - return 1; - } - else { - *cmp = 0; - return -1; - } -} -#endif - /* * PyCObject functions adapted to PyCapsules. 
* diff --git a/numpy/_core/include/numpy/npy_common.h b/numpy/_core/include/numpy/npy_common.h index 9fb3f6b3f51f..79ad8ad78cb2 100644 --- a/numpy/_core/include/numpy/npy_common.h +++ b/numpy/_core/include/numpy/npy_common.h @@ -113,14 +113,18 @@ #define NPY_NOINLINE static #endif -#ifdef HAVE___THREAD +#ifdef __cplusplus + #define NPY_TLS thread_local +#elif defined(HAVE_THREAD_LOCAL) + #define NPY_TLS thread_local +#elif defined(HAVE__THREAD_LOCAL) + #define NPY_TLS _Thread_local +#elif defined(HAVE___THREAD) #define NPY_TLS __thread +#elif defined(HAVE___DECLSPEC_THREAD_) + #define NPY_TLS __declspec(thread) #else - #ifdef HAVE___DECLSPEC_THREAD_ - #define NPY_TLS __declspec(thread) - #else - #define NPY_TLS - #endif + #define NPY_TLS #endif #ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE @@ -375,6 +379,7 @@ typedef struct #include + #if defined(_MSC_VER) && !defined(__INTEL_COMPILER) typedef _Dcomplex npy_cdouble; typedef _Fcomplex npy_cfloat; diff --git a/numpy/_core/include/numpy/npy_math.h b/numpy/_core/include/numpy/npy_math.h index 216b173fde58..d11df12b7ceb 100644 --- a/numpy/_core/include/numpy/npy_math.h +++ b/numpy/_core/include/numpy/npy_math.h @@ -362,7 +362,11 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { +#if defined(__cplusplus) return ((double *) &z)[0]; +#else + return creal(z); +#endif } static inline void npy_csetreal(npy_cdouble *z, const double r) @@ -372,7 +376,11 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { +#if defined(__cplusplus) return ((double *) &z)[1]; +#else + return cimag(z); +#endif } static inline void npy_csetimag(npy_cdouble *z, const double i) @@ -382,7 +390,11 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { +#if defined(__cplusplus) return ((float *) &z)[0]; +#else + return crealf(z); 
+#endif } static inline void npy_csetrealf(npy_cfloat *z, const float r) @@ -392,7 +404,11 @@ static inline void npy_csetrealf(npy_cfloat *z, const float r) static inline float npy_cimagf(const npy_cfloat z) { +#if defined(__cplusplus) return ((float *) &z)[1]; +#else + return cimagf(z); +#endif } static inline void npy_csetimagf(npy_cfloat *z, const float i) @@ -402,7 +418,11 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { +#if defined(__cplusplus) return ((longdouble_t *) &z)[0]; +#else + return creall(z); +#endif } static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) @@ -412,7 +432,11 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { +#if defined(__cplusplus) return ((longdouble_t *) &z)[1]; +#else + return cimagl(z); +#endif } static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i) diff --git a/numpy/_core/include/numpy/numpyconfig.h b/numpy/_core/include/numpy/numpyconfig.h index 0b6b2dda4290..46ecade41ada 100644 --- a/numpy/_core/include/numpy/numpyconfig.h +++ b/numpy/_core/include/numpy/numpyconfig.h @@ -81,6 +81,7 @@ #define NPY_1_24_API_VERSION 0x00000010 #define NPY_1_25_API_VERSION 0x00000011 #define NPY_2_0_API_VERSION 0x00000012 +#define NPY_2_1_API_VERSION 0x00000013 /* @@ -120,8 +121,8 @@ /* user provided a target version, use it */ #define NPY_FEATURE_VERSION NPY_TARGET_VERSION #else - /* Use the default (increase when dropping Python 3.9 support) */ - #define NPY_FEATURE_VERSION NPY_1_19_API_VERSION + /* Use the default (increase when dropping Python 3.10 support) */ + #define NPY_FEATURE_VERSION NPY_1_21_API_VERSION #endif /* Sanity check the (requested) feature version */ @@ -129,7 +130,14 @@ #error "NPY_TARGET_VERSION higher than NumPy headers!" 
#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION /* No support for irrelevant old targets, no need for error, but warn. */ - #warning "Requested NumPy target lower than supported NumPy 1.15." + #ifndef _MSC_VER + #warning "Requested NumPy target lower than supported NumPy 1.15." + #else + #define _WARN___STR2__(x) #x + #define _WARN___STR1__(x) _WARN___STR2__(x) + #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: " + #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.") + #endif #endif /* @@ -160,6 +168,8 @@ #define NPY_FEATURE_VERSION_STRING "1.25" #elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION #define NPY_FEATURE_VERSION_STRING "2.0" +#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION + #define NPY_FEATURE_VERSION_STRING "2.1" #else #error "Missing version string define for new NumPy version." #endif diff --git a/numpy/_core/include/numpy/ufuncobject.h b/numpy/_core/include/numpy/ufuncobject.h index dca375b32673..ada23626f70b 100644 --- a/numpy/_core/include/numpy/ufuncobject.h +++ b/numpy/_core/include/numpy/ufuncobject.h @@ -65,6 +65,39 @@ typedef int (PyUFunc_TypeResolutionFunc)( PyObject *type_tup, PyArray_Descr **out_dtypes); +/* + * This is the signature for the functions that may be assigned to the + * `process_core_dims_func` field of the PyUFuncObject structure. + * Implementation of this function is optional. This function is only used + * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1). + * The function is called by the ufunc during the processing of the arguments + * of a call of the ufunc. The function can check the core dimensions of the + * input and output arrays and return -1 with an exception set if any + * requirements are not satisfied. If the caller of the ufunc didn't provide + * output arrays, the core dimensions associated with the output arrays (i.e. + * those that are not also used in input arrays) will have the value -1 in + * `core_dim_sizes`. 
This function can replace any output core dimensions + * that are -1 with a value that is appropriate for the ufunc. + * + * Parameter Description + * --------------- ------------------------------------------------------ + * ufunc The ufunc object + * core_dim_sizes An array with length `ufunc->core_num_dim_ix`. + * The core dimensions of the arrays passed to the ufunc + * will have been set. If the caller of the ufunc didn't + * provide the output array(s), the output-only core + * dimensions will have the value -1. + * + * The function must not change any element in `core_dim_sizes` that is + * not -1 on input. Doing so will result in incorrect output from the + * ufunc, and could result in a crash of the Python interpreter. + * + * The function must return 0 on success, -1 on failure (with an exception + * set). + */ +typedef int (PyUFunc_ProcessCoreDimsFunc)( + struct _tagPyUFuncObject *ufunc, + npy_intp *core_dim_sizes); typedef struct _tagPyUFuncObject { PyObject_HEAD @@ -191,6 +224,12 @@ typedef struct _tagPyUFuncObject { /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */ PyObject *_loops; #endif + #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION + /* + * Optional function to process core dimensions of a gufunc. 
+ */ + PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; + #endif } PyUFuncObject; #include "arrayobject.h" diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index fb2c95a9d338..268b23dbadf9 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -127,6 +127,7 @@ class memmap(ndarray): Examples -------- + >>> import numpy as np >>> data = np.arange(12, dtype='float32') >>> data.resize((3,4)) diff --git a/numpy/_core/meson.build b/numpy/_core/meson.build index 5679b826aa6e..544af3665be7 100644 --- a/numpy/_core/meson.build +++ b/numpy/_core/meson.build @@ -47,7 +47,8 @@ C_ABI_VERSION = '0x02000000' # 0x00000010 - 1.24.x # 0x00000011 - 1.25.x # 0x00000012 - 2.0.x -C_API_VERSION = '0x00000012' +# 0x00000013 - 2.1.x +C_API_VERSION = '0x00000013' # Check whether we have a mismatch between the set C API VERSION and the # actual C API VERSION. Will raise a MismatchCAPIError if so. @@ -105,11 +106,12 @@ if use_highway highway_lib = static_library('highway', [ # required for hwy::Abort symbol - 'src/highway/hwy/targets.cc' + 'src/highway/hwy/abort.cc' ], cpp_args: '-DTOOLCHAIN_MISS_ASM_HWCAP_H', include_directories: ['src/highway'], - install: false + install: false, + gnu_symbol_visibility: 'hidden', ) else highway_lib = [] @@ -120,6 +122,11 @@ if use_intel_sort and not fs.exists('src/npysort/x86-simd-sort/README.md') error('Missing the `x86-simd-sort` git submodule! Run `git submodule update --init` to fix this.') endif +if not fs.exists('src/common/pythoncapi-compat') + error('Missing the `pythoncapi-compat` git submodule! ' + + 'Run `git submodule update --init` to fix this.') +endif + # Check sizes of types. Note, some of these landed in config.h before, but were # unused. 
So clean that up and only define the NPY_SIZEOF flavors rather than # the SIZEOF ones @@ -238,6 +245,8 @@ endforeach # variable attributes tested via "int %s a" % attribute optional_variable_attributes = [ + ['thread_local', 'HAVE_THREAD_LOCAL'], + ['_Thread_local', 'HAVE__THREAD_LOCAL'], ['__thread', 'HAVE__THREAD'], ['__declspec(thread)', 'HAVE___DECLSPEC_THREAD_'] ] @@ -256,7 +265,7 @@ foreach optional_attr: optional_variable_attributes return 0; } ''' - if cc.compiles(code) + if cc.compiles(code, name: optional_attr[0]) cdata.set10(optional_attr[1], true) endif endforeach @@ -344,7 +353,7 @@ max_opt = { 'msvc': ['/O2'], 'intel-cl': ['/O3'], }.get(compiler_id, ['-O3']) -max_opt = cc.has_multi_arguments(max_opt) ? max_opt : [] +max_opt = cc.has_multi_arguments(max_opt) and get_option('buildtype') != 'debug' ? max_opt : [] # Optional GCC compiler builtins and their call arguments. # If given, a required header and definition name (HAVE_ prepended) @@ -488,24 +497,20 @@ endif if cc.has_header('sys/endian.h') cdata.set10('NPY_HAVE_SYS_ENDIAN_H', true) endif -if is_windows - cdata.set10('NPY_NO_SIGNAL', true) -endif -# Command-line switch; distutils build checked for `NPY_NOSMP` env var instead -# TODO: document this (search for NPY_NOSMP in C API docs) +# Build-time option to disable threading is stored and exposed in numpyconfig.h +# Note: SMP is an old acronym for threading (Symmetric/Shared-memory MultiProcessing) cdata.set10('NPY_NO_SMP', get_option('disable-threading')) -# Check whether we can use inttypes (C99) formats -if cc.has_header_symbol('inttypes.h', 'PRIdPTR') - cdata.set10('NPY_USE_C99_FORMATS', true) -endif - visibility_hidden = '' if cc.has_function_attribute('visibility:hidden') and host_machine.system() != 'cygwin' visibility_hidden = '__attribute__((visibility("hidden")))' endif cdata.set('NPY_VISIBILITY_HIDDEN', visibility_hidden) +# if not set, we're using lapack_lite +if have_lapack + cdata.set10('HAVE_EXTERNAL_LAPACK', have_lapack) +endif 
config_h = configure_file( input: 'config.h.in', @@ -561,6 +566,7 @@ npymath_lib = static_library('npymath', install_dir: np_dir / '_core/lib', name_prefix: name_prefix_staticlib, name_suffix: name_suffix_staticlib, + gnu_symbol_visibility: 'hidden', ) dir_separator = '/' @@ -675,7 +681,6 @@ c_args_common = [ # Same as NPY_CXX_FLAGS (TODO: extend for what ccompiler_opt adds) cpp_args_common = c_args_common + [ - '-D__STDC_VERSION__=0', # for compatibility with C headers ] if cc.get_argument_syntax() != 'msvc' cpp_args_common += [ @@ -718,6 +723,7 @@ py.extension_module('_multiarray_tests', gnu_symbol_visibility: 'default', install: true, subdir: 'numpy/_core', + install_tag: 'tests' ) _umath_tests_mtargets = mod_features.multi_targets( @@ -754,6 +760,7 @@ foreach gen: test_modules_src install: true, subdir: 'numpy/_core', link_with: gen[2], + install_tag: 'tests' ) endforeach @@ -821,7 +828,7 @@ foreach gen_mtargets : [ 'highway_qsort.dispatch.h', 'src/npysort/highway_qsort.dispatch.cpp', use_highway ? 
[ - SVE, ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault + ASIMD, VSX2, # FIXME: disable VXE due to runtime segfault ] : [] ], [ @@ -941,12 +948,10 @@ foreach gen_mtargets : [ ], [ 'loops_trigonometric.dispatch.h', - src_file.process('src/umath/loops_trigonometric.dispatch.c.src'), + 'src/umath/loops_trigonometric.dispatch.cpp', [ - AVX512F, [AVX2, FMA3], - VSX4, VSX3, VSX2, + AVX512_SKX, [AVX2, FMA3], NEON_VFPV4, - VXE2, VXE ] ], [ @@ -1017,7 +1022,8 @@ foreach gen_mtargets : [ 'src/common', 'src/multiarray', 'src/npymath', - 'src/umath' + 'src/umath', + 'src/highway', ] ) if not is_variable('multiarray_umath_mtargets') @@ -1035,6 +1041,7 @@ src_multiarray_umath_common = [ 'src/common/mem_overlap.c', 'src/common/npy_argparse.c', 'src/common/npy_hashtable.c', + 'src/common/npy_import.c', 'src/common/npy_longdouble.c', 'src/common/ucsnarrow.c', 'src/common/ufunc_override.c', @@ -1057,6 +1064,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/array_coercion.c', 'src/multiarray/array_converter.c', 'src/multiarray/array_method.c', + 'src/multiarray/array_api_standard.c', 'src/multiarray/array_assign_scalar.c', 'src/multiarray/array_assign_array.c', 'src/multiarray/arrayfunction_override.c', @@ -1098,6 +1106,7 @@ src_multiarray = multiarray_gen_headers + [ 'src/multiarray/nditer_constr.c', 'src/multiarray/nditer_pywrap.c', src_file.process('src/multiarray/nditer_templ.c.src'), + 'src/multiarray/npy_static_data.c', 'src/multiarray/number.c', 'src/multiarray/refcount.c', src_file.process('src/multiarray/scalartypes.c.src'), @@ -1203,8 +1212,7 @@ py.extension_module('_multiarray_umath', src_numpy_api[1], # __multiarray_api.h src_umath_doc_h, npy_math_internal_h, - ], - objects: svml_objects, + ] + svml_objects, c_args: c_args_common, cpp_args: cpp_args_common, include_directories: [ @@ -1213,6 +1221,7 @@ py.extension_module('_multiarray_umath', 'src/multiarray', 'src/npymath', 'src/umath', + 'src/highway' ], dependencies: [blas_dep], link_with: 
[npymath_lib, multiarray_umath_mtargets.static_lib('_multiarray_umath_mtargets')] + highway_lib, @@ -1271,6 +1280,7 @@ py.extension_module('_simd', link_with: [npymath_lib, _simd_mtargets.static_lib('_simd_mtargets')], install: true, subdir: 'numpy/_core', + install_tag: 'tests', ) python_sources = [ @@ -1314,6 +1324,7 @@ python_sources = [ 'numerictypes.py', 'numerictypes.pyi', 'overrides.py', + 'printoptions.py', 'records.py', 'records.pyi', 'shape_base.py', @@ -1329,4 +1340,4 @@ py.install_sources( ) subdir('include') -install_subdir('tests', install_dir: np_dir / '_core', install_tag: 'python-runtime') +install_subdir('tests', install_dir: np_dir / '_core', install_tag: 'tests') diff --git a/numpy/_core/multiarray.py b/numpy/_core/multiarray.py index 27c2662c6a61..e2ca115b3728 100644 --- a/numpy/_core/multiarray.py +++ b/numpy/_core/multiarray.py @@ -35,11 +35,10 @@ 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat', 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'get_handler_name', 'get_handler_version', 'inner', 'interp', - 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory', - 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', + 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot', + 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters', 'normalize_axis_index', 'packbits', 'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function', - 'set_legacy_print_mode', 'set_typeDict', 'shares_memory', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros', '_get_promotion_state', '_set_promotion_state'] @@ -145,16 +144,17 @@ def empty_like( Examples -------- + >>> import numpy as np >>> a = ([1,2,3], [4,5,6]) # a is array-like >>> np.empty_like(a) array([[-1073741821, -1073741821, 3], # uninitialized [ 0, 0, -1073741821]]) >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, 
-2.00000572e+000], # uninit + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) - """ + """ # NOQA return (prototype,) @@ -226,6 +226,7 @@ def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None): Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> b = np.array([[5, 6]]) >>> np.concatenate((a, b), axis=0) @@ -324,6 +325,7 @@ def inner(a, b): -------- Ordinary inner product for vectors: + >>> import numpy as np >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) @@ -400,6 +402,7 @@ def where(condition, x=None, y=None): Examples -------- + >>> import numpy as np >>> a = np.arange(10) >>> a array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -472,6 +475,7 @@ def lexsort(keys, axis=None): -------- Sort names: first by surname, then by name. + >>> import numpy as np >>> surnames = ('Hertz', 'Galilei', 'Hertz') >>> first_names = ('Heinrich', 'Galileo', 'Gustav') >>> ind = np.lexsort((first_names, surnames)) @@ -606,6 +610,7 @@ def can_cast(from_, to, casting=None): -------- Basic examples + >>> import numpy as np >>> np.can_cast(np.int32, np.int64) True >>> np.can_cast(np.float64, complex) @@ -656,6 +661,7 @@ def min_scalar_type(a): Examples -------- + >>> import numpy as np >>> np.min_scalar_type(10) dtype('uint8') @@ -734,6 +740,7 @@ def result_type(*arrays_and_dtypes): Examples -------- + >>> import numpy as np >>> np.result_type(3, np.arange(7, dtype='i1')) dtype('int8') @@ -813,6 +820,7 @@ def dot(a, b, out=None): Examples -------- + >>> import numpy as np >>> np.dot(3, 4) 12 @@ -876,6 +884,7 @@ def vdot(a, b): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j,3+4j]) >>> b = np.array([5+6j,7+8j]) >>> np.vdot(a, b) @@ -945,6 +954,7 @@ def bincount(x, weights=None, minlength=None): Examples -------- + >>> import numpy as np >>> np.bincount(np.arange(5)) array([1, 1, 1, 1, 1]) >>> np.bincount(np.array([0, 1, 1, 3, 
2, 1, 7])) @@ -1020,6 +1030,7 @@ def ravel_multi_index(multi_index, dims, mode=None, order=None): Examples -------- + >>> import numpy as np >>> arr = np.array([[3,6,6],[4,5,1]]) >>> np.ravel_multi_index(arr, (7,6)) array([22, 41, 37]) @@ -1074,6 +1085,7 @@ def unravel_index(indices, shape=None, order=None): Examples -------- + >>> import numpy as np >>> np.unravel_index([22, 41, 37], (7,6)) (array([3, 6, 6]), array([4, 5, 1])) >>> np.unravel_index([31, 41, 13], (7,6), order='F') @@ -1120,6 +1132,7 @@ def copyto(dst, src, casting=None, where=None): Examples -------- + >>> import numpy as np >>> A = np.array([4, 5, 6]) >>> B = [1, 2, 3] >>> np.copyto(A, B) @@ -1165,6 +1178,7 @@ def putmask(a, /, mask, values): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2, 3) >>> np.putmask(x, x>2, x**2) >>> x @@ -1222,6 +1236,7 @@ def packbits(a, axis=None, bitorder='big'): Examples -------- + >>> import numpy as np >>> a = np.array([[[1,0,1], ... [0,1,0]], ... [[1,1,0], @@ -1291,6 +1306,7 @@ def unpackbits(a, axis=None, count=None, bitorder='big'): Examples -------- + >>> import numpy as np >>> a = np.array([[2], [7], [23]], dtype=np.uint8) >>> a array([[ 2], @@ -1334,7 +1350,7 @@ def shares_memory(a, b, max_work=None): .. warning:: This function can be exponentially slow for some inputs, unless - `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``. + `max_work` is set to zero or a positive integer. If in doubt, use `numpy.may_share_memory` instead. Parameters @@ -1346,12 +1362,13 @@ def shares_memory(a, b, max_work=None): of candidate solutions to consider). The following special values are recognized: - max_work=MAY_SHARE_EXACT (default) + max_work=-1 (default) The problem is solved exactly. In this case, the function returns True only if there is an element shared between the arrays. Finding the exact solution may take extremely long in some cases. - max_work=MAY_SHARE_BOUNDS + max_work=0 Only the memory bounds of a and b are checked. 
+ This is equivalent to using ``may_share_memory()``. Raises ------ @@ -1368,6 +1385,7 @@ def shares_memory(a, b, max_work=None): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 4]) >>> np.shares_memory(x, np.array([5, 6, 7])) False @@ -1432,6 +1450,7 @@ def may_share_memory(a, b, max_work=None): Examples -------- + >>> import numpy as np >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) @@ -1494,6 +1513,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): Examples -------- + >>> import numpy as np >>> # The weekdays are Friday, Saturday, and Monday ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) @@ -1578,29 +1598,30 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, Examples -------- + >>> import numpy as np >>> # First business day in October 2011 (not accounting for holidays) ... np.busday_offset('2011-10', 0, roll='forward') - numpy.datetime64('2011-10-03') + np.datetime64('2011-10-03') >>> # Last business day in February 2012 (not accounting for holidays) ... np.busday_offset('2012-03', -1, roll='forward') - numpy.datetime64('2012-02-29') + np.datetime64('2012-02-29') >>> # Third Wednesday in January 2011 ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') - numpy.datetime64('2011-01-19') + np.datetime64('2011-01-19') >>> # 2012 Mother's Day in Canada and the U.S. ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') - numpy.datetime64('2012-05-13') + np.datetime64('2012-05-13') >>> # First business day on or after a date ... np.busday_offset('2011-03-20', 0, roll='forward') - numpy.datetime64('2011-03-21') + np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 0, roll='forward') - numpy.datetime64('2011-03-22') + np.datetime64('2011-03-22') >>> # First business day after a date ... 
np.busday_offset('2011-03-20', 1, roll='backward') - numpy.datetime64('2011-03-21') + np.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 1, roll='backward') - numpy.datetime64('2011-03-23') + np.datetime64('2011-03-23') """ return (dates, offsets, weekmask, holidays, out) @@ -1667,6 +1688,7 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, Examples -------- + >>> import numpy as np >>> # Number of weekdays in January 2011 ... np.busday_count('2011-01', '2011-02') 21 @@ -1710,6 +1732,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- + >>> import numpy as np >>> import pytz >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d diff --git a/numpy/_core/multiarray.pyi b/numpy/_core/multiarray.pyi index 74cc86e64e79..dd1093015301 100644 --- a/numpy/_core/multiarray.pyi +++ b/numpy/_core/multiarray.pyi @@ -82,7 +82,12 @@ from numpy._typing import ( _T_co = TypeVar("_T_co", covariant=True) _T_contra = TypeVar("_T_contra", contravariant=True) _SCT = TypeVar("_SCT", bound=generic) -_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) +_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any]) +_ArrayType_co = TypeVar( + "_ArrayType_co", + bound=ndarray[Any, Any], + covariant=True, +) # Valid time units _UnitKind = L[ @@ -113,6 +118,9 @@ class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]): def __len__(self) -> int: ... def __getitem__(self, key: _T_contra, /) -> _T_co: ... +class _SupportsArray(Protocol[_ArrayType_co]): + def __array__(self, /) -> _ArrayType_co: ... + __all__: list[str] ALLOW_THREADS: Final[int] # 0 or 1 (system-specific) @@ -188,6 +196,17 @@ def array( like: None | _SupportsArrayFunc = ..., ) -> _ArrayType: ... @overload +def array( + object: _SupportsArray[_ArrayType], + dtype: None = ..., + *, + copy: None | bool | _CopyMode = ..., + order: _OrderKACF = ..., + subok: L[True], + ndmin: L[0] = ..., + like: None | _SupportsArrayFunc = ..., +) -> _ArrayType: ... 
+@overload def array( object: _ArrayLike[_SCT], dtype: None = ..., @@ -525,6 +544,8 @@ def asanyarray( dtype: None = ..., order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> _ArrayType: ... @overload @@ -533,6 +554,8 @@ def asanyarray( dtype: None = ..., order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[_SCT]: ... @overload @@ -541,6 +564,8 @@ def asanyarray( dtype: None = ..., order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[Any]: ... @overload @@ -549,6 +574,8 @@ def asanyarray( dtype: _DTypeLike[_SCT], order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[_SCT]: ... @overload @@ -557,6 +584,8 @@ def asanyarray( dtype: DTypeLike, order: _OrderKACF = ..., *, + device: None | L["cpu"] = ..., + copy: None | bool = ..., like: None | _SupportsArrayFunc = ..., ) -> NDArray[Any]: ... diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index 429620da5359..61518d5ab56f 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -18,7 +18,7 @@ fromstring, inner, lexsort, matmul, may_share_memory, min_scalar_type, ndarray, nditer, nested_iters, promote_types, putmask, result_type, shares_memory, vdot, where, zeros, normalize_axis_index, - _get_promotion_state, _set_promotion_state + _get_promotion_state, _set_promotion_state, vecdot ) from . 
import overrides @@ -52,8 +52,8 @@ 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose', 'putmask', 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not', - 'full', 'full_like', 'matmul', 'shares_memory', 'may_share_memory', - '_get_promotion_state', '_set_promotion_state'] + 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory', + 'may_share_memory', '_get_promotion_state', '_set_promotion_state'] def _zeros_like_dispatcher( @@ -115,6 +115,7 @@ def zeros_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x @@ -180,6 +181,7 @@ def ones(shape, dtype=None, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.ones(5) array([1., 1., 1., 1., 1.]) @@ -268,6 +270,7 @@ def ones_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x @@ -338,6 +341,7 @@ def full(shape, fill_value, dtype=None, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.full((2, 2), np.inf) array([[inf, inf], [inf, inf]]) @@ -425,6 +429,7 @@ def full_like( Examples -------- + >>> import numpy as np >>> x = np.arange(6, dtype=int) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1]) @@ -503,6 +508,7 @@ def count_nonzero(a, axis=None, *, keepdims=False): Examples -------- + >>> import numpy as np >>> np.count_nonzero(np.eye(4)) 4 >>> a = np.array([[0, 1, 7, 0], @@ -557,6 +563,7 @@ def isfortran(a): order (last index varies the fastest), or FORTRAN-contiguous order in memory (first index varies the fastest). 
+ >>> import numpy as np >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C') >>> a array([[1, 2, 3], @@ -632,6 +639,7 @@ def argwhere(a): Examples -------- + >>> import numpy as np >>> x = np.arange(6).reshape(2,3) >>> x array([[0, 1, 2], @@ -680,6 +688,7 @@ def flatnonzero(a): Examples -------- + >>> import numpy as np >>> x = np.arange(-2, 3) >>> x array([-2, -1, 0, 1, 2]) @@ -752,6 +761,7 @@ def correlate(a, v, mode='valid'): Examples -------- + >>> import numpy as np >>> np.correlate([1, 2, 3], [0, 1, 0.5]) array([3.5]) >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") @@ -851,6 +861,7 @@ def convolve(a, v, mode='full'): Note how the convolution operator flips the second array before "sliding" the two across one another: + >>> import numpy as np >>> np.convolve([1, 2, 3], [0, 1, 0.5]) array([0. , 1. , 2.5, 4. , 1.5]) @@ -935,6 +946,7 @@ def outer(a, b, out=None): -------- Make a (*very* coarse) grid for computing a Mandelbrot set: + >>> import numpy as np >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) >>> rl array([[-2., -1., 0., 1., 2.], @@ -1033,6 +1045,7 @@ def tensordot(a, b, axes=2): -------- A "traditional" example: + >>> import numpy as np >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) @@ -1210,6 +1223,7 @@ def roll(a, shift, axis=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> np.roll(x, 2) array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7]) @@ -1258,7 +1272,7 @@ def roll(a, shift, axis=None): "'shift' and 'axis' should be scalars or 1D sequences") shifts = {ax: 0 for ax in range(a.ndim)} for sh, ax in broadcasted: - shifts[ax] += sh + shifts[ax] += int(sh) rolls = [((slice(None), slice(None)),)] * a.ndim for ax, offset in shifts.items(): @@ -1343,6 +1357,7 @@ def rollaxis(a, axis, start=0): Examples -------- + >>> import numpy as np >>> a = np.ones((3,4,5,6)) >>> np.rollaxis(a, 3, 1).shape (3, 6, 4, 5) @@ -1465,6 +1480,7 @@ def moveaxis(a, source, 
destination): Examples -------- + >>> import numpy as np >>> x = np.zeros((3, 4, 5)) >>> np.moveaxis(x, 0, -1).shape (4, 5, 3) @@ -1565,10 +1581,17 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): Supports full broadcasting of the inputs. + Dimension-2 input arrays were deprecated in 2.0.0. If you do need this + functionality, you can use:: + + def cross2d(x, y): + return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0] + Examples -------- Vector cross-product. + >>> import numpy as np >>> x = [1, 2, 3] >>> y = [4, 5, 6] >>> np.cross(x, y) @@ -1776,6 +1799,7 @@ def indices(dimensions, dtype=int, sparse=False): Examples -------- + >>> import numpy as np >>> grid = np.indices((2, 3)) >>> grid.shape (2, 2, 3) @@ -1875,6 +1899,7 @@ def fromfunction(function, shape, *, dtype=float, like=None, **kwargs): Examples -------- + >>> import numpy as np >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float) array([[0., 0.], [1., 1.]]) @@ -1964,14 +1989,20 @@ def isscalar(element): Examples -------- + >>> import numpy as np + >>> np.isscalar(3.1) True + >>> np.isscalar(np.array(3.1)) False + >>> np.isscalar([3.1]) False + >>> np.isscalar(False) True + >>> np.isscalar('numpy') True @@ -2039,6 +2070,7 @@ def binary_repr(num, width=None): Examples -------- + >>> import numpy as np >>> np.binary_repr(3) '11' >>> np.binary_repr(-3) @@ -2123,6 +2155,7 @@ def base_repr(number, base=2, padding=0): Examples -------- + >>> import numpy as np >>> np.base_repr(5) '101' >>> np.base_repr(6, 5) @@ -2142,7 +2175,7 @@ def base_repr(number, base=2, padding=0): elif base < 2: raise ValueError("Bases less than 2 not handled in base_repr.") - num = abs(number) + num = abs(int(number)) res = [] while num: res.append(digits[num % base]) @@ -2196,6 +2229,7 @@ def identity(n, dtype=None, *, like=None): Examples -------- + >>> import numpy as np >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], @@ -2286,17 +2320,23 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Examples 
-------- + >>> import numpy as np >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8]) False + >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9]) True + >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9]) False + >>> np.allclose([1.0, np.nan], [1.0, np.nan]) False + >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) True + """ res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)) return builtins.bool(res) @@ -2370,24 +2410,34 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Examples -------- + >>> import numpy as np >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) array([ True, False]) + >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) array([ True, True]) + >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) array([False, True]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan]) array([ True, False]) + >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) array([ True, True]) + >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) array([ True, False]) + >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) array([False, False]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) array([ True, True]) + >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) array([False, True]) + """ # Turn all but python scalars into arrays. 
x, y, atol, rtol = ( @@ -2466,17 +2516,24 @@ def array_equal(a1, a2, equal_nan=False): Examples -------- + >>> import numpy as np + >>> np.array_equal([1, 2], [1, 2]) True + >>> np.array_equal(np.array([1, 2]), np.array([1, 2])) True + >>> np.array_equal([1, 2], [1, 2, 3]) False + >>> np.array_equal([1, 2], [1, 4]) False + >>> a = np.array([1, np.nan]) >>> np.array_equal(a, a) False + >>> np.array_equal(a, a, equal_nan=True) True @@ -2497,17 +2554,17 @@ def array_equal(a1, a2, equal_nan=False): if a1.shape != a2.shape: return False if not equal_nan: - return builtins.bool((a1 == a2).all()) - cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) - and _dtype_cannot_hold_nan(a2.dtype)) - if cannot_have_nan: - if a1 is a2: - return True - return builtins.bool((a1 == a2).all()) + return builtins.bool((asanyarray(a1 == a2)).all()) if a1 is a2: # nan will compare equal so an array will compare equal to itself. return True + + cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype) + and _dtype_cannot_hold_nan(a2.dtype)) + if cannot_have_nan: + return builtins.bool(asarray(a1 == a2).all()) + # Handling NaN values if equal_nan is True a1nan, a2nan = isnan(a1), isnan(a2) # NaN's occur at different locations @@ -2541,6 +2598,7 @@ def array_equiv(a1, a2): Examples -------- + >>> import numpy as np >>> np.array_equiv([1, 2], [1, 2]) True >>> np.array_equiv([1, 2], [1, 3]) @@ -2566,15 +2624,15 @@ def array_equiv(a1, a2): except Exception: return False - return builtins.bool((a1 == a2).all()) + return builtins.bool(asanyarray(a1 == a2).all()) -def _astype_dispatcher(x, dtype, /, *, copy=None): +def _astype_dispatcher(x, dtype, /, *, copy=None, device=None): return (x, dtype) @array_function_dispatch(_astype_dispatcher) -def astype(x, dtype, /, *, copy = True): +def astype(x, dtype, /, *, copy=True, device=None): """ Copies an array to a specified data type. 
@@ -2595,6 +2653,11 @@ def astype(x, dtype, /, *, copy = True): matches the data type of the input array, the input array must be returned; otherwise, a newly allocated array must be returned. Defaults to ``True``. + device : str, optional + The device on which to place the returned array. Default: None. + For Array-API interoperability only, so must be ``"cpu"`` if passed. + + .. versionadded:: 2.1.0 Returns ------- @@ -2607,6 +2670,7 @@ def astype(x, dtype, /, *, copy = True): Examples -------- + >>> import numpy as np >>> arr = np.array([1, 2, 3]); arr array([1, 2, 3]) >>> np.astype(arr, np.float64) @@ -2620,9 +2684,15 @@ def astype(x, dtype, /, *, copy = True): True """ - if not isinstance(x, np.ndarray): + if not (isinstance(x, np.ndarray) or isscalar(x)): raise TypeError( - f"Input should be a NumPy array. It is a {type(x)} instead." + "Input should be a NumPy array or scalar. " + f"It is a {type(x)} instead." + ) + if device is not None and device != "cpu": + raise ValueError( + 'Device not understood. Only "cpu" is allowed, but received:' + f' {device}' ) return x.astype(dtype, copy=copy) diff --git a/numpy/_core/numeric.pyi b/numpy/_core/numeric.pyi index a24c368cbd08..f25c6258f2d0 100644 --- a/numpy/_core/numeric.pyi +++ b/numpy/_core/numeric.pyi @@ -7,15 +7,11 @@ from typing import ( SupportsAbs, SupportsIndex, NoReturn, + TypeGuard, ) -if sys.version_info >= (3, 10): - from typing import TypeGuard -else: - from typing_extensions import TypeGuard import numpy as np from numpy import ( - ComplexWarning as ComplexWarning, generic, unsignedinteger, signedinteger, @@ -497,39 +493,6 @@ def tensordot( axes: int | tuple[_ShapeLike, _ShapeLike] = ..., ) -> NDArray[object_]: ... -@overload -def vecdot( - x1: _ArrayLikeUnknown, x2: _ArrayLikeUnknown, axis: int = ... -) -> NDArray[Any]: ... -@overload -def vecdot( - x1: _ArrayLikeBool_co, x2: _ArrayLikeBool_co, axis: int = ... -) -> NDArray[np.bool]: ... 
-@overload -def vecdot( - x1: _ArrayLikeUInt_co, x2: _ArrayLikeUInt_co, axis: int = ... -) -> NDArray[unsignedinteger[Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeInt_co, x2: _ArrayLikeInt_co, axis: int = ... -) -> NDArray[signedinteger[Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeFloat_co, x2: _ArrayLikeFloat_co, axis: int = ... -) -> NDArray[floating[Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeComplex_co, x2: _ArrayLikeComplex_co, axis: int = ... -) -> NDArray[complexfloating[Any, Any]]: ... -@overload -def vecdot( - x1: _ArrayLikeTD64_co, x2: _ArrayLikeTD64_co, axis: int = ... -) -> NDArray[timedelta64]: ... -@overload -def vecdot( - x1: _ArrayLikeObject_co, x2: _ArrayLikeObject_co, axis: int = ... -) -> NDArray[object_]: ... - @overload def roll( a: _ArrayLike[_SCT], @@ -733,10 +696,12 @@ def astype( x: NDArray[Any], dtype: _DTypeLike[_SCT], copy: bool = ..., + device: None | L["cpu"] = ..., ) -> NDArray[_SCT]: ... @overload def astype( x: NDArray[Any], dtype: DTypeLike, copy: bool = ..., + device: None | L["cpu"] = ..., ) -> NDArray[Any]: ... diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 602ba9a051dd..d736aecd5a35 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -225,10 +225,11 @@ def issctype(rep): res = obj2sctype(rep) if res and res != object_: return True - return False + else: + return False except Exception: return False - + @set_module('numpy') def obj2sctype(rep, default=None): @@ -360,7 +361,11 @@ def issubsctype(arg1, arg2): return issubclass(obj2sctype(arg1), obj2sctype(arg2)) -def _preprocess_dtype(dtype, err_msg): +class _PreprocessDTypeError(Exception): + pass + + +def _preprocess_dtype(dtype): """ Preprocess dtype argument by: 1. 
fetching type from a data type @@ -369,7 +374,7 @@ def _preprocess_dtype(dtype, err_msg): if isinstance(dtype, ma.dtype): dtype = dtype.type if isinstance(dtype, ndarray) or dtype not in allTypes.values(): - raise TypeError(f"{err_msg}, but it is a {type(dtype)}.") + raise _PreprocessDTypeError() return dtype @@ -414,9 +419,13 @@ def isdtype(dtype, kind): True """ - dtype = _preprocess_dtype( - dtype, err_msg="dtype argument must be a NumPy dtype" - ) + try: + dtype = _preprocess_dtype(dtype) + except _PreprocessDTypeError: + raise TypeError( + "dtype argument must be a NumPy dtype, " + f"but it is a {type(dtype)}." + ) from None input_kinds = kind if isinstance(kind, tuple) else (kind,) @@ -440,12 +449,20 @@ def isdtype(dtype, kind): sctypes["int"] + sctypes["uint"] + sctypes["float"] + sctypes["complex"] ) - else: - kind = _preprocess_dtype( - kind, - err_msg="kind argument must be comprised of " - "NumPy dtypes or strings only" + elif isinstance(kind, str): + raise ValueError( + "kind argument is a string, but" + f" {repr(kind)} is not a known kind name." ) + else: + try: + kind = _preprocess_dtype(kind) + except _PreprocessDTypeError: + raise TypeError( + "kind argument must be comprised of " + "NumPy dtypes or strings only, " + f"but is a {type(kind)}." 
+ ) from None processed_kinds.add(kind) return dtype in processed_kinds diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index 52ab73012604..b177dc55a6b6 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -45,14 +45,14 @@ _SCT = TypeVar("_SCT", bound=generic) class _TypeCodes(TypedDict): Character: L['c'] - Integer: L['bhilqp'] - UnsignedInteger: L['BHILQP'] + Integer: L['bhilqnp'] + UnsignedInteger: L['BHILQNP'] Float: L['efdg'] Complex: L['FDG'] - AllInteger: L['bBhHiIlLqQpP'] + AllInteger: L['bBhHiIlLqQnNpP'] AllFloat: L['efdgFDG'] Datetime: L['Mm'] - All: L['?bhilqpBHILQPefdgFDGSUVOMm'] + All: L['?bhilqnpBHILQNPefdgFDGSUVOMm'] __all__: list[str] diff --git a/numpy/_core/printoptions.py b/numpy/_core/printoptions.py new file mode 100644 index 000000000000..7ac93c2290e0 --- /dev/null +++ b/numpy/_core/printoptions.py @@ -0,0 +1,32 @@ +""" +Stores and defines the low-level format_options context variable. + +This is defined in its own file outside of the arrayprint module +so we can import it from C while initializing the multiarray +C module during import without introducing circular dependencies. +""" + +import sys +from contextvars import ContextVar + +__all__ = ["format_options"] + +default_format_options_dict = { + "edgeitems": 3, # repr N leading and trailing items of each dimension + "threshold": 1000, # total items > triggers array summarization + "floatmode": "maxprec", + "precision": 8, # precision of floating point representations + "suppress": False, # suppress printing small floating values in exp format + "linewidth": 75, + "nanstr": "nan", + "infstr": "inf", + "sign": "-", + "formatter": None, + # Internally stored as an int to simplify comparisons; converted from/to + # str/False on the way in/out. 
+ 'legacy': sys.maxsize, + 'override_repr': None, +} + +format_options = ContextVar( + "format_options", default=default_format_options_dict.copy()) diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 79755e09bb40..1f92500aed6e 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -97,6 +97,7 @@ class format_parser: Examples -------- + >>> import numpy as np >>> np.rec.format_parser(['>> import numpy as np >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '>> x array([(1., 2), (3., 4)], dtype=[('x', ' record: ... -class recarray(ndarray[_ShapeType, _DType_co]): +class recarray(ndarray[_ShapeType_co, _DType_co]): # NOTE: While not strictly mandatory, we're demanding here that arguments # for the `format_parser`- and `dtype`-based dtype constructors are # mutually exclusive @@ -114,7 +114,7 @@ class recarray(ndarray[_ShapeType, _DType_co]): @overload def __getitem__(self, indx: str) -> NDArray[Any]: ... @overload - def __getitem__(self, indx: list[str]) -> recarray[_ShapeType, dtype[record]]: ... + def __getitem__(self, indx: list[str]) -> recarray[_ShapeType_co, dtype[record]]: ... @overload def field(self, attr: int | str, val: None = ...) -> Any: ... @overload @@ -174,7 +174,7 @@ def fromrecords( dtype: None = ..., shape: None | _ShapeLike = ..., *, - formats: DTypeLike, + formats: DTypeLike = ..., names: None | str | Sequence[str] = ..., titles: None | str | Sequence[str] = ..., aligned: bool = ..., diff --git a/numpy/_core/shape_base.py b/numpy/_core/shape_base.py index 200d8e7c74d7..ebee4c061196 100644 --- a/numpy/_core/shape_base.py +++ b/numpy/_core/shape_base.py @@ -1,5 +1,5 @@ __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', - 'stack', 'vstack'] + 'stack', 'unstack', 'vstack'] import functools import itertools @@ -11,7 +11,6 @@ from .multiarray import array, asanyarray, normalize_axis_index from . 
import fromnumeric as _from_nx - array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy') @@ -45,6 +44,7 @@ def atleast_1d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_1d(1.0) array([1.]) @@ -60,18 +60,18 @@ def atleast_1d(*arys): (array([1]), array([3, 4])) """ + if len(arys) == 1: + result = asanyarray(arys[0]) + if result.ndim == 0: + result = result.reshape(1) + return result res = [] for ary in arys: - ary = asanyarray(ary) - if ary.ndim == 0: - result = ary.reshape(1) - else: - result = ary + result = asanyarray(ary) + if result.ndim == 0: + result = result.reshape(1) res.append(result) - if len(res) == 1: - return res[0] - else: - return tuple(res) + return tuple(res) def _atleast_2d_dispatcher(*arys): @@ -103,6 +103,7 @@ def atleast_2d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_2d(3.0) array([[3.]]) @@ -163,6 +164,7 @@ def atleast_3d(*arys): Examples -------- + >>> import numpy as np >>> np.atleast_3d(3.0) array([[[3.]]]) @@ -261,9 +263,11 @@ def vstack(tup, *, dtype=None, casting="same_kind"): dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3]) >>> b = np.array([4, 5, 6]) >>> np.vstack((a,b)) @@ -331,11 +335,13 @@ def hstack(tup, *, dtype=None, casting="same_kind"): vstack : Stack arrays in sequence vertically (row wise). dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. - hsplit : Split an array into multiple sub-arrays + hsplit : Split an array into multiple sub-arrays horizontally (column-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. 
Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((4,5,6)) >>> np.hstack((a,b)) @@ -414,10 +420,13 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): concatenate : Join a sequence of arrays along an existing axis. block : Assemble an nd-array from nested lists of blocks. split : Split array into a list of multiple sub-arrays of equal size. + unstack : Split an array into a tuple of sub-arrays along an axis. Examples -------- - >>> arrays = [np.random.randn(3, 4) for _ in range(10)] + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)] >>> np.stack(arrays, axis=0).shape (10, 3, 4) @@ -455,6 +464,76 @@ def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"): return _nx.concatenate(expanded_arrays, axis=axis, out=out, dtype=dtype, casting=casting) +def _unstack_dispatcher(x, /, *, axis=None): + return (x,) + +@array_function_dispatch(_unstack_dispatcher) +def unstack(x, /, *, axis=0): + """ + Split an array into a sequence of arrays along the given axis. + + The ``axis`` parameter specifies the dimension along which the array will + be split. For example, if ``axis=0`` (the default) it will be the first + dimension and if ``axis=-1`` it will be the last dimension. + + The result is a tuple of arrays split along ``axis``. + + .. versionadded:: 2.1.0 + + Parameters + ---------- + x : ndarray + The array to be unstacked. + axis : int, optional + Axis along which the array will be split. Default: ``0``. + + Returns + ------- + unstacked : tuple of ndarrays + The unstacked arrays. + + See Also + -------- + stack : Join a sequence of arrays along a new axis. + concatenate : Join a sequence of arrays along an existing axis. + block : Assemble an nd-array from nested lists of blocks. + split : Split array into a list of multiple sub-arrays of equal size. 
+ + Notes + ----- + ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e., + ``stack(unstack(x, axis=axis), axis=axis) == x``. + + This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since + iterating on an array iterates along the first axis. + + Examples + -------- + >>> arr = np.arange(24).reshape((2, 3, 4)) + >>> np.unstack(arr) + (array([[ 0, 1, 2, 3], + [ 4, 5, 6, 7], + [ 8, 9, 10, 11]]), + array([[12, 13, 14, 15], + [16, 17, 18, 19], + [20, 21, 22, 23]])) + >>> np.unstack(arr, axis=1) + (array([[ 0, 1, 2, 3], + [12, 13, 14, 15]]), + array([[ 4, 5, 6, 7], + [16, 17, 18, 19]]), + array([[ 8, 9, 10, 11], + [20, 21, 22, 23]])) + >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1) + >>> arr2.shape + (2, 3, 4) + >>> np.all(arr == arr2) + np.True_ + + """ + if x.ndim == 0: + raise ValueError("Input array must be at least 1-d.") + return tuple(_nx.moveaxis(x, axis, 0)) # Internal functions to eliminate the overhead of repeated dispatch in one of # the two possible paths inside np.block. @@ -709,7 +788,7 @@ def block(arrays): second-last dimension (-2), and so on until the outermost list is reached. Blocks can be of any dimension, but will not be broadcasted using - the normal rules. Instead, leading axes of size 1 are inserted, + the normal rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim`` the same for all blocks. This is primarily useful for working with scalars, and means that code like ``np.block([v, 1])`` is valid, where ``v.ndim == 1``. @@ -755,6 +834,7 @@ def block(arrays): dstack : Stack arrays in sequence depth wise (along third axis). column_stack : Stack 1-D arrays as columns into a 2-D array. vsplit : Split an array into multiple sub-arrays vertically (row-wise). + unstack : Split an array into a tuple of sub-arrays along an axis. 
Notes ----- @@ -788,8 +868,9 @@ def block(arrays): Examples -------- - The most common use of this function is to build a block matrix + The most common use of this function is to build a block matrix: + >>> import numpy as np >>> A = np.eye(2) * 2 >>> B = np.eye(3) * 3 >>> np.block([ @@ -802,7 +883,7 @@ def block(arrays): [1., 1., 0., 3., 0.], [1., 1., 0., 0., 3.]]) - With a list of depth 1, `block` can be used as `hstack` + With a list of depth 1, `block` can be used as `hstack`: >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) array([1, 2, 3]) @@ -834,7 +915,7 @@ def block(arrays): [2, 2], [2, 2]]) - It can also be used in places of `atleast_1d` and `atleast_2d` + It can also be used in place of `atleast_1d` and `atleast_2d`: >>> a = np.array(0) >>> b = np.array([1]) diff --git a/numpy/_core/shape_base.pyi b/numpy/_core/shape_base.pyi index 8cf604b7358d..627dbba06c19 100644 --- a/numpy/_core/shape_base.pyi +++ b/numpy/_core/shape_base.pyi @@ -117,6 +117,21 @@ def stack( casting: _CastingKind = ... ) -> _ArrayType: ... +@overload +def unstack( + array: _ArrayLike[_SCT], + /, + *, + axis: int = ..., +) -> tuple[NDArray[_SCT], ...]: ... +@overload +def unstack( + array: ArrayLike, + /, + *, + axis: int = ..., +) -> tuple[NDArray[Any], ...]: ... + @overload def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ... 
@overload diff --git a/numpy/_core/src/_simd/_simd.c b/numpy/_core/src/_simd/_simd.c index 87ecc3e9f479..c1881dd86f0a 100644 --- a/numpy/_core/src/_simd/_simd.c +++ b/numpy/_core/src/_simd/_simd.c @@ -92,6 +92,12 @@ PyMODINIT_FUNC PyInit__simd(void) NPY__CPU_DISPATCH_CALL(NPY_CPU_HAVE, ATTACH_MODULE, MAKE_MSVC_HAPPY) NPY__CPU_DISPATCH_BASELINE_CALL(ATTACH_BASELINE_MODULE, MAKE_MSVC_HAPPY) #endif + +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; err: Py_DECREF(m); diff --git a/numpy/_core/src/_simd/_simd_vector.inc b/numpy/_core/src/_simd/_simd_vector.inc index 3d0c15375074..4911402bc568 100644 --- a/numpy/_core/src/_simd/_simd_vector.inc +++ b/numpy/_core/src/_simd/_simd_vector.inc @@ -92,7 +92,7 @@ static PyTypeObject PySIMDVectorType = { * miss-align load variable of 256/512-bit vector from non-aligned * 256/512-bit stack pointer. * - * check the following links for more clearification: + * check the following links for more clarification: * https://github.com/numpy/numpy/pull/18330#issuecomment-821539919 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=49001 */ diff --git a/numpy/_core/src/common/array_assign.c b/numpy/_core/src/common/array_assign.c index 4e154b7fc7b0..3c6d2f14cb65 100644 --- a/numpy/_core/src/common/array_assign.c +++ b/numpy/_core/src/common/array_assign.c @@ -15,7 +15,7 @@ #include #include "npy_config.h" -#include "npy_pycompat.h" + #include "shape.h" diff --git a/numpy/_core/src/common/binop_override.h b/numpy/_core/src/common/binop_override.h index ec3d046796ab..def9b895c872 100644 --- a/numpy/_core/src/common/binop_override.h +++ b/numpy/_core/src/common/binop_override.h @@ -6,6 +6,7 @@ #include "numpy/arrayobject.h" #include "get_attr_string.h" +#include "npy_static_data.h" /* * Logic for deciding when binops should return NotImplemented versus when @@ -128,7 +129,7 @@ binop_should_defer(PyObject *self, PyObject *other, int inplace) 
* Classes with __array_ufunc__ are living in the future, and only need to * check whether __array_ufunc__ equals None. */ - attr = PyArray_LookupSpecial(other, npy_um_str_array_ufunc); + attr = PyArray_LookupSpecial(other, npy_interned_str.array_ufunc); if (attr != NULL) { defer = !inplace && (attr == Py_None); Py_DECREF(attr); diff --git a/numpy/_core/src/common/dlpack/dlpack.h b/numpy/_core/src/common/dlpack/dlpack.h index f0cbf61368c7..e05e600304d9 100644 --- a/numpy/_core/src/common/dlpack/dlpack.h +++ b/numpy/_core/src/common/dlpack/dlpack.h @@ -1,5 +1,6 @@ // Taken from: -// https://github.com/dmlc/dlpack/blob/ca4d00ad3e2e0f410eeab3264d21b8a39397f362/include/dlpack/dlpack.h +// https://github.com/dmlc/dlpack/blob/bbd2f4d32427e548797929af08cfe2a9cbb3cf12/include/dlpack/dlpack.h +// but added typedef to DLManagedTensorVersioned /*! * Copyright (c) 2017 by Contributors * \file dlpack.h @@ -118,6 +119,8 @@ typedef enum { kDLWebGPU = 15, /*! \brief Qualcomm Hexagon DSP */ kDLHexagon = 16, + /*! \brief Microsoft MAIA devices */ + kDLMAIA = 17, } DLDeviceType; /*! @@ -215,6 +218,9 @@ typedef struct { * return size; * } * \endcode + * + * Note that if the tensor is of size zero, then the data pointer should be + * set to `NULL`. */ void* data; /*! \brief The device of the tensor */ @@ -259,7 +265,7 @@ typedef struct DLManagedTensor { * \brief Destructor - this should be called * to destruct the manager_ctx which backs the DLManagedTensor. It can be * NULL if there is no way for the caller to provide a reasonable destructor. - * The destructors deletes the argument self as well. + * The destructor deletes the argument self as well. */ void (*deleter)(struct DLManagedTensor * self); } DLManagedTensor; @@ -269,6 +275,14 @@ typedef struct DLManagedTensor { /*! \brief bit mask to indicate that the tensor is read only. */ #define DLPACK_FLAG_BITMASK_READ_ONLY (1UL << 0UL) +/*! + * \brief bit mask to indicate that the tensor is a copy made by the producer. 
+ * + * If set, the tensor is considered solely owned throughout its lifetime by the + * consumer, until the producer-provided deleter is invoked. + */ +#define DLPACK_FLAG_BITMASK_IS_COPIED (1UL << 1UL) + /*! * \brief A versioned and managed C Tensor object, manage memory of DLTensor. * @@ -279,7 +293,7 @@ typedef struct DLManagedTensor { * * \note This is the current standard DLPack exchange data structure. */ -struct DLManagedTensorVersioned { +typedef struct DLManagedTensorVersioned { /*! * \brief The API and ABI version of the current managed Tensor */ @@ -296,7 +310,7 @@ struct DLManagedTensorVersioned { * * This should be called to destruct manager_ctx which holds the DLManagedTensorVersioned. * It can be NULL if there is no way for the caller to provide a reasonable - * destructor. The destructors deletes the argument self as well. + * destructor. The destructor deletes the argument self as well. */ void (*deleter)(struct DLManagedTensorVersioned *self); /*! @@ -308,11 +322,12 @@ struct DLManagedTensorVersioned { * stable, to ensure that deleter can be correctly called. * * \sa DLPACK_FLAG_BITMASK_READ_ONLY + * \sa DLPACK_FLAG_BITMASK_IS_COPIED */ uint64_t flags; /*! 
\brief DLTensor which is being memory managed */ DLTensor dl_tensor; -}; +} DLManagedTensorVersioned; #ifdef __cplusplus } // DLPACK_EXTERN_C diff --git a/numpy/_core/src/common/npy_argparse.c b/numpy/_core/src/common/npy_argparse.c index 59858f6207bb..70cb82bb4b2c 100644 --- a/numpy/_core/src/common/npy_argparse.c +++ b/numpy/_core/src/common/npy_argparse.c @@ -5,12 +5,36 @@ #include #include "numpy/ndarraytypes.h" +#include "numpy/npy_2_compat.h" #include "npy_argparse.h" -#include "npy_pycompat.h" +#include "npy_atomic.h" #include "npy_import.h" #include "arrayfunction_override.h" +#if PY_VERSION_HEX < 0x30d00b3 +static PyThread_type_lock argparse_mutex; +#define LOCK_ARGPARSE_MUTEX \ + PyThread_acquire_lock(argparse_mutex, WAIT_LOCK) +#define UNLOCK_ARGPARSE_MUTEX \ + PyThread_release_lock(argparse_mutex) +#else +static PyMutex argparse_mutex = {0}; +#define LOCK_ARGPARSE_MUTEX PyMutex_Lock(&argparse_mutex) +#define UNLOCK_ARGPARSE_MUTEX PyMutex_Unlock(&argparse_mutex) +#endif + +NPY_NO_EXPORT int +init_argparse_mutex(void) { +#if PY_VERSION_HEX < 0x30d00b3 + argparse_mutex = PyThread_allocate_lock(); + if (argparse_mutex == NULL) { + PyErr_NoMemory(); + return -1; + } +#endif + return 0; +} /** * Small wrapper converting to array just like CPython does. @@ -195,7 +219,7 @@ initialize_keywords(const char *funcname, } if (i >= npositional_only) { int i_kwarg = i - npositional_only; - cache->kw_strings[i_kwarg] = PyUString_InternFromString(name); + cache->kw_strings[i_kwarg] = PyUnicode_InternFromString(name); if (cache->kw_strings[i_kwarg] == NULL) { va_end(va); goto error; @@ -273,15 +297,20 @@ _npy_parse_arguments(const char *funcname, /* ... is NULL, NULL, NULL terminated: name, converter, value */ ...) 
{ - if (NPY_UNLIKELY(cache->npositional == -1)) { - va_list va; - va_start(va, kwnames); - - int res = initialize_keywords(funcname, cache, va); - va_end(va); - if (res < 0) { - return -1; + if (!npy_atomic_load_uint8(&cache->initialized)) { + LOCK_ARGPARSE_MUTEX; + if (!npy_atomic_load_uint8(&cache->initialized)) { + va_list va; + va_start(va, kwnames); + int res = initialize_keywords(funcname, cache, va); + va_end(va); + if (res < 0) { + UNLOCK_ARGPARSE_MUTEX; + return -1; + } + npy_atomic_store_uint8(&cache->initialized, 1); } + UNLOCK_ARGPARSE_MUTEX; } if (NPY_UNLIKELY(len_args > cache->npositional)) { diff --git a/numpy/_core/src/common/npy_argparse.h b/numpy/_core/src/common/npy_argparse.h index f4122103d22b..9f69da1307b5 100644 --- a/numpy/_core/src/common/npy_argparse.h +++ b/numpy/_core/src/common/npy_argparse.h @@ -20,7 +20,6 @@ NPY_NO_EXPORT int PyArray_PythonPyIntFromInt(PyObject *obj, int *value); - #define _NPY_MAX_KWARGS 15 typedef struct { @@ -28,16 +27,18 @@ typedef struct { int nargs; int npositional_only; int nrequired; + npy_uint8 initialized; /* Null terminated list of keyword argument name strings */ PyObject *kw_strings[_NPY_MAX_KWARGS+1]; } _NpyArgParserCache; +NPY_NO_EXPORT int init_argparse_mutex(void); /* * The sole purpose of this macro is to hide the argument parsing cache. * Since this cache must be static, this also removes a source of error. */ -#define NPY_PREPARE_ARGPARSER static _NpyArgParserCache __argparse_cache = {-1} +#define NPY_PREPARE_ARGPARSER static _NpyArgParserCache __argparse_cache; /** * Macro to help with argument parsing. diff --git a/numpy/_core/src/common/npy_atomic.h b/numpy/_core/src/common/npy_atomic.h new file mode 100644 index 000000000000..910028dcde7c --- /dev/null +++ b/numpy/_core/src/common/npy_atomic.h @@ -0,0 +1,100 @@ +/* + * Provides wrappers around C11 standard library atomics and MSVC intrinsics + * to provide basic atomic load and store functionality. 
This is based on + * code in CPython's pyatomic.h, pyatomic_std.h, and pyatomic_msc.h + */ + +#ifndef NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ +#define NUMPY_CORE_SRC_COMMON_NPY_ATOMIC_H_ + +#include "numpy/npy_common.h" + +#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L \ + && !defined(__STDC_NO_ATOMICS__) +// TODO: support C++ atomics as well if this header is ever needed in C++ + #include + #include + #define STDC_ATOMICS +#elif _MSC_VER + #include + #define MSC_ATOMICS + #if !defined(_M_X64) && !defined(_M_IX86) && !defined(_M_ARM64) + #error "Unsupported MSVC build configuration, neither x86 or ARM" + #endif +#elif defined(__GNUC__) && (__GNUC__ > 4) + #define GCC_ATOMICS +#elif defined(__clang__) + #if __has_builtin(__atomic_load) + #define GCC_ATOMICS + #endif +#else + #error "no supported atomic implementation for this platform/compiler" +#endif + + +static inline npy_uint8 +npy_atomic_load_uint8(const npy_uint8 *obj) { +#ifdef STDC_ATOMICS + return (npy_uint8)atomic_load((const _Atomic(uint8_t)*)obj); +#elif defined(MSC_ATOMICS) +#if defined(_M_X64) || defined(_M_IX86) + return *(volatile npy_uint8 *)obj; +#else // defined(_M_ARM64) + return (npy_uint8)__ldar8((unsigned __int8 volatile *)obj); +#endif +#elif defined(GCC_ATOMICS) + return __atomic_load_n(obj, __ATOMIC_SEQ_CST); +#endif +} + +static inline void* +npy_atomic_load_ptr(const void *obj) { +#ifdef STDC_ATOMICS + return atomic_load((const _Atomic(void *)*)obj); +#elif defined(MSC_ATOMICS) +#if SIZEOF_VOID_P == 8 +#if defined(_M_X64) || defined(_M_IX86) + return (void *)*(volatile uint64_t *)obj; +#elif defined(_M_ARM64) + return (void *)__ldar64((unsigned __int64 volatile *)obj); +#endif +#else +#if defined(_M_X64) || defined(_M_IX86) + return (void *)*(volatile uint32_t *)obj; +#elif defined(_M_ARM64) + return (void *)__ldar32((unsigned __int32 volatile *)obj); +#endif +#endif +#elif defined(GCC_ATOMICS) + return (void *)__atomic_load_n((void * const *)obj, __ATOMIC_SEQ_CST); +#endif +} + 
+static inline void +npy_atomic_store_uint8(npy_uint8 *obj, npy_uint8 value) { +#ifdef STDC_ATOMICS + atomic_store((_Atomic(uint8_t)*)obj, value); +#elif defined(MSC_ATOMICS) + _InterlockedExchange8((volatile char *)obj, (char)value); +#elif defined(GCC_ATOMICS) + __atomic_store_n(obj, value, __ATOMIC_SEQ_CST); +#endif +} + +static inline void +npy_atomic_store_ptr(void *obj, void *value) +{ +#ifdef STDC_ATOMICS + atomic_store((_Atomic(void *)*)obj, value); +#elif defined(MSC_ATOMICS) + _InterlockedExchangePointer((void * volatile *)obj, (void *)value); +#elif defined(GCC_ATOMICS) + __atomic_store_n((void **)obj, value, __ATOMIC_SEQ_CST); +#endif +} + +#undef MSC_ATOMICS +#undef STDC_ATOMICS +#undef GCC_ATOMICS + +#endif // NUMPY_CORE_SRC_COMMON_NPY_NPY_ATOMIC_H_ diff --git a/numpy/_core/src/common/npy_config.h b/numpy/_core/src/common/npy_config.h index e590366888aa..82641a85509e 100644 --- a/numpy/_core/src/common/npy_config.h +++ b/numpy/_core/src/common/npy_config.h @@ -126,6 +126,14 @@ #undef HAVE_CPOWL #undef HAVE_CEXPL +/* + * cygwin uses newlib, which has naive implementations of the + * complex log functions. + */ +#undef HAVE_CLOG +#undef HAVE_CLOGF +#undef HAVE_CLOGL + #include #if CYGWIN_VERSION_DLL_MAJOR < 3003 // rather than blocklist cabsl, hypotl, modfl, sqrtl, error out @@ -182,6 +190,16 @@ #undef HAVE_CACOSHF #undef HAVE_CACOSHL +/* + * musl's clog is low precision for some inputs. As of MUSL 1.2.5, + * the first comment in clog.c is "// FIXME". 
+ * See https://github.com/numpy/numpy/pull/24416#issuecomment-1678208628 + * and https://github.com/numpy/numpy/pull/24448 + */ +#undef HAVE_CLOG +#undef HAVE_CLOGF +#undef HAVE_CLOGL + #endif /* defined(__GLIBC) */ #endif /* defined(HAVE_FEATURES_H) */ diff --git a/numpy/_core/src/common/npy_cpu_dispatch.c b/numpy/_core/src/common/npy_cpu_dispatch.c index 992a470ada04..ff22f234a7c6 100644 --- a/numpy/_core/src/common/npy_cpu_dispatch.c +++ b/numpy/_core/src/common/npy_cpu_dispatch.c @@ -1,11 +1,14 @@ -#include "npy_cpu_dispatch.h" +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE -static PyObject *npy__cpu_dispatch_registery = NULL; +#include "npy_cpu_dispatch.h" +#include "numpy/ndarraytypes.h" +#include "npy_static_data.h" NPY_VISIBILITY_HIDDEN int npy_cpu_dispatch_tracer_init(PyObject *mod) { - if (npy__cpu_dispatch_registery != NULL) { + if (npy_static_pydata.cpu_dispatch_registry != NULL) { PyErr_Format(PyExc_RuntimeError, "CPU dispatcher tracer already initlized"); return -1; } @@ -22,7 +25,7 @@ npy_cpu_dispatch_tracer_init(PyObject *mod) if (err != 0) { return -1; } - npy__cpu_dispatch_registery = reg_dict; + npy_static_pydata.cpu_dispatch_registry = reg_dict; return 0; } @@ -30,13 +33,13 @@ NPY_VISIBILITY_HIDDEN void npy_cpu_dispatch_trace(const char *fname, const char *signature, const char **dispatch_info) { - PyObject *func_dict = PyDict_GetItemString(npy__cpu_dispatch_registery, fname); + PyObject *func_dict = PyDict_GetItemString(npy_static_pydata.cpu_dispatch_registry, fname); if (func_dict == NULL) { func_dict = PyDict_New(); if (func_dict == NULL) { return; } - int err = PyDict_SetItemString(npy__cpu_dispatch_registery, fname, func_dict); + int err = PyDict_SetItemString(npy_static_pydata.cpu_dispatch_registry, fname, func_dict); Py_DECREF(func_dict); if (err != 0) { return; diff --git a/numpy/_core/src/common/npy_cpu_features.c b/numpy/_core/src/common/npy_cpu_features.c index 7a24cb01625b..43f2c435a140 100644 --- 
a/numpy/_core/src/common/npy_cpu_features.c +++ b/numpy/_core/src/common/npy_cpu_features.c @@ -5,6 +5,11 @@ /******************** Private Definitions *********************/ +// This is initialized during module initialization and thereafter immutable. +// We don't include it in the global data struct because the definitions in +// this file are shared by the _simd, _umath_tests, and +// _multiarray_umath modules + // Hold all CPU features boolean values static unsigned char npy__cpu_have[NPY_CPU_FEATURE_MAX]; @@ -119,7 +124,8 @@ static struct { {NPY_CPU_FEATURE_ASIMDHP, "ASIMDHP"}, {NPY_CPU_FEATURE_ASIMDDP, "ASIMDDP"}, {NPY_CPU_FEATURE_ASIMDFHM, "ASIMDFHM"}, - {NPY_CPU_FEATURE_SVE, "SVE"}}; + {NPY_CPU_FEATURE_SVE, "SVE"}, + {NPY_CPU_FEATURE_RVV, "RVV"}}; NPY_VISIBILITY_HIDDEN PyObject * @@ -325,7 +331,6 @@ npy__cpu_check_env(int disable, const char *env) { ) < 0) { return -1; } - return 0; } #define NOTSUPP_BODY \ @@ -469,6 +474,8 @@ npy__cpu_init_features(void) // third call to the cpuid to get extended AVX2 & AVX512 feature bits npy__cpu_cpuid(reg, 7); npy__cpu_have[NPY_CPU_FEATURE_AVX2] = (reg[1] & (1 << 5)) != 0; + npy__cpu_have[NPY_CPU_FEATURE_AVX2] = npy__cpu_have[NPY_CPU_FEATURE_AVX2] && + npy__cpu_have[NPY_CPU_FEATURE_FMA3]; if (!npy__cpu_have[NPY_CPU_FEATURE_AVX2]) return; // detect AVX2 & FMA3 @@ -636,7 +643,7 @@ static void npy__cpu_init_features(void) { memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); - + unsigned int hwcap = getauxval(AT_HWCAP); if ((hwcap & HWCAP_S390_VX) == 0) { return; @@ -648,7 +655,7 @@ npy__cpu_init_features(void) npy__cpu_have[NPY_CPU_FEATURE_VXE2] = 1; return; } - + npy__cpu_have[NPY_CPU_FEATURE_VXE] = (hwcap & HWCAP_S390_VXE) != 0; npy__cpu_have[NPY_CPU_FEATURE_VX] = 1; @@ -813,6 +820,28 @@ npy__cpu_init_features(void) #endif } +/************** RISC-V 64 ***************/ + +#elif defined(__riscv) && __riscv_xlen == 64 + +#include + +#ifndef HWCAP_RVV + // 
https://github.com/torvalds/linux/blob/v6.8/arch/riscv/include/uapi/asm/hwcap.h#L24 + #define COMPAT_HWCAP_ISA_V (1 << ('V' - 'A')) +#endif + +static void +npy__cpu_init_features(void) +{ + memset(npy__cpu_have, 0, sizeof(npy__cpu_have[0]) * NPY_CPU_FEATURE_MAX); + + unsigned int hwcap = getauxval(AT_HWCAP); + if (hwcap & COMPAT_HWCAP_ISA_V) { + npy__cpu_have[NPY_CPU_FEATURE_RVV] = 1; + } +} + /*********** Unsupported ARCH ***********/ #else static void diff --git a/numpy/_core/src/common/npy_cpu_features.h b/numpy/_core/src/common/npy_cpu_features.h index 83522b933785..d1e9d7e60d9f 100644 --- a/numpy/_core/src/common/npy_cpu_features.h +++ b/numpy/_core/src/common/npy_cpu_features.h @@ -98,6 +98,9 @@ enum npy_cpu_features // Vector-Enhancements Facility 2 NPY_CPU_FEATURE_VXE2 = 352, + // RISC-V + NPY_CPU_FEATURE_RVV = 400, + NPY_CPU_FEATURE_MAX }; diff --git a/numpy/_core/src/common/npy_ctypes.h b/numpy/_core/src/common/npy_ctypes.h index 578de06397bd..78809732416c 100644 --- a/numpy/_core/src/common/npy_ctypes.h +++ b/numpy/_core/src/common/npy_ctypes.h @@ -4,6 +4,7 @@ #include #include "npy_import.h" +#include "multiarraymodule.h" /* * Check if a python type is a ctypes class. 
@@ -17,16 +18,18 @@ static inline int npy_ctypes_check(PyTypeObject *obj) { - static PyObject *py_func = NULL; PyObject *ret_obj; int ret; - npy_cache_import("numpy._core._internal", "npy_ctypes_check", &py_func); - if (py_func == NULL) { + + if (npy_cache_import_runtime( + "numpy._core._internal", "npy_ctypes_check", + &npy_runtime_imports.npy_ctypes_check) == -1) { goto fail; } - ret_obj = PyObject_CallFunctionObjArgs(py_func, (PyObject *)obj, NULL); + ret_obj = PyObject_CallFunctionObjArgs( + npy_runtime_imports.npy_ctypes_check, (PyObject *)obj, NULL); if (ret_obj == NULL) { goto fail; } diff --git a/numpy/_core/src/common/npy_dlpack.h b/numpy/_core/src/common/npy_dlpack.h index cb926a26271d..1dd3ae7f88e5 100644 --- a/numpy/_core/src/common/npy_dlpack.h +++ b/numpy/_core/src/common/npy_dlpack.h @@ -6,23 +6,27 @@ // Part of the Array API specification. #define NPY_DLPACK_CAPSULE_NAME "dltensor" +#define NPY_DLPACK_VERSIONED_CAPSULE_NAME "dltensor_versioned" #define NPY_DLPACK_USED_CAPSULE_NAME "used_dltensor" +#define NPY_DLPACK_VERSIONED_USED_CAPSULE_NAME "used_dltensor_versioned" // Used internally by NumPy to store a base object // as it has to release a reference to the original // capsule. 
#define NPY_DLPACK_INTERNAL_CAPSULE_NAME "numpy_dltensor" +#define NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME "numpy_dltensor_versioned" -PyObject * +NPY_NO_EXPORT PyObject * array_dlpack(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); -PyObject * +NPY_NO_EXPORT PyObject * array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)); NPY_NO_EXPORT PyObject * -from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj); +from_dlpack(PyObject *NPY_UNUSED(self), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames); #endif diff --git a/numpy/_core/src/common/npy_hashtable.c b/numpy/_core/src/common/npy_hashtable.c index 14f6cca1b864..596e62cf8354 100644 --- a/numpy/_core/src/common/npy_hashtable.c +++ b/numpy/_core/src/common/npy_hashtable.c @@ -160,8 +160,9 @@ _resize_if_necessary(PyArrayIdentityHash *tb) for (npy_intp i = 0; i < prev_size; i++) { PyObject **item = &old_table[i * (tb->key_len + 1)]; if (item[0] != NULL) { - tb->nelem -= 1; /* Decrement, setitem will increment again */ - PyArrayIdentityHash_SetItem(tb, item+1, item[0], 1); + PyObject **tb_item = find_item(tb, item + 1); + tb_item[0] = item[0]; + memcpy(tb_item+1, item+1, tb->key_len * sizeof(PyObject *)); } } PyMem_Free(old_table); @@ -179,10 +180,13 @@ _resize_if_necessary(PyArrayIdentityHash *tb) * @param value Normally a Python object, no reference counting is done. * use NULL to clear an item. If the item does not exist, no * action is performed for NULL. - * @param replace If 1, allow replacements. + * @param replace If 1, allow replacements. If replace is 0 an error is raised + * if the stored value is different from the value to be cached. If the + * value to be cached is identical to the stored value, the value to be + * cached is ignored and no error is raised. * @returns 0 on success, -1 with a MemoryError or RuntimeError (if an item - * is added which is already in the cache). The caller should avoid - * the RuntimeError. 
+ * is added which is already in the cache and replace is 0). The + * caller should avoid the RuntimeError. */ NPY_NO_EXPORT int PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, @@ -195,9 +199,9 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject **tb_item = find_item(tb, key); if (value != NULL) { - if (tb_item[0] != NULL && !replace) { + if (tb_item[0] != NULL && tb_item[0] != value && !replace) { PyErr_SetString(PyExc_RuntimeError, - "Identity cache already includes the item."); + "Identity cache already includes an item with this key."); return -1; } tb_item[0] = value; @@ -214,7 +218,8 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash const *tb, PyObject *const *key) +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key) { - return find_item(tb, key)[0]; + PyObject *res = find_item(tb, key)[0]; + return res; } diff --git a/numpy/_core/src/common/npy_hashtable.h b/numpy/_core/src/common/npy_hashtable.h index a0bf81967d75..a4252da87aff 100644 --- a/numpy/_core/src/common/npy_hashtable.h +++ b/numpy/_core/src/common/npy_hashtable.h @@ -21,7 +21,7 @@ PyArrayIdentityHash_SetItem(PyArrayIdentityHash *tb, PyObject *const *key, PyObject *value, int replace); NPY_NO_EXPORT PyObject * -PyArrayIdentityHash_GetItem(PyArrayIdentityHash const *tb, PyObject *const *key); +PyArrayIdentityHash_GetItem(PyArrayIdentityHash *tb, PyObject *const *key); NPY_NO_EXPORT PyArrayIdentityHash * PyArrayIdentityHash_New(int key_len); diff --git a/numpy/_core/src/common/npy_import.c b/numpy/_core/src/common/npy_import.c new file mode 100644 index 000000000000..cff071e9b522 --- /dev/null +++ b/numpy/_core/src/common/npy_import.c @@ -0,0 +1,21 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include "numpy/ndarraytypes.h" +#include "npy_import.h" +#include "npy_atomic.h" + + +NPY_VISIBILITY_HIDDEN npy_runtime_imports_struct npy_runtime_imports; 
+ +NPY_NO_EXPORT int +init_import_mutex(void) { +#if PY_VERSION_HEX < 0x30d00b3 + npy_runtime_imports.import_mutex = PyThread_allocate_lock(); + if (npy_runtime_imports.import_mutex == NULL) { + PyErr_NoMemory(); + return -1; + } +#endif + return 0; +} diff --git a/numpy/_core/src/common/npy_import.h b/numpy/_core/src/common/npy_import.h index 58b4ba0bc7e5..9df85357b5ec 100644 --- a/numpy/_core/src/common/npy_import.h +++ b/numpy/_core/src/common/npy_import.h @@ -3,7 +3,74 @@ #include -/*! \brief Fetch and cache Python function. +#include "numpy/npy_common.h" +#include "npy_atomic.h" + +/* + * Cached references to objects obtained via an import. All of these are + * can be initialized at any time by npy_cache_import_runtime. + */ +typedef struct npy_runtime_imports_struct { +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_type_lock import_mutex; +#else + PyMutex import_mutex; +#endif + PyObject *_add_dtype_helper; + PyObject *_all; + PyObject *_amax; + PyObject *_amin; + PyObject *_any; + PyObject *array_function_errmsg_formatter; + PyObject *array_ufunc_errmsg_formatter; + PyObject *_clip; + PyObject *_commastring; + PyObject *_convert_to_stringdtype_kwargs; + PyObject *_default_array_repr; + PyObject *_default_array_str; + PyObject *_dump; + PyObject *_dumps; + PyObject *_getfield_is_safe; + PyObject *internal_gcd_func; + PyObject *_mean; + PyObject *NO_NEP50_WARNING; + PyObject *npy_ctypes_check; + PyObject *numpy_matrix; + PyObject *_prod; + PyObject *_promote_fields; + PyObject *_std; + PyObject *_sum; + PyObject *_ufunc_doc_signature_formatter; + PyObject *_var; + PyObject *_view_is_safe; + PyObject *_void_scalar_to_string; +} npy_runtime_imports_struct; + +NPY_VISIBILITY_HIDDEN extern npy_runtime_imports_struct npy_runtime_imports; + +/*! \brief Import a Python object. + + * This function imports the Python function specified by + * \a module and \a function, increments its reference count, and returns + * the result. On error, returns NULL. 
+ * + * @param module Absolute module name. + * @param attr module attribute to cache. + */ +static inline PyObject* +npy_import(const char *module, const char *attr) +{ + PyObject *ret = NULL; + PyObject *mod = PyImport_ImportModule(module); + + if (mod != NULL) { + ret = PyObject_GetAttrString(mod, attr); + Py_DECREF(mod); + } + return ret; +} + +/*! \brief Fetch and cache Python object at runtime. * * Import a Python function and cache it for use. The function checks if * cache is NULL, and if not NULL imports the Python function specified by @@ -16,17 +83,32 @@ * @param attr module attribute to cache. * @param cache Storage location for imported function. */ -static inline void -npy_cache_import(const char *module, const char *attr, PyObject **cache) -{ - if (NPY_UNLIKELY(*cache == NULL)) { - PyObject *mod = PyImport_ImportModule(module); - - if (mod != NULL) { - *cache = PyObject_GetAttrString(mod, attr); - Py_DECREF(mod); +static inline int +npy_cache_import_runtime(const char *module, const char *attr, PyObject **obj) { + if (!npy_atomic_load_ptr(obj)) { + PyObject* value = npy_import(module, attr); + if (value == NULL) { + return -1; } +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_acquire_lock(npy_runtime_imports.import_mutex, WAIT_LOCK); +#else + PyMutex_Lock(&npy_runtime_imports.import_mutex); +#endif + if (!npy_atomic_load_ptr(obj)) { + npy_atomic_store_ptr(obj, Py_NewRef(value)); + } +#if PY_VERSION_HEX < 0x30d00b3 + PyThread_release_lock(npy_runtime_imports.import_mutex); +#else + PyMutex_Unlock(&npy_runtime_imports.import_mutex); +#endif + Py_DECREF(value); } + return 0; } +NPY_NO_EXPORT int +init_import_mutex(void); + #endif /* NUMPY_CORE_SRC_COMMON_NPY_IMPORT_H_ */ diff --git a/numpy/_core/src/common/npy_longdouble.c b/numpy/_core/src/common/npy_longdouble.c index 38dfd325c685..ce80a9ae2bc3 100644 --- a/numpy/_core/src/common/npy_longdouble.c +++ b/numpy/_core/src/common/npy_longdouble.c @@ -6,7 +6,7 @@ #include "numpy/ndarraytypes.h" #include 
"numpy/npy_math.h" -#include "npy_pycompat.h" + #include "numpyos.h" /* diff --git a/numpy/_core/src/common/npy_pycompat.h b/numpy/_core/src/common/npy_pycompat.h index ce6c34fa1333..769b90215f2b 100644 --- a/numpy/_core/src/common/npy_pycompat.h +++ b/numpy/_core/src/common/npy_pycompat.h @@ -2,21 +2,8 @@ #define NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ #include "numpy/npy_3kcompat.h" +#include "pythoncapi-compat/pythoncapi_compat.h" - -/* - * In Python 3.10a7 (or b1), python started using the identity for the hash - * when a value is NaN. See https://bugs.python.org/issue43475 - */ -#if PY_VERSION_HEX > 0x030a00a6 #define Npy_HashDouble _Py_HashDouble -#else -static inline Py_hash_t -Npy_HashDouble(PyObject *NPY_UNUSED(identity), double val) -{ - return _Py_HashDouble(val); -} -#endif - #endif /* NUMPY_CORE_SRC_COMMON_NPY_PYCOMPAT_H_ */ diff --git a/numpy/_core/src/common/numpyos.c b/numpy/_core/src/common/numpyos.c index fb69e2587ee9..319f5dcc395f 100644 --- a/numpy/_core/src/common/numpyos.c +++ b/numpy/_core/src/common/numpyos.c @@ -9,7 +9,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #if defined(HAVE_STRTOLD_L) && !defined(_GNU_SOURCE) # define _GNU_SOURCE diff --git a/numpy/_core/src/common/python_xerbla.c b/numpy/_core/src/common/python_xerbla.c index 37a41408be22..71a4c81edbf1 100644 --- a/numpy/_core/src/common/python_xerbla.c +++ b/numpy/_core/src/common/python_xerbla.c @@ -28,22 +28,16 @@ CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info) char buf[sizeof(format) + 6 + 4]; /* 6 for name, 4 for param. num. 
*/ int len = 0; /* length of subroutine name*/ -#ifdef WITH_THREAD PyGILState_STATE save; -#endif while( len<6 && srname[len]!='\0' ) len++; while( len && srname[len-1]==' ' ) len--; -#ifdef WITH_THREAD save = PyGILState_Ensure(); -#endif PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info); PyErr_SetString(PyExc_ValueError, buf); -#ifdef WITH_THREAD PyGILState_Release(save); -#endif return 0; } diff --git a/numpy/_core/src/common/pythoncapi-compat b/numpy/_core/src/common/pythoncapi-compat new file mode 160000 index 000000000000..2d18aecd7b2f --- /dev/null +++ b/numpy/_core/src/common/pythoncapi-compat @@ -0,0 +1 @@ +Subproject commit 2d18aecd7b2f549d38a13e27b682ea4966f37bd8 diff --git a/numpy/_core/src/common/simd/avx512/avx512.h b/numpy/_core/src/common/simd/avx512/avx512.h index aa6abe256424..2a4a20b2970d 100644 --- a/numpy/_core/src/common/simd/avx512/avx512.h +++ b/numpy/_core/src/common/simd/avx512/avx512.h @@ -11,6 +11,8 @@ // Enough limit to allow us to use _mm512_i32gather_* and _mm512_i32scatter_* #define NPY_SIMD_MAXLOAD_STRIDE32 (0x7fffffff / 16) #define NPY_SIMD_MAXSTORE_STRIDE32 (0x7fffffff / 16) +#define NPY_SIMD_MAXLOAD_STRIDE64 (0x7fffffff / 16) +#define NPY_SIMD_MAXSTORE_STRIDE64 (0x7fffffff / 16) typedef __m512i npyv_u8; typedef __m512i npyv_s8; diff --git a/numpy/_core/src/common/simd/avx512/conversion.h b/numpy/_core/src/common/simd/avx512/conversion.h index 474aee446b6a..3b29b6729f20 100644 --- a/numpy/_core/src/common/simd/avx512/conversion.h +++ b/numpy/_core/src/common/simd/avx512/conversion.h @@ -131,20 +131,44 @@ npyv_pack_b8_b64(npyv_b64 a, npyv_b64 b, npyv_b64 c, npyv_b64 d, __mmask16 gh = _mm512_kunpackb((__mmask16)h, (__mmask16)g); return npyv_pack_b8_b32(ab, cd, ef, gh); } - +/* + * A compiler bug workaround on Intel Compiler Classic. + * The bug manifests specifically when the + * scalar result of _cvtmask64_u64 is compared against the constant -1. 
This + * comparison uniquely triggers a bug under conditions of equality (==) or + * inequality (!=) checks, which are typically used in reduction operations like + * np.logical_or. + * + * The underlying issue arises from the compiler's optimizer. When the last + * vector comparison instruction operates on zmm, the optimizer erroneously + * emits a duplicate of this instruction but on the lower half register ymm. It + * then performs a bitwise XOR operation between the mask produced by this + * duplicated instruction and the mask from the original comparison instruction. + * This erroneous behavior leads to incorrect results. + * + * See https://github.com/numpy/numpy/issues/26197#issuecomment-2056750975 + */ +#ifdef __INTEL_COMPILER +#define NPYV__VOLATILE_CVTMASK64 volatile +#else +#define NPYV__VOLATILE_CVTMASK64 +#endif // convert boolean vectors to integer bitfield -NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) -{ +NPY_FINLINE npy_uint64 npyv_tobits_b8(npyv_b8 a) { #ifdef NPY_HAVE_AVX512BW_MASK - return (npy_uint64)_cvtmask64_u64(a); + npy_uint64 NPYV__VOLATILE_CVTMASK64 t = (npy_uint64)_cvtmask64_u64(a); + return t; #elif defined(NPY_HAVE_AVX512BW) - return (npy_uint64)a; + npy_uint64 NPYV__VOLATILE_CVTMASK64 t = (npy_uint64)a; + return t; #else int mask_lo = _mm256_movemask_epi8(npyv512_lower_si256(a)); int mask_hi = _mm256_movemask_epi8(npyv512_higher_si256(a)); return (unsigned)mask_lo | ((npy_uint64)(unsigned)mask_hi << 32); #endif } +#undef NPYV__VOLATILE_CVTMASK64 + NPY_FINLINE npy_uint64 npyv_tobits_b16(npyv_b16 a) { #ifdef NPY_HAVE_AVX512BW_MASK diff --git a/numpy/_core/src/common/simd/simd.h b/numpy/_core/src/common/simd/simd.h index 2d9d48cf1cdd..706229af0a62 100644 --- a/numpy/_core/src/common/simd/simd.h +++ b/numpy/_core/src/common/simd/simd.h @@ -1,5 +1,7 @@ #ifndef _NPY_SIMD_H_ #define _NPY_SIMD_H_ + +#include /* for alignof until C23 */ /** * the NumPy C SIMD vectorization interface "NPYV" are types and functions intended * to simplify 
vectorization of code on different platforms, currently supports @@ -123,10 +125,11 @@ typedef double npyv_lanetype_f64; * acceptable limit of strides before using any of non-contiguous load/store intrinsics. * * For instance: - * npy_intp ld_stride = step[0] / sizeof(float); - * npy_intp st_stride = step[1] / sizeof(float); * - * if (npyv_loadable_stride_f32(ld_stride) && npyv_storable_stride_f32(st_stride)) { + * if (npyv_loadable_stride_f32(steps[0]) && npyv_storable_stride_f32(steps[1])) { + * // Strides are now guaranteed to be a multiple and compatible + * npy_intp ld_stride = steps[0] / sizeof(float); + * npy_intp st_stride = steps[1] / sizeof(float); * for (;;) * npyv_f32 a = npyv_loadn_f32(ld_pointer, ld_stride); * // ... @@ -134,7 +137,7 @@ typedef double npyv_lanetype_f64; * } * else { * for (;;) - * // C scalars + * // C scalars, use byte steps/strides. * } */ #ifndef NPY_SIMD_MAXLOAD_STRIDE32 @@ -149,11 +152,29 @@ typedef double npyv_lanetype_f64; #ifndef NPY_SIMD_MAXSTORE_STRIDE64 #define NPY_SIMD_MAXSTORE_STRIDE64 0 #endif -#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE) \ - NPY_FINLINE int npyv_loadable_stride_##SFX(npy_intp stride) \ - { return MAXLOAD > 0 ? llabs(stride) <= MAXLOAD : 1; } \ - NPY_FINLINE int npyv_storable_stride_##SFX(npy_intp stride) \ - { return MAXSTORE > 0 ? llabs(stride) <= MAXSTORE : 1; } +#define NPYV_IMPL_MAXSTRIDE(SFX, MAXLOAD, MAXSTORE) \ + NPY_FINLINE int \ + npyv_loadable_stride_##SFX(npy_intp stride) \ + { \ + if (alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ + stride % sizeof(npyv_lanetype_##SFX) != 0) { \ + /* stride not a multiple of itemsize, cannot handle. */ \ + return 0; \ + } \ + stride = stride / sizeof(npyv_lanetype_##SFX); \ + return MAXLOAD > 0 ? 
llabs(stride) <= MAXLOAD : 1; \ + } \ + NPY_FINLINE int \ + npyv_storable_stride_##SFX(npy_intp stride) \ + { \ + if (alignof(npyv_lanetype_##SFX) != sizeof(npyv_lanetype_##SFX) && \ + stride % sizeof(npyv_lanetype_##SFX) != 0) { \ + /* stride not a multiple of itemsize, cannot handle. */ \ + return 0; \ + } \ + stride = stride / sizeof(npyv_lanetype_##SFX); \ + return MAXSTORE > 0 ? llabs(stride) <= MAXSTORE : 1; \ + } #if NPY_SIMD NPYV_IMPL_MAXSTRIDE(u32, NPY_SIMD_MAXLOAD_STRIDE32, NPY_SIMD_MAXSTORE_STRIDE32) NPYV_IMPL_MAXSTRIDE(s32, NPY_SIMD_MAXLOAD_STRIDE32, NPY_SIMD_MAXSTORE_STRIDE32) diff --git a/numpy/_core/src/common/ucsnarrow.c b/numpy/_core/src/common/ucsnarrow.c index 4bea4beee384..203e02fbb3dd 100644 --- a/numpy/_core/src/common/ucsnarrow.c +++ b/numpy/_core/src/common/ucsnarrow.c @@ -9,7 +9,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "ctors.h" /* diff --git a/numpy/_core/src/common/ufunc_override.c b/numpy/_core/src/common/ufunc_override.c index 4fb4d4b3edda..17b678edd4bf 100644 --- a/numpy/_core/src/common/ufunc_override.c +++ b/numpy/_core/src/common/ufunc_override.c @@ -1,11 +1,13 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define _MULTIARRAYMODULE +#include "numpy/ndarraytypes.h" #include "npy_pycompat.h" #include "get_attr_string.h" #include "npy_import.h" #include "ufunc_override.h" #include "scalartypes.h" +#include "npy_static_data.h" /* * Check whether an object has __array_ufunc__ defined on its class and it @@ -18,15 +20,8 @@ NPY_NO_EXPORT PyObject * PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) { - static PyObject *ndarray_array_ufunc = NULL; PyObject *cls_array_ufunc; - /* On first entry, cache ndarray's __array_ufunc__ */ - if (ndarray_array_ufunc == NULL) { - ndarray_array_ufunc = PyObject_GetAttrString((PyObject *)&PyArray_Type, - "__array_ufunc__"); - } - /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { return NULL; @@ -40,7 +35,7 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject 
*obj) * Does the class define __array_ufunc__? (Note that LookupSpecial has fast * return for basic python types, so no need to worry about those here) */ - cls_array_ufunc = PyArray_LookupSpecial(obj, npy_um_str_array_ufunc); + cls_array_ufunc = PyArray_LookupSpecial(obj, npy_interned_str.array_ufunc); if (cls_array_ufunc == NULL) { if (PyErr_Occurred()) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ @@ -48,7 +43,7 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj) return NULL; } /* Ignore if the same as ndarray.__array_ufunc__ */ - if (cls_array_ufunc == ndarray_array_ufunc) { + if (cls_array_ufunc == npy_static_pydata.ndarray_array_ufunc) { Py_DECREF(cls_array_ufunc); return NULL; } @@ -99,12 +94,11 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * *out_kwd_obj = NULL; return -1; } - /* borrowed reference */ - *out_kwd_obj = _PyDict_GetItemStringWithError(kwds, "out"); - if (*out_kwd_obj == NULL) { - if (PyErr_Occurred()) { - return -1; - } + int result = PyDict_GetItemStringRef(kwds, "out", out_kwd_obj); + if (result == -1) { + return -1; + } + else if (result == 0) { Py_INCREF(Py_None); *out_kwd_obj = Py_None; return 0; @@ -118,15 +112,14 @@ PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject * seq = PySequence_Fast(*out_kwd_obj, "Could not convert object to sequence"); if (seq == NULL) { - *out_kwd_obj = NULL; + Py_CLEAR(*out_kwd_obj); return -1; } *out_objs = PySequence_Fast_ITEMS(seq); - *out_kwd_obj = seq; + Py_SETREF(*out_kwd_obj, seq); return PySequence_Fast_GET_SIZE(seq); } else { - Py_INCREF(*out_kwd_obj); *out_objs = out_kwd_obj; return 1; } diff --git a/numpy/_core/src/dummymodule.c b/numpy/_core/src/dummymodule.c index 7284ffd68545..2f293d6c4cd6 100644 --- a/numpy/_core/src/dummymodule.c +++ b/numpy/_core/src/dummymodule.c @@ -10,8 +10,6 @@ #define PY_SSIZE_T_CLEAN #include -#include "npy_pycompat.h" - static struct PyMethodDef methods[] = { {NULL, 
NULL, 0, NULL} }; diff --git a/numpy/_core/src/highway b/numpy/_core/src/highway index 58b52a717469..a97b5d371d69 160000 --- a/numpy/_core/src/highway +++ b/numpy/_core/src/highway @@ -1 +1 @@ -Subproject commit 58b52a717469e62b2d9b8eaa2f5dddb44d4a4cbf +Subproject commit a97b5d371d696564e206627a883b1341c65bd983 diff --git a/numpy/_core/src/multiarray/_multiarray_tests.c.src b/numpy/_core/src/multiarray/_multiarray_tests.c.src index 606405dbfdda..5d0d91f1e996 100644 --- a/numpy/_core/src/multiarray/_multiarray_tests.c.src +++ b/numpy/_core/src/multiarray/_multiarray_tests.c.src @@ -42,6 +42,28 @@ argparse_example_function(PyObject *NPY_UNUSED(mod), Py_RETURN_NONE; } +/* + * Tests that argparse cache creation is thread-safe. *must* be called only + * by the python-level test_thread_safe_argparse_cache function, otherwise + * the cache might be created before the test to make sure cache creation is + * thread-safe runs + */ +static PyObject * +threaded_argparse_example_function(PyObject *NPY_UNUSED(mod), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + NPY_PREPARE_ARGPARSER; + int arg1; + PyObject *arg2; + if (npy_parse_arguments("thread_func", args, len_args, kwnames, + "$arg1", &PyArray_PythonPyIntFromInt, &arg1, + "$arg2", NULL, &arg2, + NULL, NULL, NULL) < 0) { + return NULL; + } + Py_RETURN_NONE; +} + /* test PyArray_IsPythonScalar, before including private py3 compat header */ static PyObject * IsPythonScalar(PyObject * dummy, PyObject *args) @@ -58,7 +80,7 @@ IsPythonScalar(PyObject * dummy, PyObject *args) } } -#include "npy_pycompat.h" + /** Function to test calling via ctypes */ @@ -1992,7 +2014,35 @@ PrintFloat_Printf_g(PyObject *obj, int precision) return PyUnicode_FromString(str); } - +/* + * format_float_OSprintf_g(val, precision) + * + * Print a floating point scalar using the system's printf function, + * equivalent to: + * + * printf("%.*g", precision, val); + * + * for half/float/double, or replacing 'g' by 'Lg' for longdouble. 
This + * method is designed to help cross-validate the format_float_* methods. + * + * Parameters + * ---------- + * val : python float or numpy floating scalar + * Value to format. + * + * precision : non-negative integer, optional + * Precision given to printf. + * + * Returns + * ------- + * rep : string + * The string representation of the floating point value + * + * See Also + * -------- + * format_float_scientific + * format_float_positional + */ static PyObject * printf_float_g(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds) { @@ -2177,6 +2227,9 @@ static PyMethodDef Multiarray_TestsMethods[] = { {"argparse_example_function", (PyCFunction)argparse_example_function, METH_KEYWORDS | METH_FASTCALL, NULL}, + {"threaded_argparse_example_function", + (PyCFunction)threaded_argparse_example_function, + METH_KEYWORDS | METH_FASTCALL, NULL}, {"IsPythonScalar", IsPythonScalar, METH_VARARGS, NULL}, @@ -2379,10 +2432,19 @@ PyMODINIT_FUNC PyInit__multiarray_tests(void) return m; } import_array(); + if (init_argparse_mutex() < 0) { + return NULL; + } if (PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "cannot load _multiarray_tests module."); } + +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/_core/src/multiarray/abstractdtypes.c b/numpy/_core/src/multiarray/abstractdtypes.c index 3142411b2b61..ae7a8ec1506c 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.c +++ b/numpy/_core/src/multiarray/abstractdtypes.c @@ -21,7 +21,7 @@ int_default_descriptor(PyArray_DTypeMeta* NPY_UNUSED(cls)) } static PyArray_Descr * -discover_descriptor_from_pyint( +discover_descriptor_from_pylong( PyArray_DTypeMeta *NPY_UNUSED(cls), PyObject *obj) { assert(PyLong_Check(obj)); @@ -85,33 +85,49 @@ discover_descriptor_from_pycomplex( NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes() { - ((PyTypeObject 
*)&PyArray_PyIntAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyIntAbstractDType.scalar_type = &PyLong_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyIntAbstractDType) < 0) { + if (PyType_Ready((PyTypeObject *)&PyArray_IntAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyFloatAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyFloatAbstractDType.scalar_type = &PyFloat_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatAbstractDType) < 0) { + if (PyType_Ready((PyTypeObject *)&PyArray_FloatAbstractDType) < 0) { return -1; } - ((PyTypeObject *)&PyArray_PyComplexAbstractDType)->tp_base = &PyArrayDescr_Type; - PyArray_PyComplexAbstractDType.scalar_type = &PyComplex_Type; - if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexAbstractDType) < 0) { + if (PyType_Ready((PyTypeObject *)&PyArray_ComplexAbstractDType) < 0) { + return -1; + } + /* + * Delayed assignments to avoid "error C2099: initializer is not a constant" + * in windows compilers. Can hopefully be done in structs in the future. 
+ */ + ((PyTypeObject *)&PyArray_PyLongDType)->tp_base = + (PyTypeObject *)&PyArray_IntAbstractDType; + PyArray_PyLongDType.scalar_type = &PyLong_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyLongDType) < 0) { + return -1; + } + ((PyTypeObject *)&PyArray_PyFloatDType)->tp_base = + (PyTypeObject *)&PyArray_FloatAbstractDType; + PyArray_PyFloatDType.scalar_type = &PyFloat_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyFloatDType) < 0) { + return -1; + } + ((PyTypeObject *)&PyArray_PyComplexDType)->tp_base = + (PyTypeObject *)&PyArray_ComplexAbstractDType; + PyArray_PyComplexDType.scalar_type = &PyComplex_Type; + if (PyType_Ready((PyTypeObject *)&PyArray_PyComplexDType) < 0) { return -1; } /* Register the new DTypes for discovery */ if (_PyArray_MapPyTypeToDType( - &PyArray_PyIntAbstractDType, &PyLong_Type, NPY_FALSE) < 0) { + &PyArray_PyLongDType, &PyLong_Type, NPY_FALSE) < 0) { return -1; } if (_PyArray_MapPyTypeToDType( - &PyArray_PyFloatAbstractDType, &PyFloat_Type, NPY_FALSE) < 0) { + &PyArray_PyFloatDType, &PyFloat_Type, NPY_FALSE) < 0) { return -1; } if (_PyArray_MapPyTypeToDType( - &PyArray_PyComplexAbstractDType, &PyComplex_Type, NPY_FALSE) < 0) { + &PyArray_PyComplexDType, &PyComplex_Type, NPY_FALSE) < 0) { return -1; } @@ -161,7 +177,6 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other) /* This is a back-compat fallback to usually do the right thing... 
*/ PyArray_DTypeMeta *uint8_dt = &PyArray_UInt8DType; PyArray_DTypeMeta *res = NPY_DT_CALL_common_dtype(other, uint8_dt); - Py_DECREF(uint8_dt); if (res == NULL) { PyErr_Clear(); } @@ -205,7 +220,7 @@ float_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return NPY_DT_NewRef(&PyArray_DoubleDType); } } - else if (other == &PyArray_PyIntAbstractDType) { + else if (other == &PyArray_PyLongDType) { Py_INCREF(cls); return cls; } @@ -261,8 +276,8 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return res; } - else if (other == &PyArray_PyIntAbstractDType || - other == &PyArray_PyFloatAbstractDType) { + else if (other == &PyArray_PyLongDType || + other == &PyArray_PyFloatDType) { Py_INCREF(cls); return cls; } @@ -272,59 +287,206 @@ complex_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) /* - * TODO: These abstract DTypes also carry the dual role of representing - * `Floating`, `Complex`, and `Integer` (both signed and unsigned). - * They will have to be renamed and exposed in that capacity. + * Define abstract numerical DTypes that all regular ones can inherit from + * (in arraytypes.c.src). + * Here, also define types corresponding to the python scalars. 
*/ -NPY_DType_Slots pyintabstractdtype_slots = { - .discover_descr_from_pyobject = discover_descriptor_from_pyint, +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_IntAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy.dtypes._IntegerAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; + +NPY_DType_Slots pylongdtype_slots = { + .discover_descr_from_pyobject = discover_descriptor_from_pylong, .default_descr = int_default_descriptor, .common_dtype = int_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyIntAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyLongDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._IntegerAbstractDType", + .tp_name = "numpy.dtypes._PyLongDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, - .flags = NPY_DT_ABSTRACT, - .dt_slots = &pyintabstractdtype_slots, + .dt_slots = &pylongdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_FloatAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy.dtypes._FloatAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; -NPY_DType_Slots pyfloatabstractdtype_slots = { +NPY_DType_Slots pyfloatdtype_slots = { .discover_descr_from_pyobject = discover_descriptor_from_pyfloat, .default_descr = float_default_descriptor, .common_dtype = float_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyFloatDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = 
"numpy._FloatAbstractDType", + .tp_name = "numpy.dtypes._PyFloatDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, - .flags = NPY_DT_ABSTRACT, - .dt_slots = &pyfloatabstractdtype_slots, + .dt_slots = &pyfloatdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_ComplexAbstractDType = {{{ + PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) + .tp_name = "numpy.dtypes._ComplexAbstractDType", + .tp_base = &PyArrayDescr_Type, + .tp_basicsize = sizeof(PyArray_Descr), + .tp_flags = Py_TPFLAGS_DEFAULT, + },}, + .type_num = -1, + .flags = NPY_DT_ABSTRACT, +}; -NPY_DType_Slots pycomplexabstractdtype_slots = { +NPY_DType_Slots pycomplexdtype_slots = { .discover_descr_from_pyobject = discover_descriptor_from_pycomplex, .default_descr = complex_default_descriptor, .common_dtype = complex_common_dtype, }; -NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexAbstractDType = {{{ +NPY_NO_EXPORT PyArray_DTypeMeta PyArray_PyComplexDType = {{{ PyVarObject_HEAD_INIT(&PyArrayDTypeMeta_Type, 0) - .tp_name = "numpy._ComplexAbstractDType", + .tp_name = "numpy.dtypes._PyComplexDType", + .tp_base = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ .tp_basicsize = sizeof(PyArray_Descr), .tp_flags = Py_TPFLAGS_DEFAULT, },}, .type_num = -1, - .flags = NPY_DT_ABSTRACT, - .dt_slots = &pycomplexabstractdtype_slots, + .dt_slots = &pycomplexdtype_slots, + .scalar_type = NULL, /* set in initialize_and_map_pytypes_to_dtypes */ }; + + +/* + * Additional functions to deal with Python literal int, float, complex + */ +/* + * This function takes an existing array operand and if the new descr does + * not match, replaces it with a new array that has the correct descriptor + * and holds exactly the scalar value. 
+ */ +NPY_NO_EXPORT int +npy_update_operand_for_scalar( + PyArrayObject **operand, PyObject *scalar, PyArray_Descr *descr, + NPY_CASTING casting) +{ + if (PyArray_EquivTypes(PyArray_DESCR(*operand), descr)) { + /* + * TODO: This is an unfortunate work-around for legacy type resolvers + * (see `convert_ufunc_arguments` in `ufunc_object.c`), that + * currently forces us to replace the array. + */ + if (!(PyArray_FLAGS(*operand) & NPY_ARRAY_WAS_PYTHON_INT)) { + return 0; + } + } + else if (NPY_UNLIKELY(casting == NPY_EQUIV_CASTING) && + descr->type_num != NPY_OBJECT) { + /* + * increadibly niche, but users could pass equiv casting and we + * actually need to cast. Let object pass (technically correct) but + * in all other cases, we don't technically consider equivalent. + * NOTE(seberg): I don't think we should be beholden to this logic. + */ + PyErr_Format(PyExc_TypeError, + "cannot cast Python %s to %S under the casting rule 'equiv'", + Py_TYPE(scalar)->tp_name, descr); + return -1; + } + + Py_INCREF(descr); + PyArrayObject *new = (PyArrayObject *)PyArray_NewFromDescr( + &PyArray_Type, descr, 0, NULL, NULL, NULL, 0, NULL); + Py_SETREF(*operand, new); + if (*operand == NULL) { + return -1; + } + if (scalar == NULL) { + /* The ufunc.resolve_dtypes paths can go here. Anything should go. */ + return 0; + } + return PyArray_SETITEM(new, PyArray_BYTES(*operand), scalar); +} + + +/* + * When a user passed a Python literal (int, float, complex), special promotion + * rules mean that we don't know the exact descriptor that should be used. + * + * Typically, this just doesn't really matter. Unfortunately, there are two + * exceptions: + * 1. The user might have passed `signature=` which may not be compatible. + * In that case, we cannot really assume "safe" casting. + * 2. It is at least fathomable that a DType doesn't deal with this directly. + * or that using the original int64/object is wrong in the type resolution. 
+ * + * The solution is to assume that we can use the common DType of the signature + * and the Python scalar DType (`in_DT`) as a safe intermediate. + */ +NPY_NO_EXPORT PyArray_Descr * +npy_find_descr_for_scalar( + PyObject *scalar, PyArray_Descr *original_descr, + PyArray_DTypeMeta *in_DT, PyArray_DTypeMeta *op_DT) +{ + PyArray_Descr *res; + /* There is a good chance, descriptors already match... */ + if (NPY_DTYPE(original_descr) == op_DT) { + Py_INCREF(original_descr); + return original_descr; + } + + PyArray_DTypeMeta *common = PyArray_CommonDType(in_DT, op_DT); + if (common == NULL) { + PyErr_Clear(); + /* This is fine. We simply assume the original descr is viable. */ + Py_INCREF(original_descr); + return original_descr; + } + /* A very likely case is that there is nothing to do: */ + if (NPY_DTYPE(original_descr) == common) { + Py_DECREF(common); + Py_INCREF(original_descr); + return original_descr; + } + if (!NPY_DT_is_parametric(common) || + /* In some paths we only have a scalar type, can't discover */ + scalar == NULL || + /* If the DType doesn't know the scalar type, guess at default. 
*/ + !NPY_DT_CALL_is_known_scalar_type(common, Py_TYPE(scalar))) { + if (common->singleton != NULL) { + res = common->singleton; + Py_INCREF(res); + } + else { + res = NPY_DT_CALL_default_descr(common); + } + } + else { + res = NPY_DT_CALL_discover_descr_from_pyobject(common, scalar); + } + + Py_DECREF(common); + return res; +} diff --git a/numpy/_core/src/multiarray/abstractdtypes.h b/numpy/_core/src/multiarray/abstractdtypes.h index 212994a422ea..3c96ffe8e0ef 100644 --- a/numpy/_core/src/multiarray/abstractdtypes.h +++ b/numpy/_core/src/multiarray/abstractdtypes.h @@ -1,6 +1,7 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ #define NUMPY_CORE_SRC_MULTIARRAY_ABSTRACTDTYPES_H_ +#include "numpy/ndarraytypes.h" #include "arrayobject.h" #include "dtypemeta.h" @@ -14,9 +15,12 @@ extern "C" { * may be necessary to make them (partially) public, to allow user-defined * dtypes to perform value based casting. */ -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyIntAbstractDType; -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatAbstractDType; -NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_IntAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_FloatAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_ComplexAbstractDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyLongDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyFloatDType; +NPY_NO_EXPORT extern PyArray_DTypeMeta PyArray_PyComplexDType; NPY_NO_EXPORT int initialize_and_map_pytypes_to_dtypes(void); @@ -38,42 +42,46 @@ static inline int npy_mark_tmp_array_if_pyscalar( PyObject *obj, PyArrayObject *arr, PyArray_DTypeMeta **dtype) { - /* - * We check the array dtype for two reasons: First, booleans are - * integer subclasses. Second, an int, float, or complex could have - * a custom DType registered, and then we should use that. - * Further, `np.float64` is a double subclass, so must reject it. 
- */ - if (PyLong_Check(obj) - && (PyArray_ISINTEGER(arr) || PyArray_ISOBJECT(arr))) { + if (PyLong_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_INT; if (dtype != NULL) { - Py_INCREF(&PyArray_PyIntAbstractDType); - Py_SETREF(*dtype, &PyArray_PyIntAbstractDType); + Py_INCREF(&PyArray_PyLongDType); + Py_SETREF(*dtype, &PyArray_PyLongDType); } return 1; } - else if (PyFloat_Check(obj) && !PyArray_IsScalar(obj, Double) - && PyArray_TYPE(arr) == NPY_DOUBLE) { + else if (PyFloat_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_FLOAT; if (dtype != NULL) { - Py_INCREF(&PyArray_PyFloatAbstractDType); - Py_SETREF(*dtype, &PyArray_PyFloatAbstractDType); + Py_INCREF(&PyArray_PyFloatDType); + Py_SETREF(*dtype, &PyArray_PyFloatDType); } return 1; } - else if (PyComplex_Check(obj) && !PyArray_IsScalar(obj, CDouble) - && PyArray_TYPE(arr) == NPY_CDOUBLE) { + else if (PyComplex_CheckExact(obj)) { ((PyArrayObject_fields *)arr)->flags |= NPY_ARRAY_WAS_PYTHON_COMPLEX; if (dtype != NULL) { - Py_INCREF(&PyArray_PyComplexAbstractDType); - Py_SETREF(*dtype, &PyArray_PyComplexAbstractDType); + Py_INCREF(&PyArray_PyComplexDType); + Py_SETREF(*dtype, &PyArray_PyComplexDType); } return 1; } return 0; } + +NPY_NO_EXPORT int +npy_update_operand_for_scalar( + PyArrayObject **operand, PyObject *scalar, PyArray_Descr *descr, + NPY_CASTING casting); + + +NPY_NO_EXPORT PyArray_Descr * +npy_find_descr_for_scalar( + PyObject *scalar, PyArray_Descr *original_descr, + PyArray_DTypeMeta *in_DT, PyArray_DTypeMeta *op_DT); + + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/alloc.c b/numpy/_core/src/multiarray/alloc.c index 0487fad1a942..396a7adb3148 100644 --- a/numpy/_core/src/multiarray/alloc.c +++ b/numpy/_core/src/multiarray/alloc.c @@ -11,6 +11,8 @@ #include "numpy/npy_common.h" #include "npy_config.h" #include "alloc.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" #include #ifdef NPY_OS_LINUX @@ 
-35,13 +37,11 @@ typedef struct { static cache_bucket datacache[NBUCKETS]; static cache_bucket dimcache[NBUCKETS_DIM]; -static int _madvise_hugepage = 1; - /* * This function tells whether NumPy attempts to call `madvise` with * `MADV_HUGEPAGE`. `madvise` is only ever used on linux, so the value - * of `_madvise_hugepage` may be ignored. + * of `madvise_hugepage` may be ignored. * * It is exposed to Python as `np._core.multiarray._get_madvise_hugepage`. */ @@ -49,7 +49,7 @@ NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { #ifdef NPY_OS_LINUX - if (_madvise_hugepage) { + if (npy_thread_unsafe_state.madvise_hugepage) { Py_RETURN_TRUE; } #endif @@ -59,20 +59,20 @@ _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) /* * This function enables or disables the use of `MADV_HUGEPAGE` on Linux - * by modifying the global static `_madvise_hugepage`. - * It returns the previous value of `_madvise_hugepage`. + * by modifying the global static `madvise_hugepage`. + * It returns the previous value of `madvise_hugepage`. * * It is exposed to Python as `np._core.multiarray._set_madvise_hugepage`. 
*/ NPY_NO_EXPORT PyObject * _set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj) { - int was_enabled = _madvise_hugepage; + int was_enabled = npy_thread_unsafe_state.madvise_hugepage; int enabled = PyObject_IsTrue(enabled_obj); if (enabled < 0) { return NULL; } - _madvise_hugepage = enabled; + npy_thread_unsafe_state.madvise_hugepage = enabled; if (was_enabled) { Py_RETURN_TRUE; } @@ -96,11 +96,13 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, assert((esz == 1 && cache == datacache) || (esz == sizeof(npy_intp) && cache == dimcache)); assert(PyGILState_Check()); +#ifndef Py_GIL_DISABLED if (nelem < msz) { if (cache[nelem].available > 0) { return cache[nelem].ptrs[--(cache[nelem].available)]; } } +#endif p = alloc(nelem * esz); if (p) { #ifdef _PyPyGC_AddMemoryPressure @@ -108,7 +110,8 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz, #endif #ifdef NPY_OS_LINUX /* allow kernel allocating huge pages for large arrays */ - if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) && _madvise_hugepage) { + if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u))) && + npy_thread_unsafe_state.madvise_hugepage) { npy_uintp offset = 4096u - (npy_uintp)p % (4096u); npy_uintp length = nelem * esz - offset; /** @@ -131,12 +134,14 @@ _npy_free_cache(void * p, npy_uintp nelem, npy_uint msz, cache_bucket * cache, void (*dealloc)(void *)) { assert(PyGILState_Check()); +#ifndef Py_GIL_DISABLED if (p != NULL && nelem < msz) { if (cache[nelem].available < NCACHE) { cache[nelem].ptrs[cache[nelem].available++] = p; return; } } +#endif dealloc(p); } @@ -233,7 +238,11 @@ PyDataMem_NEW(size_t size) assert(size != 0); result = malloc(size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -246,7 +255,11 @@ PyDataMem_NEW_ZEROED(size_t nmemb, size_t size) void *result; result = calloc(nmemb, size); 
- PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + free(result); + return NULL; + } return result; } @@ -269,11 +282,13 @@ PyDataMem_RENEW(void *ptr, size_t size) void *result; assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = realloc(ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + free(result); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } @@ -350,13 +365,18 @@ NPY_NO_EXPORT void * PyDataMem_UserNEW(size_t size, PyObject *mem_handler) { void *result; - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { return NULL; } assert(size != 0); result = handler->allocator.malloc(handler->allocator.ctx, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -364,12 +384,17 @@ NPY_NO_EXPORT void * PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) { void *result; - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { return NULL; } result = handler->allocator.calloc(handler->allocator.ctx, nmemb, size); - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + int ret = 
PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, nmemb * size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; + } return result; } @@ -377,7 +402,8 @@ PyDataMem_UserNEW_ZEROED(size_t nmemb, size_t size, PyObject *mem_handler) NPY_NO_EXPORT void PyDataMem_UserFREE(void *ptr, size_t size, PyObject *mem_handler) { - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { WARN_NO_RETURN(PyExc_RuntimeWarning, "Could not get pointer to 'mem_handler' from PyCapsule"); @@ -391,17 +417,20 @@ NPY_NO_EXPORT void * PyDataMem_UserRENEW(void *ptr, size_t size, PyObject *mem_handler) { void *result; - PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + PyDataMem_Handler *handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { return NULL; } assert(size != 0); + PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); result = handler->allocator.realloc(handler->allocator.ctx, ptr, size); - if (result != ptr) { - PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr); + int ret = PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); + if (ret == -1) { + handler->allocator.free(handler->allocator.ctx, result, size); + return NULL; } - PyTraceMalloc_Track(NPY_TRACE_DOMAIN, (npy_uintp)result, size); return result; } @@ -423,6 +452,10 @@ PyDataMem_SetHandler(PyObject *handler) if (handler == NULL) { handler = PyDataMem_DefaultHandler; } + if (!PyCapsule_IsValid(handler, MEM_HANDLER_CAPSULE_NAME)) { + PyErr_SetString(PyExc_ValueError, "Capsule must be named 'mem_handler'"); + return NULL; + } token = PyContextVar_Set(current_handler, handler); if (token == NULL) { Py_DECREF(old_handler); @@ -473,7 +506,8 @@ 
get_handler_name(PyObject *NPY_UNUSED(self), PyObject *args) return NULL; } } - handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { Py_DECREF(mem_handler); return NULL; @@ -510,7 +544,8 @@ get_handler_version(PyObject *NPY_UNUSED(self), PyObject *args) return NULL; } } - handler = (PyDataMem_Handler *) PyCapsule_GetPointer(mem_handler, "mem_handler"); + handler = (PyDataMem_Handler *) PyCapsule_GetPointer( + mem_handler, MEM_HANDLER_CAPSULE_NAME); if (handler == NULL) { Py_DECREF(mem_handler); return NULL; diff --git a/numpy/_core/src/multiarray/alloc.h b/numpy/_core/src/multiarray/alloc.h index 186eb54870ab..aed2095fe73c 100644 --- a/numpy/_core/src/multiarray/alloc.h +++ b/numpy/_core/src/multiarray/alloc.h @@ -6,6 +6,7 @@ #include "numpy/ndarraytypes.h" #define NPY_TRACE_DOMAIN 389047 +#define MEM_HANDLER_CAPSULE_NAME "mem_handler" NPY_NO_EXPORT PyObject * _get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)); diff --git a/numpy/_core/src/multiarray/array_api_standard.c b/numpy/_core/src/multiarray/array_api_standard.c new file mode 100644 index 000000000000..76612cff36fb --- /dev/null +++ b/numpy/_core/src/multiarray/array_api_standard.c @@ -0,0 +1,78 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include + + +NPY_NO_EXPORT PyObject * +array_device(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)) +{ + return PyUnicode_FromString("cpu"); +} + +NPY_NO_EXPORT PyObject * +array_to_device(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"", "stream", NULL}; + char *device = ""; + PyObject *stream = Py_None; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|$O:to_device", kwlist, + &device, + &stream)) { + return NULL; + } + + if (stream != Py_None) { + PyErr_SetString(PyExc_ValueError, + "The stream argument in to_device() " 
+ "is not supported"); + return NULL; + } + + if (strcmp(device, "cpu") != 0) { + PyErr_Format(PyExc_ValueError, + "Unsupported device: %s. Only 'cpu' is accepted.", device); + return NULL; + } + + Py_INCREF(self); + return self; +} + +NPY_NO_EXPORT PyObject * +array_array_namespace(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"api_version", NULL}; + PyObject *array_api_version = Py_None; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|$O:__array_namespace__", kwlist, + &array_api_version)) { + return NULL; + } + + if (array_api_version != Py_None) { + if (!PyUnicode_Check(array_api_version)) + { + PyErr_Format(PyExc_ValueError, + "Only None and strings are allowed as the Array API version, " + "but received: %S.", array_api_version); + return NULL; + } else if (PyUnicode_CompareWithASCIIString(array_api_version, "2021.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0 && + PyUnicode_CompareWithASCIIString(array_api_version, "2023.12") != 0) + { + PyErr_Format(PyExc_ValueError, + "Version \"%U\" of the Array API Standard is not supported.", + array_api_version); + return NULL; + } + } + + PyObject *numpy_module = PyImport_ImportModule("numpy"); + if (numpy_module == NULL){ + return NULL; + } + + return numpy_module; +} diff --git a/numpy/_core/src/multiarray/array_api_standard.h b/numpy/_core/src/multiarray/array_api_standard.h new file mode 100644 index 000000000000..6776863701b8 --- /dev/null +++ b/numpy/_core/src/multiarray/array_api_standard.h @@ -0,0 +1,14 @@ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ + + +NPY_NO_EXPORT PyObject * +array_device(PyObject *NPY_UNUSED(self), void *NPY_UNUSED(ignored)); + +NPY_NO_EXPORT PyObject * +array_to_device(PyObject *self, PyObject *args, PyObject *kwds); + +NPY_NO_EXPORT PyObject * +array_array_namespace(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds); + +#endif 
/* NUMPY_CORE_SRC_MULTIARRAY_ARRAY_API_STANDARD_H_ */ diff --git a/numpy/_core/src/multiarray/array_assign_array.c b/numpy/_core/src/multiarray/array_assign_array.c index 687757190d00..8886d1cacb40 100644 --- a/numpy/_core/src/multiarray/array_assign_array.c +++ b/numpy/_core/src/multiarray/array_assign_array.c @@ -17,7 +17,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "methods.h" diff --git a/numpy/_core/src/multiarray/array_assign_scalar.c b/numpy/_core/src/multiarray/array_assign_scalar.c index d1919fafb4bd..0199ba969eb9 100644 --- a/numpy/_core/src/multiarray/array_assign_scalar.c +++ b/numpy/_core/src/multiarray/array_assign_scalar.c @@ -17,7 +17,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "methods.h" @@ -243,8 +243,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst, } /* Check the casting rule */ - if (!can_cast_scalar_to(src_dtype, src_data, - PyArray_DESCR(dst), casting)) { + if (!PyArray_CanCastTypeTo(src_dtype, PyArray_DESCR(dst), casting)) { npy_set_invalid_cast_error( src_dtype, PyArray_DESCR(dst), casting, NPY_TRUE); return -1; diff --git a/numpy/_core/src/multiarray/array_coercion.c b/numpy/_core/src/multiarray/array_coercion.c index c2b924e093b5..0cffcc6bab22 100644 --- a/numpy/_core/src/multiarray/array_coercion.c +++ b/numpy/_core/src/multiarray/array_coercion.c @@ -6,6 +6,7 @@ #include #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "lowlevel_strided_loops.h" #include "numpy/arrayobject.h" @@ -99,6 +100,7 @@ enum _dtype_discovery_flags { DISCOVER_TUPLES_AS_ELEMENTS = 1 << 4, MAX_DIMS_WAS_REACHED = 1 << 5, DESCRIPTOR_WAS_SET = 1 << 6, + COPY_WAS_CREATED_BY__ARRAY__ = 1 << 7, }; @@ -223,24 +225,23 @@ npy_discover_dtype_from_pytype(PyTypeObject *pytype) PyObject *DType; if (pytype == &PyArray_Type) { - DType = Py_None; + DType = Py_NewRef(Py_None); } else if (pytype == 
&PyFloat_Type) { - DType = (PyObject *)&PyArray_PyFloatAbstractDType; + DType = Py_NewRef((PyObject *)&PyArray_PyFloatDType); } else if (pytype == &PyLong_Type) { - DType = (PyObject *)&PyArray_PyIntAbstractDType; + DType = Py_NewRef((PyObject *)&PyArray_PyLongDType); } else { - DType = PyDict_GetItem(_global_pytype_to_type_dict, - (PyObject *)pytype); + int res = PyDict_GetItemRef(_global_pytype_to_type_dict, + (PyObject *)pytype, (PyObject **)&DType); - if (DType == NULL) { - /* the python type is not known */ + if (res <= 0) { + /* the python type is not known or an error was set */ return NULL; } } - Py_INCREF(DType); assert(DType == Py_None || PyObject_TypeCheck(DType, (PyTypeObject *)&PyArrayDTypeMeta_Type)); return (PyArray_DTypeMeta *)DType; } @@ -614,10 +615,13 @@ update_shape(int curr_ndim, int *max_ndim, return success; } - +#ifndef Py_GIL_DISABLED #define COERCION_CACHE_CACHE_SIZE 5 static int _coercion_cache_num = 0; static coercion_cache_obj *_coercion_cache_cache[COERCION_CACHE_CACHE_SIZE]; +#else +#define COERCION_CACHE_CACHE_SIZE 0 +#endif /* * Steals a reference to the object. 
@@ -628,11 +632,14 @@ npy_new_coercion_cache( coercion_cache_obj ***next_ptr, int ndim) { coercion_cache_obj *cache; +#if COERCION_CACHE_CACHE_SIZE > 0 if (_coercion_cache_num > 0) { _coercion_cache_num--; cache = _coercion_cache_cache[_coercion_cache_num]; } - else { + else +#endif + { cache = PyMem_Malloc(sizeof(coercion_cache_obj)); } if (cache == NULL) { @@ -661,11 +668,14 @@ npy_unlink_coercion_cache(coercion_cache_obj *current) { coercion_cache_obj *next = current->next; Py_DECREF(current->arr_or_sequence); +#if COERCION_CACHE_CACHE_SIZE > 0 if (_coercion_cache_num < COERCION_CACHE_CACHE_SIZE) { _coercion_cache_cache[_coercion_cache_num] = current; _coercion_cache_num++; } - else { + else +#endif + { PyMem_Free(current); } return next; @@ -1027,8 +1037,9 @@ PyArray_DiscoverDTypeAndShape_Recursive( /* __array__ may be passed the requested descriptor if provided */ requested_descr = *out_descr; } + int was_copied_by__array__ = 0; arr = (PyArrayObject *)_array_from_array_like(obj, - requested_descr, 0, NULL, copy); + requested_descr, 0, NULL, copy, &was_copied_by__array__); if (arr == NULL) { return -1; } @@ -1036,6 +1047,9 @@ PyArray_DiscoverDTypeAndShape_Recursive( Py_DECREF(arr); arr = NULL; } + if (was_copied_by__array__ == 1) { + *flags |= COPY_WAS_CREATED_BY__ARRAY__; + } } if (arr != NULL) { /* @@ -1170,6 +1184,15 @@ PyArray_DiscoverDTypeAndShape_Recursive( return -1; } + /* + * For a sequence we need to make a copy of the final aggregate anyway. + * There's no need to pass explicit `copy=True`, so we switch + * to `copy=None` (copy if needed). + */ + if (copy == 1) { + copy = -1; + } + /* Recursive call for each sequence item */ for (Py_ssize_t i = 0; i < size; i++) { max_dims = PyArray_DiscoverDTypeAndShape_Recursive( @@ -1217,6 +1240,8 @@ PyArray_DiscoverDTypeAndShape_Recursive( * to choose a default. * @param copy Specifies the copy behavior. -1 is corresponds to copy=None, * 0 to copy=False, and 1 to copy=True in the Python API. 
+ * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy was + * made by implementor. * @return dimensions of the discovered object or -1 on error. * WARNING: If (and only if) the output is a single array, the ndim * returned _can_ exceed the maximum allowed number of dimensions. @@ -1229,7 +1254,7 @@ PyArray_DiscoverDTypeAndShape( npy_intp out_shape[NPY_MAXDIMS], coercion_cache_obj **coercion_cache, PyArray_DTypeMeta *fixed_DType, PyArray_Descr *requested_descr, - PyArray_Descr **out_descr, int copy) + PyArray_Descr **out_descr, int copy, int *was_copied_by__array__) { coercion_cache_obj **coercion_cache_head = coercion_cache; *coercion_cache = NULL; @@ -1282,6 +1307,10 @@ PyArray_DiscoverDTypeAndShape( goto fail; } + if (was_copied_by__array__ != NULL && flags & COPY_WAS_CREATED_BY__ARRAY__) { + *was_copied_by__array__ = 1; + } + if (NPY_UNLIKELY(flags & FOUND_RAGGED_ARRAY)) { /* * If max-dims was reached and the dimensions reduced, this is ragged. @@ -1396,7 +1425,7 @@ _discover_array_parameters(PyObject *NPY_UNUSED(self), int ndim = PyArray_DiscoverDTypeAndShape( obj, NPY_MAXDIMS, shape, &coercion_cache, - dt_info.dtype, dt_info.descr, (PyArray_Descr **)&out_dtype, 0); + dt_info.dtype, dt_info.descr, (PyArray_Descr **)&out_dtype, 0, NULL); Py_XDECREF(dt_info.dtype); Py_XDECREF(dt_info.descr); if (ndim < 0) { diff --git a/numpy/_core/src/multiarray/array_coercion.h b/numpy/_core/src/multiarray/array_coercion.h index e6639ba1bba9..d8f72903a67c 100644 --- a/numpy/_core/src/multiarray/array_coercion.h +++ b/numpy/_core/src/multiarray/array_coercion.h @@ -40,7 +40,7 @@ PyArray_DiscoverDTypeAndShape( npy_intp out_shape[NPY_MAXDIMS], coercion_cache_obj **coercion_cache, PyArray_DTypeMeta *fixed_DType, PyArray_Descr *requested_descr, - PyArray_Descr **out_descr, int copy); + PyArray_Descr **out_descr, int copy, int *was_copied_by__array__); NPY_NO_EXPORT PyObject * _discover_array_parameters(PyObject *NPY_UNUSED(self), diff --git 
a/numpy/_core/src/multiarray/array_converter.c b/numpy/_core/src/multiarray/array_converter.c index fd7ccd767056..496173038954 100644 --- a/numpy/_core/src/multiarray/array_converter.c +++ b/numpy/_core/src/multiarray/array_converter.c @@ -21,12 +21,12 @@ #include "abstractdtypes.h" #include "convert_datatype.h" #include "descriptor.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" #include "ctors.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "array_assign.h" #include "common.h" @@ -186,8 +186,8 @@ static int pyscalar_mode_conv(PyObject *obj, scalar_policy *policy) { PyObject *strings[3] = { - npy_ma_str_convert, npy_ma_str_preserve, - npy_ma_str_convert_if_no_array}; + npy_interned_str.convert, npy_interned_str.preserve, + npy_interned_str.convert_if_no_array}; /* First quick pass using the identity (should practically always match) */ for (int i = 0; i < 3; i++) { @@ -351,8 +351,8 @@ array_converter_result_type(PyArrayArrayConverterObject *self, "extra_dtype and ensure_inexact are mutually exclusive."); goto finish; } - Py_INCREF(&PyArray_PyFloatAbstractDType); - dt_info.dtype = &PyArray_PyFloatAbstractDType; + Py_INCREF(&PyArray_PyFloatDType); + dt_info.dtype = &PyArray_PyFloatDType; } if (dt_info.dtype != NULL) { diff --git a/numpy/_core/src/multiarray/array_method.c b/numpy/_core/src/multiarray/array_method.c index b262c1f263c2..f09e560b0607 100644 --- a/numpy/_core/src/multiarray/array_method.c +++ b/numpy/_core/src/multiarray/array_method.c @@ -31,6 +31,7 @@ #define _MULTIARRAYMODULE #include +#include #include "arrayobject.h" #include "array_coercion.h" #include "array_method.h" @@ -59,8 +60,8 @@ static NPY_CASTING default_resolve_descriptors( PyArrayMethodObject *method, - PyArray_DTypeMeta **dtypes, - PyArray_Descr **input_descrs, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *input_descrs, PyArray_Descr **output_descrs, npy_intp *view_offset) { @@ -139,7 +140,7 @@ npy_default_get_strided_loop( 
PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArrayMethodObject *meth = context->method; *flags = meth->flags & NPY_METH_RUNTIME_FLAGS; *out_transferdata = NULL; @@ -412,11 +413,9 @@ PyArrayMethod_FromSpec(PyArrayMethod_Spec *spec) /** * Create a new ArrayMethod (internal version). * - * @param name A name for the individual method, may be NULL. * @param spec A filled context object to pass generic information about * the method (such as usually needing the API, and the DTypes). * Unused fields must be NULL. - * @param slots Slots with the correct pair of IDs and (function) pointers. * @param private Some slots are currently considered private, if not true, * these will be rejected. * diff --git a/numpy/_core/src/multiarray/arrayfunction_override.c b/numpy/_core/src/multiarray/arrayfunction_override.c index 707024f94c04..e4248ad29aba 100644 --- a/numpy/_core/src/multiarray/arrayfunction_override.c +++ b/numpy/_core/src/multiarray/arrayfunction_override.c @@ -4,24 +4,14 @@ #include #include "structmember.h" -#include "npy_pycompat.h" +#include "numpy/ndarraytypes.h" #include "get_attr_string.h" #include "npy_import.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "arrayfunction_override.h" -/* Return the ndarray.__array_function__ method. */ -static PyObject * -get_ndarray_array_function(void) -{ - PyObject* method = PyObject_GetAttrString((PyObject *)&PyArray_Type, - "__array_function__"); - assert(method != NULL); - return method; -} - - /* * Get an object's __array_function__ method in the fastest way possible. * Never raises an exception. Returns NULL if the method doesn't exist. 
@@ -29,19 +19,13 @@ get_ndarray_array_function(void) static PyObject * get_array_function(PyObject *obj) { - static PyObject *ndarray_array_function = NULL; - - if (ndarray_array_function == NULL) { - ndarray_array_function = get_ndarray_array_function(); - } - /* Fast return for ndarray */ if (PyArray_CheckExact(obj)) { - Py_INCREF(ndarray_array_function); - return ndarray_array_function; + Py_INCREF(npy_static_pydata.ndarray_array_function); + return npy_static_pydata.ndarray_array_function; } - PyObject *array_function = PyArray_LookupSpecial(obj, npy_ma_str_array_function); + PyObject *array_function = PyArray_LookupSpecial(obj, npy_interned_str.array_function); if (array_function == NULL && PyErr_Occurred()) { PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ } @@ -142,12 +126,7 @@ get_implementing_args_and_methods(PyObject *relevant_args, static int is_default_array_function(PyObject *obj) { - static PyObject *ndarray_array_function = NULL; - - if (ndarray_array_function == NULL) { - ndarray_array_function = get_ndarray_array_function(); - } - return obj == ndarray_array_function; + return obj == npy_static_pydata.ndarray_array_function; } @@ -175,7 +154,7 @@ array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, } } - PyObject *implementation = PyObject_GetAttr(func, npy_ma_str_implementation); + PyObject *implementation = PyObject_GetAttr(func, npy_interned_str.implementation); if (implementation == NULL) { return NULL; } @@ -252,14 +231,14 @@ get_args_and_kwargs( static void set_no_matching_types_error(PyObject *public_api, PyObject *types) { - static PyObject *errmsg_formatter = NULL; /* No acceptable override found, raise TypeError. 
*/ - npy_cache_import("numpy._core._internal", - "array_function_errmsg_formatter", - &errmsg_formatter); - if (errmsg_formatter != NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", + "array_function_errmsg_formatter", + &npy_runtime_imports.array_function_errmsg_formatter) == 0) { PyObject *errmsg = PyObject_CallFunctionObjArgs( - errmsg_formatter, public_api, types, NULL); + npy_runtime_imports.array_function_errmsg_formatter, + public_api, types, NULL); if (errmsg != NULL) { PyErr_SetObject(PyExc_TypeError, errmsg); Py_DECREF(errmsg); @@ -321,12 +300,12 @@ array_implement_c_array_function_creation( } /* The like argument must be present in the keyword arguments, remove it */ - if (PyDict_DelItem(kwargs, npy_ma_str_like) < 0) { + if (PyDict_DelItem(kwargs, npy_interned_str.like) < 0) { goto finish; } /* Fetch the actual symbol (the long way right now) */ - numpy_module = PyImport_Import(npy_ma_str_numpy); + numpy_module = PyImport_Import(npy_interned_str.numpy); if (numpy_module == NULL) { goto finish; } diff --git a/numpy/_core/src/multiarray/arrayobject.c b/numpy/_core/src/multiarray/arrayobject.c index 3001f84edf05..15596f1f86a2 100644 --- a/numpy/_core/src/multiarray/arrayobject.c +++ b/numpy/_core/src/multiarray/arrayobject.c @@ -62,9 +62,7 @@ maintainer email: oliphant.travis@ieee.org #include "binop_override.h" #include "array_coercion.h" - - -NPY_NO_EXPORT npy_bool numpy_warn_if_no_mem_policy = 0; +#include "multiarraymodule.h" /*NUMPY_API Compute the size of an array (in number of items) @@ -251,7 +249,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object) */ ndim = PyArray_DiscoverDTypeAndShape(src_object, PyArray_NDIM(dest), dims, &cache, - NPY_DTYPE(PyArray_DESCR(dest)), PyArray_DESCR(dest), &dtype, 1); + NPY_DTYPE(PyArray_DESCR(dest)), PyArray_DESCR(dest), &dtype, 1, NULL); if (ndim < 0) { return -1; } @@ -429,7 +427,7 @@ array_dealloc(PyArrayObject *self) } } if (fa->mem_handler == NULL) { - if 
(numpy_warn_if_no_mem_policy) { + if (npy_thread_unsafe_state.warn_if_no_mem_policy) { char const *msg = "Trying to dealloc data, but a memory policy " "is not set. If you take ownership of the data, you must " "set a base owning the data (e.g. a PyCapsule)."; @@ -927,7 +925,8 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op) */ if (result == NULL && (cmp_op == Py_EQ || cmp_op == Py_NE) - && PyErr_ExceptionMatches(npy_UFuncNoLoopError)) { + && PyErr_ExceptionMatches( + npy_static_pydata._UFuncNoLoopError)) { PyErr_Clear(); PyArrayObject *array_other = (PyArrayObject *)PyArray_FROM_O(other); diff --git a/numpy/_core/src/multiarray/arrayobject.h b/numpy/_core/src/multiarray/arrayobject.h index 476b87a9d7e1..8d6f84faa6b1 100644 --- a/numpy/_core/src/multiarray/arrayobject.h +++ b/numpy/_core/src/multiarray/arrayobject.h @@ -9,8 +9,6 @@ extern "C" { #endif -extern NPY_NO_EXPORT npy_bool numpy_warn_if_no_mem_policy; - NPY_NO_EXPORT PyObject * _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); @@ -55,6 +53,14 @@ static const int NPY_ARRAY_WAS_PYTHON_COMPLEX = (1 << 28); static const int NPY_ARRAY_WAS_INT_AND_REPLACED = (1 << 27); static const int NPY_ARRAY_WAS_PYTHON_LITERAL = (1 << 30 | 1 << 29 | 1 << 28); +/* + * This flag allows same kind casting, similar to NPY_ARRAY_FORCECAST. + * + * An array never has this flag set; they're only used as parameter + * flags to the various FromAny functions. 
+ */ +static const int NPY_ARRAY_SAME_KIND_CASTING = (1 << 26); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/arraytypes.c.src b/numpy/_core/src/multiarray/arraytypes.c.src index c266979c6f6f..9524be8a0c89 100644 --- a/numpy/_core/src/multiarray/arraytypes.c.src +++ b/numpy/_core/src/multiarray/arraytypes.c.src @@ -18,6 +18,7 @@ #include "npy_config.h" #include "npy_sort.h" +#include "abstractdtypes.h" #include "common.h" #include "ctors.h" #include "convert_datatype.h" @@ -41,6 +42,7 @@ #include "arraytypes.h" #include "umathmodule.h" +#include "npy_static_data.h" /* * Define a stack allocated dummy array with only the minimum information set: @@ -273,10 +275,10 @@ static int #endif ) { PyArray_Descr *descr = PyArray_DescrFromType(NPY_@TYPE@); - - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION || ( - npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN - && !npy_give_promotion_warnings())) { + int promotion_state = get_npy_promotion_state(); + if (promotion_state == NPY_USE_LEGACY_PROMOTION || ( + promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN + && !npy_give_promotion_warnings())) { /* * This path will be taken both for the "promotion" case such as * `uint8_arr + 123` as well as the assignment case. @@ -4184,8 +4186,7 @@ NPY_NO_EXPORT _PyArray_LegacyDescr @from@_Descr = { /* The smallest type number is ?, the largest bounded by 'z'. 
*/ #define _MAX_LETTER ('z' + 1) -static npy_int16 _letter_to_num[_MAX_LETTER - '?']; -#define LETTER_TO_NUM(letter) _letter_to_num[letter - '?'] +#define LETTER_TO_NUM(letter) npy_static_cdata._letter_to_num[letter - '?'] static _PyArray_LegacyDescr *_builtin_descrs[] = { &BOOL_Descr, @@ -4221,6 +4222,7 @@ NPY_NO_EXPORT PyArray_Descr * PyArray_DescrFromType(int type) { PyArray_Descr *ret = NULL; + npy_bool is_stringdtype = (type == NPY_VSTRING || type == NPY_VSTRINGLTR); if (type < 0) { /* @@ -4232,7 +4234,7 @@ PyArray_DescrFromType(int type) */ ret = NULL; } - else if (type == NPY_VSTRING || type == NPY_VSTRINGLTR) { + else if (is_stringdtype) { ret = (PyArray_Descr *)new_stringdtype_instance(NULL, 1); } // builtin legacy dtypes @@ -4279,7 +4281,7 @@ PyArray_DescrFromType(int type) PyErr_SetString(PyExc_ValueError, "Invalid data-type for array"); } - else { + else if (!is_stringdtype) { Py_INCREF(ret); } @@ -4351,10 +4353,16 @@ set_typeinfo(PyObject *dict) * CFloat, CDouble, CLongDouble, * Object, String, Unicode, Void, * Datetime, Timedelta# + * #scls = PyArrayDescr_Type, + * PyArray_IntAbstractDType*10, + * PyArray_FloatAbstractDType*4, + * PyArray_ComplexAbstractDType*3, + * PyArrayDescr_Type*6 # */ if (dtypemeta_wrap_legacy_descriptor( _builtin_descrs[NPY_@NAME@], &_Py@Name@_ArrFuncs, + (PyTypeObject *)&@scls@, "numpy.dtypes." NPY_@NAME@_Name "DType", #ifdef NPY_@NAME@_alias "numpy.dtypes." 
NPY_@NAME@_Alias "DType" diff --git a/numpy/_core/src/multiarray/arraywrap.c b/numpy/_core/src/multiarray/arraywrap.c index d72fab0e4c98..ae7b6e987ff8 100644 --- a/numpy/_core/src/multiarray/arraywrap.c +++ b/numpy/_core/src/multiarray/arraywrap.c @@ -12,7 +12,7 @@ #include "get_attr_string.h" #include "arraywrap.h" -#include "multiarraymodule.h" +#include "npy_static_data.h" /* @@ -57,7 +57,7 @@ npy_find_array_wrap( } } else { - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str_array_wrap); + PyObject *new_wrap = PyArray_LookupSpecial_OnInstance(obj, npy_interned_str.array_wrap); if (new_wrap == NULL) { if (PyErr_Occurred()) { goto fail; @@ -159,8 +159,8 @@ npy_apply_wrap( } else { /* Replace passed wrap/wrap_type (borrowed refs) with new_wrap/type. */ - PyObject *new_wrap = PyArray_LookupSpecial_OnInstance( - original_out, npy_ma_str_array_wrap); + new_wrap = PyArray_LookupSpecial_OnInstance( + original_out, npy_interned_str.array_wrap); if (new_wrap != NULL) { wrap = new_wrap; wrap_type = (PyObject *)Py_TYPE(original_out); @@ -177,11 +177,13 @@ npy_apply_wrap( */ if (!return_scalar && !force_wrap && (PyObject *)Py_TYPE(obj) == wrap_type) { + Py_XDECREF(new_wrap); Py_INCREF(obj); return obj; } if (wrap == Py_None) { + Py_XDECREF(new_wrap); Py_INCREF(obj); if (return_scalar) { /* @@ -239,8 +241,9 @@ npy_apply_wrap( wrap, arr, py_context, (return_scalar && PyArray_NDIM(arr) == 0) ? 
Py_True : Py_False, NULL); - if (res != NULL) + if (res != NULL) { goto finish; + } else if (!PyErr_ExceptionMatches(PyExc_TypeError)) { goto finish; } diff --git a/numpy/_core/src/multiarray/calculation.c b/numpy/_core/src/multiarray/calculation.c index 73d8ba58bd05..cf77ce90902d 100644 --- a/numpy/_core/src/multiarray/calculation.c +++ b/numpy/_core/src/multiarray/calculation.c @@ -11,7 +11,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "number.h" diff --git a/numpy/_core/src/multiarray/common.c b/numpy/_core/src/multiarray/common.c index 5804c9cc9148..655122ff7f09 100644 --- a/numpy/_core/src/multiarray/common.c +++ b/numpy/_core/src/multiarray/common.c @@ -7,7 +7,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "abstractdtypes.h" @@ -48,7 +48,7 @@ _array_find_python_scalar_type(PyObject *op) } else if (PyLong_Check(op)) { return NPY_DT_CALL_discover_descr_from_pyobject( - &PyArray_PyIntAbstractDType, op); + &PyArray_PyLongDType, op); } return NULL; } @@ -119,7 +119,7 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype) int ndim; ndim = PyArray_DiscoverDTypeAndShape( - obj, maxdims, shape, &cache, NULL, NULL, out_dtype, 1); + obj, maxdims, shape, &cache, NULL, NULL, out_dtype, 1, NULL); if (ndim < 0) { return -1; } diff --git a/numpy/_core/src/multiarray/common.h b/numpy/_core/src/multiarray/common.h index 1a01224b1670..19fba9e66d01 100644 --- a/numpy/_core/src/multiarray/common.h +++ b/numpy/_core/src/multiarray/common.h @@ -8,6 +8,7 @@ #include "npy_cpu_dispatch.h" #include "numpy/npy_cpu.h" +#include "npy_static_data.h" #include "npy_import.h" #include @@ -139,25 +140,14 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix) { /* Check that index is valid, taking into account negative indices */ if (NPY_UNLIKELY((*axis < -ndim) || (*axis >= ndim))) { - /* - * Load the exception type, if we don't already 
have it. Unfortunately - * we don't have access to npy_cache_import here - */ - static PyObject *AxisError_cls = NULL; - PyObject *exc; - - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - /* Invoke the AxisError constructor */ - exc = PyObject_CallFunction(AxisError_cls, "iiO", - *axis, ndim, msg_prefix); + PyObject *exc = PyObject_CallFunction( + npy_static_pydata.AxisError, "iiO", *axis, ndim, + msg_prefix); if (exc == NULL) { return -1; } - PyErr_SetObject(AxisError_cls, exc); + PyErr_SetObject(npy_static_pydata.AxisError, exc); Py_DECREF(exc); return -1; diff --git a/numpy/_core/src/multiarray/common_dtype.c b/numpy/_core/src/multiarray/common_dtype.c index f2ec41e0c7aa..a65aba060a55 100644 --- a/numpy/_core/src/multiarray/common_dtype.c +++ b/numpy/_core/src/multiarray/common_dtype.c @@ -10,6 +10,7 @@ #include "convert_datatype.h" #include "dtypemeta.h" #include "abstractdtypes.h" +#include "npy_static_data.h" /* @@ -63,7 +64,7 @@ PyArray_CommonDType(PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2) } if (common_dtype == (PyArray_DTypeMeta *)Py_NotImplemented) { Py_DECREF(Py_NotImplemented); - PyErr_Format(npy_DTypePromotionError, + PyErr_Format(npy_static_pydata.DTypePromotionError, "The DTypes %S and %S do not have a common DType. 
" "For example they cannot be stored in a single array unless " "the dtype is `object`.", dtype1, dtype2); @@ -131,7 +132,7 @@ reduce_dtypes_to_most_knowledgeable( } if (res == (PyArray_DTypeMeta *)Py_NotImplemented) { - /* guess at other being more "knowledgable" */ + /* guess at other being more "knowledgeable" */ PyArray_DTypeMeta *tmp = dtypes[low]; dtypes[low] = dtypes[high]; dtypes[high] = tmp; @@ -284,7 +285,7 @@ PyArray_PromoteDTypeSequence( Py_INCREF(dtypes_in[l]); PyTuple_SET_ITEM(dtypes_in_tuple, l, (PyObject *)dtypes_in[l]); } - PyErr_Format(npy_DTypePromotionError, + PyErr_Format(npy_static_pydata.DTypePromotionError, "The DType %S could not be promoted by %S. This means that " "no common DType exists for the given inputs. " "For example they cannot be stored in a single array unless " diff --git a/numpy/_core/src/multiarray/compiled_base.c b/numpy/_core/src/multiarray/compiled_base.c index 27455797cfa3..48524aff4dac 100644 --- a/numpy/_core/src/multiarray/compiled_base.c +++ b/numpy/_core/src/multiarray/compiled_base.c @@ -101,7 +101,7 @@ minmax(const npy_intp *data, npy_intp data_len, npy_intp *mn, npy_intp *mx) * arr_bincount is registered as bincount. * * bincount accepts one, two or three arguments. The first is an array of - * non-negative integers The second, if present, is an array of weights, + * non-negative integers. The second, if present, is an array of weights, * which must be promotable to double. Call these arguments list and * weight. Both must be one-dimensional with len(weight) == len(list). If * weight is not present then bincount(list)[i] is the number of occurrences @@ -130,9 +130,57 @@ arr_bincount(PyObject *NPY_UNUSED(self), PyObject *const *args, return NULL; } - lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); + /* + * Accepting arbitrary lists that are cast to NPY_INTP, possibly + * losing precision because of unsafe casts, is deprecated. 
We + * continue to use PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1) + * to convert the input during the deprecation period, but we also + * check to see if a deprecation warning should be generated. + * Some refactoring will be needed when the deprecation expires. + */ + + /* Check to see if we should generate a deprecation warning. */ + if (!PyArray_Check(list)) { + /* list is not a numpy array, so convert it. */ + PyArrayObject *tmp1 = (PyArrayObject *)PyArray_FromAny( + list, NULL, 1, 1, + NPY_ARRAY_DEFAULT, NULL); + if (tmp1 == NULL) { + goto fail; + } + if (PyArray_SIZE(tmp1) > 0) { + /* The input is not empty, so convert it to NPY_INTP. */ + lst = (PyArrayObject *)PyArray_ContiguousFromAny((PyObject *)tmp1, + NPY_INTP, 1, 1); + Py_DECREF(tmp1); + if (lst == NULL) { + /* Failed converting to NPY_INTP. */ + if (PyErr_ExceptionMatches(PyExc_TypeError)) { + PyErr_Clear(); + /* Deprecated 2024-08-02, NumPy 2.1 */ + if (DEPRECATE("Non-integer input passed to bincount. In a " + "future version of NumPy, this will be an " + "error. (Deprecated NumPy 2.1)") < 0) { + goto fail; + } + } + else { + /* Failure was not a TypeError. */ + goto fail; + } + } + } + else { + /* Got an empty list. 
*/ + Py_DECREF(tmp1); + } + } + if (lst == NULL) { - goto fail; + lst = (PyArrayObject *)PyArray_ContiguousFromAny(list, NPY_INTP, 1, 1); + if (lst == NULL) { + goto fail; + } } len = PyArray_SIZE(lst); @@ -1414,14 +1462,7 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *const *args, Py_ssize_t /* Don't add docstrings */ #if PY_VERSION_HEX > 0x030b0000 - static long optimize = -1000; - if (optimize < 0) { - PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ - PyObject *level = PyObject_GetAttrString(flags, "optimize"); - optimize = PyLong_AsLong(level); - Py_DECREF(level); - } - if (optimize > 1) { + if (npy_static_cdata.optimize > 1) { #else if (Py_OptimizeFlag > 1) { #endif @@ -1754,15 +1795,6 @@ pack_bits(PyObject *input, int axis, char order) static PyObject * unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) { - static int unpack_init = 0; - /* - * lookuptable for bitorder big as it has been around longer - * bitorder little is handled via byteswapping in the loop - */ - static union { - npy_uint8 bytes[8]; - npy_uint64 uint64; - } unpack_lookup_big[256]; PyArrayObject *inp; PyArrayObject *new = NULL; PyArrayObject *out = NULL; @@ -1848,22 +1880,6 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) goto fail; } - /* - * setup lookup table under GIL, 256 8 byte blocks representing 8 bits - * expanded to 1/0 bytes - */ - if (unpack_init == 0) { - npy_intp j; - for (j=0; j < 256; j++) { - npy_intp k; - for (k=0; k < 8; k++) { - npy_uint8 v = (j & (1 << k)) == (1 << k); - unpack_lookup_big[j].bytes[7 - k] = v; - } - } - unpack_init = 1; - } - count = PyArray_DIM(new, axis) * 8; if (outdims[axis] > count) { in_n = count / 8; @@ -1890,7 +1906,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) /* for unity stride we can just copy out of the lookup table */ if (order == 'b') { for (index = 0; index < in_n; index++) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + 
npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; memcpy(outptr, &v, 8); outptr += 8; inptr += in_stride; @@ -1898,7 +1914,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } else { for (index = 0; index < in_n; index++) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } @@ -1909,7 +1925,7 @@ unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order) } /* Clean up the tail portion */ if (in_tail) { - npy_uint64 v = unpack_lookup_big[*inptr].uint64; + npy_uint64 v = npy_static_cdata.unpack_lookup_big[*inptr].uint64; if (order != 'b') { v = npy_bswap8(v); } diff --git a/numpy/_core/src/multiarray/conversion_utils.c b/numpy/_core/src/multiarray/conversion_utils.c index d58fee3823ee..e7b1936d1706 100644 --- a/numpy/_core/src/multiarray/conversion_utils.c +++ b/numpy/_core/src/multiarray/conversion_utils.c @@ -10,7 +10,7 @@ #include "numpy/npy_math.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "arraytypes.h" @@ -18,6 +18,7 @@ #include "conversion_utils.h" #include "alloc.h" #include "npy_buffer.h" +#include "npy_static_data.h" #include "multiarraymodule.h" static int @@ -234,10 +235,8 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { } int int_copymode; - static PyObject* numpy_CopyMode = NULL; - npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode); - if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_static_pydata._CopyMode) { PyObject* mode_value = PyObject_GetAttrString(obj, "value"); if (mode_value == NULL) { return NPY_FAIL; @@ -249,6 +248,12 @@ PyArray_CopyConverter(PyObject *obj, NPY_COPYMODE *copymode) { return NPY_FAIL; } } + else if(PyUnicode_Check(obj)) { + PyErr_SetString(PyExc_ValueError, + "strings are not allowed for 'copy' keyword. 
" + "Use True/False/None instead."); + return NPY_FAIL; + } else { npy_bool bool_copymode; if (!PyArray_BoolConverter(obj, &bool_copymode)) { @@ -265,10 +270,8 @@ NPY_NO_EXPORT int PyArray_AsTypeCopyConverter(PyObject *obj, NPY_ASTYPECOPYMODE *copymode) { int int_copymode; - static PyObject* numpy_CopyMode = NULL; - npy_cache_import("numpy", "_CopyMode", &numpy_CopyMode); - if (numpy_CopyMode != NULL && (PyObject *)Py_TYPE(obj) == numpy_CopyMode) { + if ((PyObject *)Py_TYPE(obj) == npy_static_pydata._CopyMode) { PyErr_SetString(PyExc_ValueError, "_CopyMode enum is not allowed for astype function. " "Use true/false instead."); @@ -1197,7 +1200,7 @@ PyArray_IntpFromSequence(PyObject *seq, npy_intp *vals, int maxvals) * that it is in an unpickle context instead of a normal context without * evil global state like we create here. */ -NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag = 0; +NPY_NO_EXPORT NPY_TLS int evil_global_disable_warn_O4O8_flag = 0; /* * Convert a gentype (that is actually a generic kind character) and @@ -1341,13 +1344,19 @@ PyArray_TypestrConvert(int itemsize, int gentype) break; case NPY_DEPRECATED_STRINGLTR2: - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); - newtype = NPY_STRING; + { + /* + * raise a deprecation warning, which might be an exception + * if warnings are errors, so leave newtype unset in that + * case + */ + int ret = DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. 
" + "Use the 'S' alias instead."); + if (ret == 0) { + newtype = NPY_STRING; + } break; - + } case NPY_UNICODELTR: newtype = NPY_UNICODE; break; @@ -1403,12 +1412,7 @@ PyArray_IntTupleFromIntp(int len, npy_intp const *vals) NPY_NO_EXPORT int _not_NoValue(PyObject *obj, PyObject **out) { - static PyObject *NoValue = NULL; - npy_cache_import("numpy", "_NoValue", &NoValue); - if (NoValue == NULL) { - return 0; - } - if (obj == NoValue) { + if (obj == npy_static_pydata._NoValue) { *out = NULL; } else { @@ -1428,7 +1432,7 @@ PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device) } if (PyUnicode_Check(object) && - PyUnicode_Compare(object, npy_ma_str_cpu) == 0) { + PyUnicode_Compare(object, npy_interned_str.cpu) == 0) { *device = NPY_DEVICE_CPU; return NPY_SUCCEED; } diff --git a/numpy/_core/src/multiarray/conversion_utils.h b/numpy/_core/src/multiarray/conversion_utils.h index f138c3b98529..bff1db0c069d 100644 --- a/numpy/_core/src/multiarray/conversion_utils.h +++ b/numpy/_core/src/multiarray/conversion_utils.h @@ -113,7 +113,7 @@ PyArray_DeviceConverterOptional(PyObject *object, NPY_DEVICE *device); * that it is in an unpickle context instead of a normal context without * evil global state like we create here. */ -extern NPY_NO_EXPORT int evil_global_disable_warn_O4O8_flag; +extern NPY_NO_EXPORT NPY_TLS int evil_global_disable_warn_O4O8_flag; /* * Convert function which replaces np._NoValue with NULL. 
diff --git a/numpy/_core/src/multiarray/convert.c b/numpy/_core/src/multiarray/convert.c index c6b164d7f4e9..57a76cd5f9bd 100644 --- a/numpy/_core/src/multiarray/convert.c +++ b/numpy/_core/src/multiarray/convert.c @@ -9,7 +9,7 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" -#include "npy_pycompat.h" + #include "common.h" #include "arrayobject.h" @@ -408,9 +408,9 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj) char *value = (char *)value_buffer_stack; PyArray_Descr *descr = PyArray_DESCR(arr); - if ((size_t)descr->elsize > sizeof(value_buffer_stack)) { + if (PyDataType_ELSIZE(descr) > sizeof(value_buffer_stack)) { /* We need a large temporary buffer... */ - value_buffer_heap = PyObject_Calloc(1, descr->elsize); + value_buffer_heap = PyMem_Calloc(1, PyDataType_ELSIZE(descr)); if (value_buffer_heap == NULL) { PyErr_NoMemory(); return -1; diff --git a/numpy/_core/src/multiarray/convert_datatype.c b/numpy/_core/src/multiarray/convert_datatype.c index 2225ee94859c..fc1cd84883b3 100644 --- a/numpy/_core/src/multiarray/convert_datatype.c +++ b/numpy/_core/src/multiarray/convert_datatype.c @@ -35,7 +35,8 @@ #include "dtype_transfer.h" #include "dtype_traversal.h" #include "arrayobject.h" - +#include "npy_static_data.h" +#include "multiarraymodule.h" /* * Required length of string when converting from unsigned integer type. @@ -48,14 +49,17 @@ */ NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[] = {0, 3, 5, 10, 10, 20, 20, 20, 20}; -/* - * Whether or not legacy value-based promotion/casting is used. 
- */ -NPY_NO_EXPORT int npy_promotion_state = NPY_USE_LEGACY_PROMOTION; -NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX = NULL; -NPY_NO_EXPORT PyObject *npy_DTypePromotionError = NULL; -NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError = NULL; +static NPY_TLS int npy_promotion_state = NPY_USE_WEAK_PROMOTION; + +NPY_NO_EXPORT int +get_npy_promotion_state() { + return npy_promotion_state; +} +NPY_NO_EXPORT void +set_npy_promotion_state(int new_promotion_state) { + npy_promotion_state = new_promotion_state; +} static PyObject * PyArray_GetGenericToVoidCastingImpl(void); @@ -79,15 +83,15 @@ npy_give_promotion_warnings(void) { PyObject *val; - npy_cache_import( + if (npy_cache_import_runtime( "numpy._core._ufunc_config", "NO_NEP50_WARNING", - &NO_NEP50_WARNING_CTX); - if (NO_NEP50_WARNING_CTX == NULL) { + &npy_runtime_imports.NO_NEP50_WARNING) == -1) { PyErr_WriteUnraisable(NULL); return 1; } - if (PyContextVar_Get(NO_NEP50_WARNING_CTX, Py_False, &val) < 0) { + if (PyContextVar_Get(npy_runtime_imports.NO_NEP50_WARNING, + Py_False, &val) < 0) { /* Errors should not really happen, but if it does assume we warn. 
*/ PyErr_WriteUnraisable(NULL); return 1; @@ -100,13 +104,14 @@ npy_give_promotion_warnings(void) NPY_NO_EXPORT PyObject * npy__get_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(arg)) { - if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) { + int promotion_state = get_npy_promotion_state(); + if (promotion_state == NPY_USE_WEAK_PROMOTION) { return PyUnicode_FromString("weak"); } - else if (npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) { + else if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) { return PyUnicode_FromString("weak_and_warn"); } - else if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + else if (promotion_state == NPY_USE_LEGACY_PROMOTION) { return PyUnicode_FromString("legacy"); } PyErr_SetString(PyExc_SystemError, "invalid promotion state!"); @@ -123,14 +128,15 @@ npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg) "must be a string."); return NULL; } + int new_promotion_state; if (PyUnicode_CompareWithASCIIString(arg, "weak") == 0) { - npy_promotion_state = NPY_USE_WEAK_PROMOTION; + new_promotion_state = NPY_USE_WEAK_PROMOTION; } else if (PyUnicode_CompareWithASCIIString(arg, "weak_and_warn") == 0) { - npy_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; + new_promotion_state = NPY_USE_WEAK_PROMOTION_AND_WARN; } else if (PyUnicode_CompareWithASCIIString(arg, "legacy") == 0) { - npy_promotion_state = NPY_USE_LEGACY_PROMOTION; + new_promotion_state = NPY_USE_LEGACY_PROMOTION; } else { PyErr_Format(PyExc_TypeError, @@ -138,6 +144,7 @@ npy__set_promotion_state(PyObject *NPY_UNUSED(mod), PyObject *arg) "'weak', 'legacy', or 'weak_and_warn' but got '%.100S'", arg); return NULL; } + set_npy_promotion_state(new_promotion_state); Py_RETURN_NONE; } @@ -395,12 +402,7 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num) !PyTypeNum_ISCOMPLEX(type_num) && PyTypeNum_ISNUMBER(type_num) && !PyTypeNum_ISBOOL(type_num)) { - static PyObject *cls = NULL; - npy_cache_import("numpy.exceptions", 
"ComplexWarning", &cls); - if (cls == NULL) { - return NULL; - } - int ret = PyErr_WarnEx(cls, + int ret = PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -465,14 +467,17 @@ _get_cast_safety_from_castingimpl(PyArrayMethodObject *castingimpl, /* * Check for less harmful non-standard returns. The following two returns * should never happen: - * 1. No-casting must imply a view offset of 0. + * 1. No-casting must imply a view offset of 0 unless the DType + defines a finalization function, which implies it stores data + on the descriptor * 2. Equivalent-casting + 0 view offset is (usually) the definition * of a "no" cast. However, changing the order of fields can also * create descriptors that are not equivalent but views. * Note that unsafe casts can have a view offset. For example, in * principle, casting `finalize_descr == NULL)) { assert(casting != NPY_NO_CASTING); } else { @@ -648,6 +653,35 @@ PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to) } +/* + * This function returns true if the two types can be safely cast at + * *minimum_safety* casting level. Sets the *view_offset* if that is set + * for the cast. If ignore_error is set, the error indicator is cleared + * if there are any errors in cast setup and returns false, otherwise + * the error indicator is left set and returns -1. 
+ */ +NPY_NO_EXPORT npy_intp +PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_error) +{ + if (type1 == type2) { + *view_offset = 0; + return 1; + } + + NPY_CASTING safety = PyArray_GetCastInfo(type1, type2, NULL, view_offset); + if (safety < 0) { + if (ignore_error) { + PyErr_Clear(); + return 0; + } + return -1; + } + return PyArray_MinCastSafety(safety, minimum_safety) == minimum_safety; +} + + /* Provides an ordering for the dtype 'kind' character codes */ NPY_NO_EXPORT int dtype_kind_to_ordering(char kind) @@ -863,18 +897,29 @@ can_cast_pyscalar_scalar_to( } /* - * For all other cases we use the default dtype. + * For all other cases we need to make a bit of a dance to find the cast + * safety. We do so by finding the descriptor for the "scalar" (without + * a value; for parametric user dtypes a value may be needed eventually). */ - PyArray_Descr *from; + PyArray_DTypeMeta *from_DType; + PyArray_Descr *default_dtype; if (flags & NPY_ARRAY_WAS_PYTHON_INT) { - from = PyArray_DescrFromType(NPY_LONG); + default_dtype = PyArray_DescrNewFromType(NPY_INTP); + from_DType = &PyArray_PyLongDType; } else if (flags & NPY_ARRAY_WAS_PYTHON_FLOAT) { - from = PyArray_DescrFromType(NPY_DOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_FLOAT64); + from_DType = &PyArray_PyFloatDType; } else { - from = PyArray_DescrFromType(NPY_CDOUBLE); + default_dtype = PyArray_DescrNewFromType(NPY_COMPLEX128); + from_DType = &PyArray_PyComplexDType; } + + PyArray_Descr *from = npy_find_descr_for_scalar( + NULL, default_dtype, from_DType, NPY_DTYPE(to)); + Py_DECREF(default_dtype); + int res = PyArray_CanCastTypeTo(from, to, casting); Py_DECREF(from); return res; @@ -898,7 +943,7 @@ PyArray_CanCastArrayTo(PyArrayObject *arr, PyArray_Descr *to, to = NULL; } - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + if (get_npy_promotion_state() == NPY_USE_LEGACY_PROMOTION) { /* * If it's a scalar, check the 
value. (This only currently matters for * numeric types and for `to == NULL` it can't be numeric.) @@ -1796,17 +1841,17 @@ PyArray_ResultType( all_descriptors[i_all] = NULL; /* no descriptor for py-scalars */ if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_INT) { /* This could even be an object dtype here for large ints */ - all_DTypes[i_all] = &PyArray_PyIntAbstractDType; + all_DTypes[i_all] = &PyArray_PyLongDType; if (PyArray_TYPE(arrs[i]) != NPY_LONG) { /* Not a "normal" scalar, so we cannot avoid the legacy path */ all_pyscalar = 0; } } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_FLOAT) { - all_DTypes[i_all] = &PyArray_PyFloatAbstractDType; + all_DTypes[i_all] = &PyArray_PyFloatDType; } else if (PyArray_FLAGS(arrs[i]) & NPY_ARRAY_WAS_PYTHON_COMPLEX) { - all_DTypes[i_all] = &PyArray_PyComplexAbstractDType; + all_DTypes[i_all] = &PyArray_PyComplexDType; } else { all_descriptors[i_all] = PyArray_DTYPE(arrs[i]); @@ -1922,10 +1967,11 @@ PyArray_CheckLegacyResultType( npy_intp ndtypes, PyArray_Descr **dtypes) { PyArray_Descr *ret = NULL; - if (npy_promotion_state == NPY_USE_WEAK_PROMOTION) { + int promotion_state = get_npy_promotion_state(); + if (promotion_state == NPY_USE_WEAK_PROMOTION) { return 0; } - if (npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN + if (promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN && !npy_give_promotion_warnings()) { return 0; } @@ -2022,12 +2068,13 @@ PyArray_CheckLegacyResultType( Py_DECREF(ret); return 0; } - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + + if (promotion_state == NPY_USE_LEGACY_PROMOTION) { Py_SETREF(*new_result, ret); return 0; } - assert(npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN); + assert(promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN); if (PyErr_WarnFormat(PyExc_UserWarning, 1, "result dtype changed due to the removal of value-based " "promotion from NumPy. 
Changed from %S to %S.", @@ -2131,7 +2178,6 @@ PyArray_Zero(PyArrayObject *arr) { char *zeroval; int ret, storeflags; - static PyObject * zero_obj = NULL; if (_check_object_rec(PyArray_DESCR(arr)) < 0) { return NULL; @@ -2142,12 +2188,6 @@ PyArray_Zero(PyArrayObject *arr) return NULL; } - if (zero_obj == NULL) { - zero_obj = PyLong_FromLong((long) 0); - if (zero_obj == NULL) { - return NULL; - } - } if (PyArray_ISOBJECT(arr)) { /* XXX this is dangerous, the caller probably is not aware that zeroval is actually a static PyObject* @@ -2155,12 +2195,12 @@ PyArray_Zero(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(zeroval, &zero_obj, sizeof(PyObject *)); + memcpy(zeroval, &npy_static_pydata.zero_obj, sizeof(PyObject *)); return zeroval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, zeroval, zero_obj); + ret = PyArray_SETITEM(arr, zeroval, npy_static_pydata.zero_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(zeroval); @@ -2177,7 +2217,6 @@ PyArray_One(PyArrayObject *arr) { char *oneval; int ret, storeflags; - static PyObject * one_obj = NULL; if (_check_object_rec(PyArray_DESCR(arr)) < 0) { return NULL; @@ -2188,12 +2227,6 @@ PyArray_One(PyArrayObject *arr) return NULL; } - if (one_obj == NULL) { - one_obj = PyLong_FromLong((long) 1); - if (one_obj == NULL) { - return NULL; - } - } if (PyArray_ISOBJECT(arr)) { /* XXX this is dangerous, the caller probably is not aware that oneval is actually a static PyObject* @@ -2201,13 +2234,13 @@ PyArray_One(PyArrayObject *arr) if they simply memcpy it into a ndarray without using setitem(), refcount errors will occur */ - memcpy(oneval, &one_obj, sizeof(PyObject *)); + memcpy(oneval, &npy_static_pydata.one_obj, sizeof(PyObject *)); return oneval; } storeflags = PyArray_FLAGS(arr); PyArray_ENABLEFLAGS(arr, NPY_ARRAY_BEHAVED); - ret = PyArray_SETITEM(arr, 
oneval, one_obj); + ret = PyArray_SETITEM(arr, oneval, npy_static_pydata.one_obj); ((PyArrayObject_fields *)arr)->flags = storeflags; if (ret < 0) { PyDataMem_FREE(oneval); @@ -2440,8 +2473,8 @@ PyArray_AddCastingImplementation_FromSpec(PyArrayMethod_Spec *spec, int private) NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2483,7 +2516,7 @@ legacy_cast_get_strided_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; int out_needs_api = 0; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; @@ -2507,8 +2540,8 @@ legacy_cast_get_strided_loop( NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2548,7 +2581,7 @@ get_byteswap_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; assert(descrs[0]->kind == descrs[1]->kind); assert(descrs[0]->elsize == descrs[1]->elsize); int itemsize = descrs[0]->elsize; @@ -2590,13 +2623,7 @@ complex_to_noncomplex_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - static PyObject *cls = NULL; - int ret; - npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return -1; - } - ret = PyErr_WarnEx(cls, + int ret = 
PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards " "the imaginary part", 1); if (ret < 0) { @@ -2727,8 +2754,8 @@ PyArray_InitializeNumericCasts(void) static int cast_to_string_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -2879,8 +2906,8 @@ add_other_to_and_from_string_cast( NPY_NO_EXPORT NPY_CASTING string_to_string_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -2932,7 +2959,7 @@ string_to_string_get_loop( NPY_ARRAYMETHOD_FLAGS *flags) { int unicode_swap = 0; - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; assert(NPY_DTYPE(descrs[0]) == NPY_DTYPE(descrs[1])); *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; @@ -3033,7 +3060,7 @@ PyArray_InitializeStringCasts(void) */ static NPY_CASTING cast_to_void_dtype_class( - PyArray_Descr **given_descrs, PyArray_Descr **loop_descrs, + PyArray_Descr *const *given_descrs, PyArray_Descr **loop_descrs, npy_intp *view_offset) { /* `dtype="V"` means unstructured currently (compare final path) */ @@ -3058,8 +3085,8 @@ cast_to_void_dtype_class( static NPY_CASTING nonstructured_to_structured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3220,39 +3247,19 @@ nonstructured_to_structured_get_loop( return 0; } - static PyObject * 
PyArray_GetGenericToVoidCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->name = "any_to_void_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = -1; - method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; - method->get_strided_loop = &nonstructured_to_structured_get_loop; - method->nin = 1; - method->nout = 1; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.GenericToVoidMethod); + return npy_static_pydata.GenericToVoidMethod; } static NPY_CASTING structured_to_nonstructured_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3381,27 +3388,8 @@ structured_to_nonstructured_get_loop( static PyObject * PyArray_GetVoidToGenericCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->name = "void_to_any_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = -1; - method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; - method->get_strided_loop = &structured_to_nonstructured_get_loop; - method->nin = 1; - method->nout = 1; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.VoidToGenericMethod); + return npy_static_pydata.VoidToGenericMethod; } @@ -3521,8 +3509,8 @@ can_cast_fields_safety( static NPY_CASTING void_to_void_resolve_descriptors( 
PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset) { @@ -3720,8 +3708,8 @@ PyArray_InitializeVoidToVoidCast(void) static NPY_CASTING object_to_any_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -3765,37 +3753,17 @@ object_to_any_resolve_descriptors( static PyObject * PyArray_GetObjectToGenericCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->nin = 1; - method->nout = 1; - method->name = "object_to_any_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_UNSAFE_CASTING; - method->resolve_descriptors = &object_to_any_resolve_descriptors; - method->get_strided_loop = &object_to_any_get_loop; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.ObjectToGenericMethod); + return npy_static_pydata.ObjectToGenericMethod; } - /* Any object is simple (could even use the default) */ static NPY_CASTING any_to_object_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -3822,27 +3790,8 @@ any_to_object_resolve_descriptors( static PyObject * PyArray_GetGenericToObjectCastingImpl(void) { - static PyArrayMethodObject *method = NULL; - - if (method != NULL) { - Py_INCREF(method); - 
return (PyObject *)method; - } - - method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); - if (method == NULL) { - return PyErr_NoMemory(); - } - - method->nin = 1; - method->nout = 1; - method->name = "any_to_object_cast"; - method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; - method->casting = NPY_SAFE_CASTING; - method->resolve_descriptors = &any_to_object_resolve_descriptors; - method->get_strided_loop = &any_to_object_get_loop; - - return (PyObject *)method; + Py_INCREF(npy_static_pydata.GenericToObjectMethod); + return npy_static_pydata.GenericToObjectMethod; } @@ -3894,6 +3843,71 @@ PyArray_InitializeObjectToObjectCast(void) return res; } +static int +initialize_void_and_object_globals(void) { + PyArrayMethodObject *method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->name = "void_to_any_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = -1; + method->resolve_descriptors = &structured_to_nonstructured_resolve_descriptors; + method->get_strided_loop = &structured_to_nonstructured_get_loop; + method->nin = 1; + method->nout = 1; + npy_static_pydata.VoidToGenericMethod = (PyObject *)method; + + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->name = "any_to_void_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = -1; + method->resolve_descriptors = &nonstructured_to_structured_resolve_descriptors; + method->get_strided_loop = &nonstructured_to_structured_get_loop; + method->nin = 1; + method->nout = 1; + npy_static_pydata.GenericToVoidMethod = (PyObject *)method; + + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->nin = 1; + method->nout = 1; + method->name = 
"object_to_any_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = NPY_UNSAFE_CASTING; + method->resolve_descriptors = &object_to_any_resolve_descriptors; + method->get_strided_loop = &object_to_any_get_loop; + npy_static_pydata.ObjectToGenericMethod = (PyObject *)method; + + method = PyObject_New(PyArrayMethodObject, &PyArrayMethod_Type); + if (method == NULL) { + PyErr_NoMemory(); + return -1; + } + + method->nin = 1; + method->nout = 1; + method->name = "any_to_object_cast"; + method->flags = NPY_METH_SUPPORTS_UNALIGNED | NPY_METH_REQUIRES_PYAPI; + method->casting = NPY_SAFE_CASTING; + method->resolve_descriptors = &any_to_object_resolve_descriptors; + method->get_strided_loop = &any_to_object_get_loop; + npy_static_pydata.GenericToObjectMethod = (PyObject *)method; + + return 0; +} + NPY_NO_EXPORT int PyArray_InitializeCasts() @@ -3914,5 +3928,10 @@ PyArray_InitializeCasts() if (PyArray_InitializeDatetimeCasts() < 0) { return -1; } + + if (initialize_void_and_object_globals() < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/multiarray/convert_datatype.h b/numpy/_core/src/multiarray/convert_datatype.h index b32b637d8e55..f848ad3b4c8e 100644 --- a/numpy/_core/src/multiarray/convert_datatype.h +++ b/numpy/_core/src/multiarray/convert_datatype.h @@ -12,10 +12,6 @@ extern NPY_NO_EXPORT npy_intp REQUIRED_STR_LEN[]; #define NPY_USE_LEGACY_PROMOTION 0 #define NPY_USE_WEAK_PROMOTION 1 #define NPY_USE_WEAK_PROMOTION_AND_WARN 2 -extern NPY_NO_EXPORT int npy_promotion_state; -extern NPY_NO_EXPORT PyObject *NO_NEP50_WARNING_CTX; -extern NPY_NO_EXPORT PyObject *npy_DTypePromotionError; -extern NPY_NO_EXPORT PyObject *npy_UFuncNoLoopError; NPY_NO_EXPORT int npy_give_promotion_warnings(void); @@ -102,6 +98,11 @@ PyArray_GetCastInfo( PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype, npy_intp *view_offset); +NPY_NO_EXPORT npy_intp +PyArray_SafeCast(PyArray_Descr *type1, PyArray_Descr *type2, + 
npy_intp* view_offset, NPY_CASTING minimum_safety, + npy_intp ignore_errors); + NPY_NO_EXPORT int PyArray_CheckCastSafety(NPY_CASTING casting, PyArray_Descr *from, PyArray_Descr *to, PyArray_DTypeMeta *to_dtype); @@ -109,8 +110,8 @@ PyArray_CheckCastSafety(NPY_CASTING casting, NPY_NO_EXPORT NPY_CASTING legacy_same_dtype_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset); @@ -124,14 +125,20 @@ legacy_cast_get_strided_loop( NPY_NO_EXPORT NPY_CASTING simple_cast_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[2], - PyArray_Descr *input_descrs[2], + PyArray_DTypeMeta *const dtypes[2], + PyArray_Descr *const input_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *view_offset); NPY_NO_EXPORT int PyArray_InitializeCasts(void); +NPY_NO_EXPORT int +get_npy_promotion_state(); + +NPY_NO_EXPORT void +set_npy_promotion_state(int new_promotion_state); + #ifdef __cplusplus } #endif diff --git a/numpy/_core/src/multiarray/ctors.c b/numpy/_core/src/multiarray/ctors.c index a475f3986759..c659dfa356cd 100644 --- a/numpy/_core/src/multiarray/ctors.c +++ b/numpy/_core/src/multiarray/ctors.c @@ -7,15 +7,16 @@ #include #include "numpy/arrayobject.h" +#include "arrayobject.h" #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" #include "npy_config.h" - -#include "npy_ctypes.h" #include "npy_pycompat.h" -#include "multiarraymodule.h" +#include "npy_ctypes.h" + +#include "npy_static_data.h" #include "common.h" #include "ctors.h" @@ -39,6 +40,13 @@ #include "umathmodule.h" + +NPY_NO_EXPORT const char *npy_no_copy_err_msg = ( + "Unable to avoid copy while creating an array as requested.\n" + "If using `np.array(obj, copy=False)` replace it with `np.asarray(obj)` " + "to allow a copy when needed (no behavior change in NumPy 1.x).\n" + "For more details, see 
https://numpy.org/devdocs/numpy_2_0_migration_guide.html#adapting-to-changes-in-the-copy-keyword."); + /* * Reading from a file or a string. * @@ -603,15 +611,6 @@ PyArray_AssignFromCache(PyArrayObject *self, coercion_cache_obj *cache) { static void raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) { - static PyObject *exc_type = NULL; - - npy_cache_import( - "numpy._core._exceptions", "_ArrayMemoryError", - &exc_type); - if (exc_type == NULL) { - goto fail; - } - PyObject *shape = PyArray_IntTupleFromIntp(nd, dims); if (shape == NULL) { goto fail; @@ -623,7 +622,7 @@ raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr) if (exc_value == NULL){ goto fail; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject(npy_static_pydata._ArrayMemoryError, exc_value); Py_DECREF(exc_value); return; @@ -823,6 +822,12 @@ PyArray_NewFromDescr_int( if (data == NULL) { + /* This closely follows PyArray_ZeroContiguousBuffer. We can't use + * that because here we need to allocate after checking if there is + * custom zeroing logic and that function accepts an already-allocated + * array + */ + /* float errors do not matter and we do not release GIL */ NPY_ARRAYMETHOD_FLAGS zero_flags; PyArrayMethod_GetTraverseLoop *get_fill_zero_loop = @@ -929,17 +934,11 @@ PyArray_NewFromDescr_int( */ if (subtype != &PyArray_Type) { PyObject *res, *func; - static PyObject *ndarray_array_finalize = NULL; - /* First time, cache ndarray's __array_finalize__ */ - if (ndarray_array_finalize == NULL) { - ndarray_array_finalize = PyObject_GetAttr( - (PyObject *)&PyArray_Type, npy_ma_str_array_finalize); - } - func = PyObject_GetAttr((PyObject *)subtype, npy_ma_str_array_finalize); + func = PyObject_GetAttr((PyObject *)subtype, npy_interned_str.array_finalize); if (func == NULL) { goto fail; } - else if (func == ndarray_array_finalize) { + else if (func == npy_static_pydata.ndarray_array_finalize) { Py_DECREF(func); } else if (func == Py_None) { @@ -1422,6 +1421,8 
@@ _array_from_buffer_3118(PyObject *memoryview) * @param writeable whether the result must be writeable. * @param context Unused parameter, must be NULL (should be removed later). * @param copy Specifies the copy behavior. + * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy + * was made by implementor. * * @returns The array object, Py_NotImplemented if op is not array-like, * or NULL with an error set. (A new reference to Py_NotImplemented @@ -1430,7 +1431,7 @@ _array_from_buffer_3118(PyObject *memoryview) NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, - int copy) { + int copy, int *was_copied_by__array__) { PyObject* tmp; /* @@ -1478,7 +1479,8 @@ _array_from_array_like(PyObject *op, } if (tmp == Py_NotImplemented) { - tmp = PyArray_FromArrayAttr_int(op, requested_dtype, copy); + tmp = PyArray_FromArrayAttr_int( + op, requested_dtype, copy, was_copied_by__array__); if (tmp == NULL) { return NULL; } @@ -1565,13 +1567,17 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, // Default is copy = None int copy = -1; + int was_copied_by__array__ = 0; if (flags & NPY_ARRAY_ENSURENOCOPY) { copy = 0; + } else if (flags & NPY_ARRAY_ENSURECOPY) { + copy = 1; } ndim = PyArray_DiscoverDTypeAndShape( - op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, copy); + op, NPY_MAXDIMS, dims, &cache, in_DType, in_descr, &dtype, + copy, &was_copied_by__array__); if (ndim < 0) { return NULL; @@ -1608,6 +1614,9 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, assert(cache->converted_obj == op); arr = (PyArrayObject *)(cache->arr_or_sequence); /* we may need to cast or assert flags (e.g. 
copy) */ + if (was_copied_by__array__ == 1) { + flags = flags & ~NPY_ARRAY_ENSURECOPY; + } PyObject *res = PyArray_FromArray(arr, dtype, flags); npy_unlink_coercion_cache(cache); return res; @@ -1637,9 +1646,8 @@ PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, * If we got this far, we definitely have to create a copy, since we are * converting either from a scalar (cache == NULL) or a (nested) sequence. */ - if (flags & NPY_ARRAY_ENSURENOCOPY ) { - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating an array."); + if (flags & NPY_ARRAY_ENSURENOCOPY) { + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); Py_DECREF(dtype); npy_free_coercion_cache(cache); return NULL; @@ -1847,8 +1855,7 @@ PyArray_CheckFromAny_int(PyObject *op, PyArray_Descr *in_descr, && !PyArray_ElementStrides(obj)) { PyObject *ret; if (requires & NPY_ARRAY_ENSURENOCOPY) { - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating a new array."); + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); return NULL; } ret = PyArray_NewCopy((PyArrayObject *)obj, NPY_ANYORDER); @@ -1893,6 +1900,10 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) newtype->elsize = oldtype->elsize; } + if (flags & NPY_ARRAY_SAME_KIND_CASTING) { + casting = NPY_SAME_KIND_CASTING; + } + /* If the casting if forced, use the 'unsafe' casting rule */ if (flags & NPY_ARRAY_FORCECAST) { casting = NPY_UNSAFE_CASTING; @@ -1908,8 +1919,10 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) } arrflags = PyArray_FLAGS(arr); - /* If a guaranteed copy was requested */ - copy = (flags & NPY_ARRAY_ENSURECOPY) || + + + copy = /* If a guaranteed copy was requested */ + (flags & NPY_ARRAY_ENSURECOPY) || /* If C contiguous was requested, and arr is not */ ((flags & NPY_ARRAY_C_CONTIGUOUS) && (!(arrflags & NPY_ARRAY_C_CONTIGUOUS))) || @@ -1921,13 +1934,17 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) 
(!(arrflags & NPY_ARRAY_F_CONTIGUOUS))) || /* If a writeable array was requested, and arr is not */ ((flags & NPY_ARRAY_WRITEABLE) && - (!(arrflags & NPY_ARRAY_WRITEABLE))) || - !PyArray_EquivTypes(oldtype, newtype); + (!(arrflags & NPY_ARRAY_WRITEABLE))); + + if (!copy) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(oldtype, newtype, &view_offset, NPY_NO_CASTING, 1); + copy = !(is_safe && (view_offset != NPY_MIN_INTP)); + } if (copy) { - if (flags & NPY_ARRAY_ENSURENOCOPY ) { - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating an array from given array."); + if (flags & NPY_ARRAY_ENSURENOCOPY) { + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); Py_DECREF(newtype); return NULL; } @@ -2019,7 +2036,7 @@ PyArray_FromStructInterface(PyObject *input) PyObject *attr; char endian = NPY_NATBYTE; - attr = PyArray_LookupSpecial_OnInstance(input, npy_ma_str_array_struct); + attr = PyArray_LookupSpecial_OnInstance(input, npy_interned_str.array_struct); if (attr == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2143,7 +2160,7 @@ PyArray_FromInterface(PyObject *origin) npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS]; int dataflags = NPY_ARRAY_BEHAVED; - iface = PyArray_LookupSpecial_OnInstance(origin, npy_ma_str_array_interface); + iface = PyArray_LookupSpecial_OnInstance(origin, npy_interned_str.array_interface); if (iface == NULL) { if (PyErr_Occurred()) { @@ -2169,10 +2186,10 @@ PyArray_FromInterface(PyObject *origin) } /* Get type string from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "typestr"); - if (attr == NULL) { + int result = PyDict_GetItemStringRef(iface, "typestr", &attr); + if (result <= 0) { Py_DECREF(iface); - if (!PyErr_Occurred()) { + if (result == 0) { PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ typestr"); } @@ -2196,43 +2213,47 @@ PyArray_FromInterface(PyObject *origin) * the 'descr' attribute. 
*/ if (dtype->type_num == NPY_VOID) { - PyObject *descr = _PyDict_GetItemStringWithError(iface, "descr"); - if (descr == NULL && PyErr_Occurred()) { + PyObject *descr = NULL; + result = PyDict_GetItemStringRef(iface, "descr", &descr); + if (result == -1) { goto fail; } PyArray_Descr *new_dtype = NULL; - if (descr != NULL) { + if (result == 1) { int is_default = _is_default_descr(descr, attr); if (is_default < 0) { + Py_DECREF(descr); goto fail; } if (!is_default) { if (PyArray_DescrConverter2(descr, &new_dtype) != NPY_SUCCEED) { + Py_DECREF(descr); goto fail; } if (new_dtype != NULL) { - Py_DECREF(dtype); - dtype = new_dtype; + Py_SETREF(dtype, new_dtype); } } - + Py_DECREF(descr); } - } + Py_CLEAR(attr); /* Get shape tuple from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "shape"); - if (attr == NULL) { - if (PyErr_Occurred()) { - return NULL; - } + result = PyDict_GetItemStringRef(iface, "shape", &attr); + if (result < 0) { + return NULL; + } + if (result == 0) { /* Shape must be specified when 'data' is specified */ - PyObject *data = _PyDict_GetItemStringWithError(iface, "data"); - if (data == NULL && PyErr_Occurred()) { + int result = PyDict_ContainsString(iface, "data"); + if (result < 0) { + Py_DECREF(attr); return NULL; } - else if (data != NULL) { + else if (result == 1) { Py_DECREF(iface); + Py_DECREF(attr); PyErr_SetString(PyExc_ValueError, "Missing __array_interface__ shape"); return NULL; @@ -2260,10 +2281,11 @@ PyArray_FromInterface(PyObject *origin) } } } + Py_CLEAR(attr); /* Get data buffer from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "data"); - if (attr == NULL && PyErr_Occurred()){ + result = PyDict_GetItemStringRef(iface, "data", &attr); + if (result == -1){ return NULL; } @@ -2326,20 +2348,24 @@ PyArray_FromInterface(PyObject *origin) PyBuffer_Release(&view); /* Get offset number from interface specification */ - attr = _PyDict_GetItemStringWithError(iface, "offset"); - if (attr == 
NULL && PyErr_Occurred()) { + PyObject *offset = NULL; + result = PyDict_GetItemStringRef(iface, "offset", &offset); + if (result == -1) { goto fail; } - else if (attr) { - npy_longlong num = PyLong_AsLongLong(attr); + else if (result == 1) { + npy_longlong num = PyLong_AsLongLong(offset); if (error_converting(num)) { PyErr_SetString(PyExc_TypeError, "__array_interface__ offset must be an integer"); + Py_DECREF(offset); goto fail; } data += num; + Py_DECREF(offset); } } + Py_CLEAR(attr); ret = (PyArrayObject *)PyArray_NewFromDescrAndBase( &PyArray_Type, dtype, @@ -2365,11 +2391,11 @@ PyArray_FromInterface(PyObject *origin) goto fail; } } - attr = _PyDict_GetItemStringWithError(iface, "strides"); - if (attr == NULL && PyErr_Occurred()){ + result = PyDict_GetItemStringRef(iface, "strides", &attr); + if (result == -1){ return NULL; } - if (attr != NULL && attr != Py_None) { + if (result == 1 && attr != Py_None) { if (!PyTuple_Check(attr)) { PyErr_SetString(PyExc_TypeError, "strides must be a tuple"); @@ -2393,18 +2419,77 @@ PyArray_FromInterface(PyObject *origin) if (n) { memcpy(PyArray_STRIDES(ret), strides, n*sizeof(npy_intp)); } + Py_DECREF(attr); } PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL); Py_DECREF(iface); return (PyObject *)ret; fail: + Py_XDECREF(attr); Py_XDECREF(dtype); Py_XDECREF(iface); return NULL; } + +/* + * Returns -1 and an error set or 0 with the original error cleared, must + * be called with an error set. + */ +static inline int +check_or_clear_and_warn_error_if_due_to_copy_kwarg(PyObject *kwnames) +{ + if (kwnames == NULL) { + return -1; /* didn't pass kwnames, can't possibly be the reason */ + } + if (!PyErr_ExceptionMatches(PyExc_TypeError)) { + return -1; + } + + /* + * In most cases, if we fail, we assume the error was unrelated to the + * copy kwarg and simply restore the original one. 
+ */ + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + if (value == NULL) { + goto restore_error; + } + + PyObject *str_value = PyObject_Str(value); + if (str_value == NULL) { + goto restore_error; + } + int copy_kwarg_unsupported = PyUnicode_Contains( + str_value, npy_interned_str.array_err_msg_substr); + Py_DECREF(str_value); + if (copy_kwarg_unsupported == -1) { + goto restore_error; + } + if (copy_kwarg_unsupported) { + /* + * TODO: As of now NumPy 2.0, the this warning is only triggered with + * `copy=False` allowing downstream to not notice it. + */ + Py_DECREF(type); + Py_DECREF(value); + Py_XDECREF(traceback); + if (DEPRECATE("__array__ implementation doesn't accept a copy keyword, " + "so passing copy=False failed. __array__ must implement " + "'dtype' and 'copy' keyword arguments.") < 0) { + return -1; + } + return 0; + } + + restore_error: + PyErr_Restore(type, value, traceback); + return -1; +} + + /** * Check for an __array__ attribute and call it when it exists. * @@ -2418,17 +2503,19 @@ PyArray_FromInterface(PyObject *origin) * NOTE: For copy == -1 it passes `op.__array__(copy=None)`, * for copy == 0, `op.__array__(copy=False)`, and * for copy == 1, `op.__array__(copy=True). + * @param was_copied_by__array__ Set to 1 if it can be assumed that a copy + * was made by implementor. * @returns NotImplemented if `__array__` is not defined or a NumPy array * (or subclass). On error, return NULL. 
*/ NPY_NO_EXPORT PyObject * -PyArray_FromArrayAttr_int( - PyObject *op, PyArray_Descr *descr, int copy) +PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, + int *was_copied_by__array__) { PyObject *new; PyObject *array_meth; - array_meth = PyArray_LookupSpecial_OnInstance(op, npy_ma_str_array); + array_meth = PyArray_LookupSpecial_OnInstance(op, npy_interned_str.array); if (array_meth == NULL) { if (PyErr_Occurred()) { return NULL; @@ -2447,67 +2534,51 @@ PyArray_FromArrayAttr_int( return Py_NotImplemented; } - PyObject *kwargs = PyDict_New(); + Py_ssize_t nargs = 0; + PyObject *arguments[2]; + PyObject *kwnames = NULL; + + if (descr != NULL) { + arguments[0] = (PyObject *)descr; + nargs++; + } /* * Only if the value of `copy` isn't the default one, we try to pass it * along; for backwards compatibility we then retry if it fails because the * signature of the __array__ method being called does not have `copy`. */ - int copy_passed = 0; if (copy != -1) { - copy_passed = 1; - PyObject *copy_obj = copy == 1 ? Py_True : Py_False; - PyDict_SetItemString(kwargs, "copy", copy_obj); + kwnames = npy_static_pydata.kwnames_is_copy; + arguments[nargs] = copy == 1 ? Py_True : Py_False; } - PyObject *args = descr != NULL ? 
PyTuple_Pack(1, descr) : PyTuple_New(0); - - new = PyObject_Call(array_meth, args, kwargs); + int must_copy_but_copy_kwarg_unimplemented = 0; + new = PyObject_Vectorcall(array_meth, arguments, nargs, kwnames); if (new == NULL) { - if (npy_ma_str_array_err_msg_substr == NULL) { + if (check_or_clear_and_warn_error_if_due_to_copy_kwarg(kwnames) < 0) { + /* Error was not cleared (or a new error set) */ + Py_DECREF(array_meth); return NULL; } - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); - if (value != NULL) { - PyObject *str_value = PyObject_Str(value); - if (PyUnicode_Contains( - str_value, npy_ma_str_array_err_msg_substr) > 0) { - Py_DECREF(type); - Py_DECREF(value); - Py_XDECREF(traceback); - if (PyErr_WarnEx(PyExc_UserWarning, - "__array__ should implement 'dtype' and " - "'copy' keywords", 1) < 0) { - Py_DECREF(str_value); - Py_DECREF(args); - Py_DECREF(kwargs); - return NULL; - } - if (copy_passed) { /* try again */ - PyDict_DelItemString(kwargs, "copy"); - new = PyObject_Call(array_meth, args, kwargs); - if (new == NULL) { - Py_DECREF(str_value); - Py_DECREF(args); - Py_DECREF(kwargs); - return NULL; - } - } - } - Py_DECREF(str_value); + if (copy == 0) { + /* Cannot possibly avoid a copy, so error out. */ + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); + Py_DECREF(array_meth); + return NULL; } + /* + * The error seems to have been due to passing copy. We try to see + * more precisely what the message is and may try again. 
+ */ + must_copy_but_copy_kwarg_unimplemented = 1; + new = PyObject_Vectorcall(array_meth, arguments, nargs, NULL); if (new == NULL) { - PyErr_Restore(type, value, traceback); - Py_DECREF(args); - Py_DECREF(kwargs); + Py_DECREF(array_meth); return NULL; } } - Py_DECREF(args); - Py_DECREF(kwargs); Py_DECREF(array_meth); if (!PyArray_Check(new)) { @@ -2517,6 +2588,13 @@ PyArray_FromArrayAttr_int( Py_DECREF(new); return NULL; } + /* TODO: Remove was_copied_by__array__ argument */ + if (was_copied_by__array__ != NULL && copy == 1 && + must_copy_but_copy_kwarg_unimplemented == 0) { + /* We can assume that a copy was made */ + *was_copied_by__array__ = 1; + } + return new; } @@ -2531,7 +2609,7 @@ PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, PyObject *context) return NULL; } - return PyArray_FromArrayAttr_int(op, typecode, 0); + return PyArray_FromArrayAttr_int(op, typecode, 0, NULL); } diff --git a/numpy/_core/src/multiarray/ctors.h b/numpy/_core/src/multiarray/ctors.h index d2577f83ef96..094589968b66 100644 --- a/numpy/_core/src/multiarray/ctors.h +++ b/numpy/_core/src/multiarray/ctors.h @@ -1,6 +1,9 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_CTORS_H_ +extern NPY_NO_EXPORT const char *npy_no_copy_err_msg; + + NPY_NO_EXPORT PyObject * PyArray_NewFromDescr( PyTypeObject *subtype, PyArray_Descr *descr, int nd, @@ -51,7 +54,7 @@ PyArray_New( NPY_NO_EXPORT PyObject * _array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype, npy_bool writeable, PyObject *context, - int copy); + int copy, int *was_copied_by__array__); NPY_NO_EXPORT PyObject * PyArray_FromAny_int(PyObject *op, PyArray_Descr *in_descr, @@ -81,8 +84,8 @@ NPY_NO_EXPORT PyObject * PyArray_FromInterface(PyObject *input); NPY_NO_EXPORT PyObject * -PyArray_FromArrayAttr_int( - PyObject *op, PyArray_Descr *descr, int copy); +PyArray_FromArrayAttr_int(PyObject *op, PyArray_Descr *descr, int copy, + int *was_copied_by__array__); NPY_NO_EXPORT PyObject * 
PyArray_FromArrayAttr(PyObject *op, PyArray_Descr *typecode, diff --git a/numpy/_core/src/multiarray/datetime.c b/numpy/_core/src/multiarray/datetime.c index 7397381daf91..474c048db6cf 100644 --- a/numpy/_core/src/multiarray/datetime.c +++ b/numpy/_core/src/multiarray/datetime.c @@ -16,7 +16,7 @@ #include "numpyos.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "numpy/arrayscalars.h" @@ -3780,7 +3780,7 @@ time_to_time_get_loop( { int requires_wrap = 0; int inner_aligned = aligned; - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; PyArray_DatetimeMetaData *meta1 = get_datetime_metadata_from_dtype(descrs[0]); @@ -3929,7 +3929,7 @@ datetime_to_string_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; if (descrs[1]->type_num == NPY_STRING) { @@ -3989,7 +3989,7 @@ string_to_datetime_cast_get_loop( PyArrayMethod_StridedLoop **out_loop, NpyAuxData **out_transferdata, NPY_ARRAYMETHOD_FLAGS *flags) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; *flags = context->method->flags & NPY_METH_RUNTIME_FLAGS; if (descrs[0]->type_num == NPY_STRING) { diff --git a/numpy/_core/src/multiarray/datetime_busday.c b/numpy/_core/src/multiarray/datetime_busday.c index 93ed0972ec98..73c88811a0a9 100644 --- a/numpy/_core/src/multiarray/datetime_busday.c +++ b/numpy/_core/src/multiarray/datetime_busday.c @@ -15,7 +15,7 @@ #include #include "npy_config.h" -#include "npy_pycompat.h" + #include "numpy/arrayscalars.h" #include "lowlevel_strided_loops.h" diff --git a/numpy/_core/src/multiarray/datetime_busdaycal.c b/numpy/_core/src/multiarray/datetime_busdaycal.c index 
880efe934c09..3a7e3a383dca 100644 --- a/numpy/_core/src/multiarray/datetime_busdaycal.c +++ b/numpy/_core/src/multiarray/datetime_busdaycal.c @@ -17,7 +17,7 @@ #include "numpy/arrayscalars.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "lowlevel_strided_loops.h" diff --git a/numpy/_core/src/multiarray/datetime_strings.c b/numpy/_core/src/multiarray/datetime_strings.c index 090277e16939..f92eec3f5a59 100644 --- a/numpy/_core/src/multiarray/datetime_strings.c +++ b/numpy/_core/src/multiarray/datetime_strings.c @@ -16,7 +16,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "numpy/arrayscalars.h" #include "convert_datatype.h" diff --git a/numpy/_core/src/multiarray/descriptor.c b/numpy/_core/src/multiarray/descriptor.c index c77b380e9386..a47a71d39196 100644 --- a/numpy/_core/src/multiarray/descriptor.c +++ b/numpy/_core/src/multiarray/descriptor.c @@ -6,6 +6,8 @@ #include #include +#include + #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" #include "numpy/npy_math.h" @@ -13,13 +15,15 @@ #include "npy_config.h" #include "npy_ctypes.h" #include "npy_import.h" -#include "npy_pycompat.h" + #include "_datetime.h" #include "common.h" #include "conversion_utils.h" /* for PyArray_TypestrConvert */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "descriptor.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" // for thread unsafe state access #include "alloc.h" #include "assert.h" #include "npy_buffer.h" @@ -721,13 +725,13 @@ _convert_from_commastring(PyObject *obj, int align) { PyObject *parsed; PyArray_Descr *res; - static PyObject *_commastring = NULL; assert(PyUnicode_Check(obj)); - npy_cache_import("numpy._core._internal", "_commastring", &_commastring); - if (_commastring == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_commastring", + &npy_runtime_imports._commastring) == -1) { return NULL; } - parsed = 
PyObject_CallOneArg(_commastring, obj); + parsed = PyObject_CallOneArg(npy_runtime_imports._commastring, obj); if (parsed == NULL) { return NULL; } @@ -1806,14 +1810,27 @@ _convert_from_str(PyObject *obj, int align) /* Python byte string characters are unsigned */ check_num = (unsigned char) type[0]; } - /* A kind + size like 'f8' */ + /* Possibly a kind + size like 'f8' but also could be 'bool' */ else { char *typeend = NULL; int kind; - /* Parse the integer, make sure it's the rest of the string */ - elsize = (int)strtol(type + 1, &typeend, 10); - if (typeend - type == len) { + /* Attempt to parse the integer, make sure it's the rest of the string */ + errno = 0; + long result = strtol(type + 1, &typeend, 10); + npy_bool some_parsing_happened = !(type == typeend); + npy_bool entire_string_consumed = *typeend == '\0'; + npy_bool parsing_succeeded = + (errno == 0) && some_parsing_happened && entire_string_consumed; + // make sure it doesn't overflow or go negative + if (result > INT_MAX || result < 0) { + goto fail; + } + + elsize = (int)result; + + + if (parsing_succeeded && typeend - type == len) { kind = type[0]; switch (kind) { @@ -1822,10 +1839,10 @@ _convert_from_str(PyObject *obj, int align) break; case NPY_DEPRECATED_STRINGLTR2: - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); + if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. " + "Use the 'S' alias instead.") < 0) { + return NULL; + } check_num = NPY_STRING; break; @@ -1859,6 +1876,9 @@ _convert_from_str(PyObject *obj, int align) } } } + else if (parsing_succeeded) { + goto fail; + } } if (PyErr_Occurred()) { @@ -1894,10 +1914,10 @@ _convert_from_str(PyObject *obj, int align) } if (strcmp(type, "a") == 0) { - DEPRECATE( - "Data type alias `a` was removed in NumPy 2.0. " - "Use `S` alias instead." - ); + if (DEPRECATE("Data type alias 'a' was deprecated in NumPy 2.0. 
" + "Use the 'S' alias instead.") < 0) { + return NULL; + } } /* @@ -2019,6 +2039,8 @@ arraydescr_dealloc(PyArray_Descr *self) { Py_XDECREF(self->typeobj); if (!PyDataType_ISLEGACY(self)) { + /* non legacy dtypes must not have fields, etc. */ + Py_TYPE(self)->tp_free((PyObject *)self); return; } _PyArray_LegacyDescr *lself = (_PyArray_LegacyDescr *)self; @@ -2696,7 +2718,7 @@ arraydescr_reduce(PyArray_Descr *self, PyObject *NPY_UNUSED(args)) Py_DECREF(ret); return NULL; } - obj = PyObject_GetAttrString(mod, "dtype"); + obj = PyObject_GetAttr(mod, npy_interned_str.dtype); Py_DECREF(mod); if (obj == NULL) { Py_DECREF(ret); diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index d26701df8c57..51cb454b3a66 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -8,9 +8,15 @@ #include "numpy/arrayobject.h" #include "npy_argparse.h" #include "npy_dlpack.h" +#include "npy_static_data.h" +#include "conversion_utils.h" + +/* + * Deleter for a NumPy exported dlpack DLManagedTensor(Versioned). + */ static void -array_dlpack_deleter(DLManagedTensor *self) +array_dlpack_deleter(DLManagedTensorVersioned *self) { /* * Leak the pyobj if not initialized. 
This can happen if we are running @@ -32,48 +38,86 @@ array_dlpack_deleter(DLManagedTensor *self) PyGILState_Release(state); } -/* This is exactly as mandated by dlpack */ -static void dlpack_capsule_deleter(PyObject *self) { - if (PyCapsule_IsValid(self, NPY_DLPACK_USED_CAPSULE_NAME)) { +/* TODO: Basically same as above until dlpack v0 is removed: */ +static void +array_dlpack_deleter_unversioned(DLManagedTensor *self) +{ + if (!Py_IsInitialized()) { return; } - /* an exception may be in-flight, we must save it in case we create another one */ - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); + PyGILState_STATE state = PyGILState_Ensure(); - DLManagedTensor *managed = - (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_CAPSULE_NAME); + PyArrayObject *array = (PyArrayObject *)self->manager_ctx; + PyMem_Free(self); + Py_XDECREF(array); + + PyGILState_Release(state); +} + + +/* + * Deleter for a DLPack capsule wrapping a DLManagedTensor(Versioed). + * + * This is exactly as mandated by dlpack + */ +static void +dlpack_capsule_deleter(PyObject *self) { + if (PyCapsule_IsValid(self, NPY_DLPACK_VERSIONED_USED_CAPSULE_NAME)) { + return; + } + + DLManagedTensorVersioned *managed = + (DLManagedTensorVersioned *)PyCapsule_GetPointer( + self, NPY_DLPACK_VERSIONED_CAPSULE_NAME); if (managed == NULL) { - PyErr_WriteUnraisable(self); - goto done; + PyErr_WriteUnraisable(NULL); + return; } /* - * the spec says the deleter can be NULL if there is no way for the caller + * The spec says the deleter can be NULL if there is no way for the caller * to provide a reasonable destructor. */ if (managed->deleter) { managed->deleter(managed); - /* TODO: is the deleter allowed to set a python exception? 
*/ - assert(!PyErr_Occurred()); } - -done: - PyErr_Restore(type, value, traceback); } -/* used internally, almost identical to dlpack_capsule_deleter() */ -static void array_dlpack_internal_capsule_deleter(PyObject *self) -{ - /* an exception may be in-flight, we must save it in case we create another one */ - PyObject *type, *value, *traceback; - PyErr_Fetch(&type, &value, &traceback); +/* TODO: Basically same as above until dlpack v0 is removed: */ +static void +dlpack_capsule_deleter_unversioned(PyObject *self) { + if (PyCapsule_IsValid(self, NPY_DLPACK_USED_CAPSULE_NAME)) { + return; + } DLManagedTensor *managed = - (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_INTERNAL_CAPSULE_NAME); + (DLManagedTensor *)PyCapsule_GetPointer(self, NPY_DLPACK_CAPSULE_NAME); if (managed == NULL) { - PyErr_WriteUnraisable(self); - goto done; + PyErr_WriteUnraisable(NULL); + return; + } + + if (managed->deleter) { + managed->deleter(managed); + } +} + + +/* + * Deleter for the capsule used as a `base` in `from_dlpack`. + * + * This is almost identical to the above used internally as the base for our array + * so that we can consume (rename) the original capsule. + */ +static void +array_dlpack_internal_capsule_deleter(PyObject *self) +{ + DLManagedTensorVersioned *managed = + (DLManagedTensorVersioned *)PyCapsule_GetPointer( + self, NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME); + if (managed == NULL) { + PyErr_WriteUnraisable(NULL); + return; } /* * the spec says the deleter can be NULL if there is no way for the caller @@ -84,9 +128,24 @@ static void array_dlpack_internal_capsule_deleter(PyObject *self) /* TODO: is the deleter allowed to set a python exception? 
*/ assert(!PyErr_Occurred()); } +} + +/* TODO: Basically same as above until dlpack v0 is removed: */ +static void +array_dlpack_internal_capsule_deleter_unversioned(PyObject *self) +{ + DLManagedTensor *managed = + (DLManagedTensor *)PyCapsule_GetPointer( + self, NPY_DLPACK_INTERNAL_CAPSULE_NAME); + if (managed == NULL) { + PyErr_WriteUnraisable(NULL); + return; + } -done: - PyErr_Restore(type, value, traceback); + if (managed->deleter) { + managed->deleter(managed); + assert(!PyErr_Occurred()); + } } @@ -108,41 +167,33 @@ array_get_dl_device(PyArrayObject *self) { // The outer if is due to the fact that NumPy arrays are on the CPU // by default (if not created from DLPack). if (PyCapsule_IsValid(base, NPY_DLPACK_INTERNAL_CAPSULE_NAME)) { - DLManagedTensor *managed = PyCapsule_GetPointer( + DLManagedTensor *managed = (DLManagedTensor *)PyCapsule_GetPointer( base, NPY_DLPACK_INTERNAL_CAPSULE_NAME); if (managed == NULL) { return ret; } return managed->dl_tensor.device; } + else if (PyCapsule_IsValid(base, NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME)) { + DLManagedTensorVersioned *managed = (DLManagedTensorVersioned *)PyCapsule_GetPointer( + base, NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME); + if (managed == NULL) { + return ret; + } + return managed->dl_tensor.device; + } return ret; } -PyObject * -array_dlpack(PyArrayObject *self, - PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +/* + * Fill the dl_tensor struct from the `self` array. + * This struct could be versioned, but as of now is not. 
+ */ +static int +fill_dl_tensor_information( + DLTensor *dl_tensor, PyArrayObject *self, DLDevice *result_device) { - PyObject *stream = Py_None; - NPY_PREPARE_ARGPARSER; - if (npy_parse_arguments("__dlpack__", args, len_args, kwnames, - "$stream", NULL, &stream, NULL, NULL, NULL)) { - return NULL; - } - - if (stream != Py_None) { - PyErr_SetString(PyExc_RuntimeError, - "NumPy only supports stream=None."); - return NULL; - } - - if ( !(PyArray_FLAGS(self) & NPY_ARRAY_WRITEABLE)) { - PyErr_SetString(PyExc_BufferError, - "Cannot export readonly array since signalling readonly " - "is unsupported by DLPack."); - return NULL; - } - npy_intp itemsize = PyArray_ITEMSIZE(self); int ndim = PyArray_NDIM(self); npy_intp *strides = PyArray_STRIDES(self); @@ -154,7 +205,7 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports strides which are a multiple of " "itemsize."); - return NULL; + return -1; } } } @@ -165,7 +216,7 @@ array_dlpack(PyArrayObject *self, if (PyDataType_ISBYTESWAPPED(dtype)) { PyErr_SetString(PyExc_BufferError, "DLPack only supports native byte order."); - return NULL; + return -1; } managed_dtype.bits = 8 * itemsize; @@ -187,7 +238,7 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports IEEE floating point types " "without padding (longdouble typically is not IEEE)."); - return NULL; + return -1; } managed_dtype.code = kDLFloat; } @@ -198,7 +249,7 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports IEEE floating point types " "without padding (longdouble typically is not IEEE)."); - return NULL; + return -1; } managed_dtype.code = kDLComplex; } @@ -206,25 +257,9 @@ array_dlpack(PyArrayObject *self, PyErr_SetString(PyExc_BufferError, "DLPack only supports signed/unsigned integers, float " "and complex dtypes."); - return NULL; + return -1; } - DLDevice device = array_get_dl_device(self); - if (PyErr_Occurred()) { - return NULL; - } - - 
// ensure alignment - int offset = sizeof(DLManagedTensor) % sizeof(void *); - void *ptr = PyMem_Malloc(sizeof(DLManagedTensor) + offset + - (sizeof(int64_t) * ndim * 2)); - if (ptr == NULL) { - PyErr_NoMemory(); - return NULL; - } - - DLManagedTensor *managed = ptr; - /* * Note: the `dlpack.h` header suggests/standardizes that `data` must be * 256-byte aligned. We ignore this intentionally, because `__dlpack__` @@ -238,34 +273,94 @@ array_dlpack(PyArrayObject *self, * that NumPy MUST use `byte_offset` to adhere to the standard (as * specified in the header)! */ - managed->dl_tensor.data = PyArray_DATA(self); - managed->dl_tensor.byte_offset = 0; - managed->dl_tensor.device = device; - managed->dl_tensor.dtype = managed_dtype; - - int64_t *managed_shape_strides = (int64_t *)((char *)ptr + - sizeof(DLManagedTensor) + offset); + dl_tensor->data = PyArray_DATA(self); + dl_tensor->byte_offset = 0; + dl_tensor->device = *result_device; + dl_tensor->dtype = managed_dtype; - int64_t *managed_shape = managed_shape_strides; - int64_t *managed_strides = managed_shape_strides + ndim; for (int i = 0; i < ndim; ++i) { - managed_shape[i] = shape[i]; + dl_tensor->shape[i] = shape[i]; // Strides in DLPack are items; in NumPy are bytes. - managed_strides[i] = strides[i] / itemsize; + dl_tensor->strides[i] = strides[i] / itemsize; + } + + dl_tensor->ndim = ndim; + if (PyArray_IS_C_CONTIGUOUS(self)) { + /* No need to pass strides, so just NULL it again */ + dl_tensor->strides = NULL; + } + dl_tensor->byte_offset = 0; + + return 0; +} + + +static PyObject * +create_dlpack_capsule( + PyArrayObject *self, int versioned, DLDevice *result_device, int copied) +{ + int ndim = PyArray_NDIM(self); + + /* + * We align shape and strides at the end but need to align them, offset + * gives the offset of the shape (and strides) including the struct size. + */ + size_t align = sizeof(int64_t); + size_t struct_size = ( + versioned ? 
sizeof(DLManagedTensorVersioned) : sizeof(DLManagedTensor)); + + size_t offset = (struct_size + align - 1) / align * align; + void *ptr = PyMem_Malloc(offset + (sizeof(int64_t) * ndim * 2)); + if (ptr == NULL) { + PyErr_NoMemory(); + return NULL; + } + + DLTensor *dl_tensor; + PyCapsule_Destructor capsule_deleter; + const char *capsule_name; + + if (versioned) { + DLManagedTensorVersioned *managed = (DLManagedTensorVersioned *)ptr; + capsule_name = NPY_DLPACK_VERSIONED_CAPSULE_NAME; + capsule_deleter = (PyCapsule_Destructor)dlpack_capsule_deleter; + managed->deleter = array_dlpack_deleter; + managed->manager_ctx = self; + + dl_tensor = &managed->dl_tensor; + + /* The versioned tensor has additional fields that we need to set */ + managed->version.major = 1; + managed->version.minor = 0; + + managed->flags = 0; + if (!PyArray_CHKFLAGS(self, NPY_ARRAY_WRITEABLE)) { + managed->flags |= DLPACK_FLAG_BITMASK_READ_ONLY; + } + if (copied) { + managed->flags |= DLPACK_FLAG_BITMASK_IS_COPIED; + } } + else { + DLManagedTensor *managed = (DLManagedTensor *)ptr; + capsule_name = NPY_DLPACK_CAPSULE_NAME; + capsule_deleter = (PyCapsule_Destructor)dlpack_capsule_deleter_unversioned; + managed->deleter = array_dlpack_deleter_unversioned; + managed->manager_ctx = self; - managed->dl_tensor.ndim = ndim; - managed->dl_tensor.shape = managed_shape; - managed->dl_tensor.strides = NULL; - if (PyArray_SIZE(self) != 1 && !PyArray_IS_C_CONTIGUOUS(self)) { - managed->dl_tensor.strides = managed_strides; + dl_tensor = &managed->dl_tensor; } - managed->dl_tensor.byte_offset = 0; - managed->manager_ctx = self; - managed->deleter = array_dlpack_deleter; - PyObject *capsule = PyCapsule_New(managed, NPY_DLPACK_CAPSULE_NAME, - dlpack_capsule_deleter); + dl_tensor->shape = (int64_t *)((char *)ptr + offset); + /* Note that strides may be set to NULL later if C-contiguous */ + dl_tensor->strides = dl_tensor->shape + ndim; + + if (fill_dl_tensor_information(dl_tensor, self, result_device) < 0) { + 
PyMem_Free(ptr); + return NULL; + } + + PyObject *capsule = PyCapsule_New(ptr, capsule_name, capsule_deleter); if (capsule == NULL) { PyMem_Free(ptr); return NULL; @@ -273,10 +368,119 @@ array_dlpack(PyArrayObject *self, // the capsule holds a reference Py_INCREF(self); + return capsule; } -PyObject * + +static int +device_converter(PyObject *obj, DLDevice *result_device) +{ + int type, id; + if (obj == Py_None) { + return NPY_SUCCEED; + } + if (!PyTuple_Check(obj)) { + PyErr_SetString(PyExc_TypeError, "dl_device must be a tuple"); + return NPY_FAIL; + } + if (!PyArg_ParseTuple(obj, "ii", &type, &id)) { + return NPY_FAIL; + } + /* We can honor the request if matches the existing one or is CPU */ + if (type == result_device->device_type && id == result_device->device_id) { + return NPY_SUCCEED; + } + if (type == kDLCPU && id == 0) { + result_device->device_type = type; + result_device->device_id = id; + return NPY_SUCCEED; + } + + PyErr_SetString(PyExc_ValueError, "unsupported device requested"); + return NPY_FAIL; +} + + +NPY_NO_EXPORT PyObject * +array_dlpack(PyArrayObject *self, + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *stream = Py_None; + PyObject *max_version = Py_None; + NPY_COPYMODE copy_mode = NPY_COPY_IF_NEEDED; + long major_version = 0; + /* We allow the user to request a result device in principle. 
*/ + DLDevice result_device = array_get_dl_device(self); + if (PyErr_Occurred()) { + return NULL; + } + + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("__dlpack__", args, len_args, kwnames, + "$stream", NULL, &stream, + "$max_version", NULL, &max_version, + "$dl_device", &device_converter, &result_device, + "$copy", &PyArray_CopyConverter, ©_mode, + NULL, NULL, NULL)) { + return NULL; + } + + if (max_version != Py_None) { + if (!PyTuple_Check(max_version) || PyTuple_GET_SIZE(max_version) != 2) { + PyErr_SetString(PyExc_TypeError, + "max_version must be None or a tuple with two elements."); + return NULL; + } + major_version = PyLong_AsLong(PyTuple_GET_ITEM(max_version, 0)); + if (major_version == -1 && PyErr_Occurred()) { + return NULL; + } + } + + if (stream != Py_None) { + PyErr_SetString(PyExc_RuntimeError, + "NumPy only supports stream=None."); + return NULL; + } + + /* If the user requested a copy be made, honor that here already */ + if (copy_mode == NPY_COPY_ALWAYS) { + /* TODO: It may be good to check ability to export dtype first. */ + self = (PyArrayObject *)PyArray_NewCopy(self, NPY_KEEPORDER); + if (self == NULL) { + return NULL; + } + } + else { + Py_INCREF(self); + } + + if (major_version < 1 && !(PyArray_FLAGS(self) & NPY_ARRAY_WRITEABLE)) { + PyErr_SetString(PyExc_BufferError, + "Cannot export readonly array since signalling readonly " + "is unsupported by DLPack (supported by newer DLPack version)."); + Py_DECREF(self); + return NULL; + } + + /* + * TODO: The versioned and non-versioned structs of DLPack are very + * similar but not ABI compatible so that the function called here requires + * branching (templating didn't seem worthwhile). + * + * Version 0 support should be deprecated in NumPy 2.1 and the branches + * can then be removed again. 
+ */ + PyObject *res = create_dlpack_capsule( + self, major_version >= 1, &result_device, + copy_mode == NPY_COPY_ALWAYS); + Py_DECREF(self); + + return res; +} + +NPY_NO_EXPORT PyObject * array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)) { DLDevice device = array_get_dl_device(self); @@ -287,23 +491,120 @@ array_dlpack_device(PyArrayObject *self, PyObject *NPY_UNUSED(args)) } NPY_NO_EXPORT PyObject * -from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { - PyObject *capsule = PyObject_CallMethod((PyObject *)obj->ob_type, - "__dlpack__", "O", obj); - if (capsule == NULL) { +from_dlpack(PyObject *NPY_UNUSED(self), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) +{ + PyObject *obj, *copy = Py_None, *device = Py_None; + NPY_PREPARE_ARGPARSER; + if (npy_parse_arguments("from_dlpack", args, len_args, kwnames, + "obj", NULL, &obj, + "$copy", NULL, ©, + "$device", NULL, &device, + NULL, NULL, NULL) < 0) { return NULL; } - DLManagedTensor *managed = - (DLManagedTensor *)PyCapsule_GetPointer(capsule, - NPY_DLPACK_CAPSULE_NAME); + /* Prepare the arguments to call objects __dlpack__() method */ + static PyObject *call_kwnames = NULL; + static PyObject *dl_cpu_device_tuple = NULL; + static PyObject *max_version = NULL; - if (managed == NULL) { - Py_DECREF(capsule); - return NULL; + if (call_kwnames == NULL) { + call_kwnames = Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + if (call_kwnames == NULL) { + return NULL; + } + } + if (dl_cpu_device_tuple == NULL) { + dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); + if (dl_cpu_device_tuple == NULL) { + return NULL; + } + } + if (max_version == NULL) { + max_version = Py_BuildValue("(i,i)", 1, 0); + if (max_version == NULL) { + return NULL; + } } - const int ndim = managed->dl_tensor.ndim; + /* + * Prepare arguments for the full call. We always forward copy and pass + * our max_version. 
`device` is always passed as `None`, but if the user + * provided a device, we will replace it with the "cpu": (1, 0). + */ + PyObject *call_args[] = {obj, Py_None, copy, max_version}; + Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; + + /* If device is passed it must be "cpu" and replace it with (1, 0) */ + if (device != Py_None) { + /* test that device is actually CPU */ + NPY_DEVICE device_request = NPY_DEVICE_CPU; + if (!PyArray_DeviceConverterOptional(device, &device_request)) { + return NULL; + } + assert(device_request == NPY_DEVICE_CPU); + call_args[1] = dl_cpu_device_tuple; + } + + + PyObject *capsule = PyObject_VectorcallMethod( + npy_interned_str.__dlpack__, call_args, nargsf, call_kwnames); + if (capsule == NULL) { + /* + * TODO: This path should be deprecated in NumPy 2.1. Once deprecated + * the below code can be simplified w.r.t. to versioned/unversioned. + * + * We try without any arguments if both device and copy are None, + * since the exporter may not support older versions of the protocol. 
+ */ + if (PyErr_ExceptionMatches(PyExc_TypeError) + && device == Py_None && copy == Py_None) { + /* max_version may be unsupported, try without kwargs */ + PyErr_Clear(); + capsule = PyObject_VectorcallMethod( + npy_interned_str.__dlpack__, call_args, nargsf, NULL); + } + if (capsule == NULL) { + return NULL; + } + } + + void *managed_ptr; + DLTensor dl_tensor; + int readonly; + int versioned = PyCapsule_IsValid(capsule, NPY_DLPACK_VERSIONED_CAPSULE_NAME); + if (versioned) { + managed_ptr = PyCapsule_GetPointer(capsule, NPY_DLPACK_VERSIONED_CAPSULE_NAME); + DLManagedTensorVersioned *managed = (DLManagedTensorVersioned *)managed_ptr; + if (managed == NULL) { + Py_DECREF(capsule); + return NULL; + } + + if (managed->version.major > 1) { + PyErr_SetString(PyExc_BufferError, + "from_dlpack(): the exported DLPack major version is too " + "high to be imported by this version of NumPy."); + Py_DECREF(capsule); + return NULL; + } + + dl_tensor = managed->dl_tensor; + readonly = (managed->flags & DLPACK_FLAG_BITMASK_READ_ONLY) != 0; + } + else { + managed_ptr = PyCapsule_GetPointer(capsule, NPY_DLPACK_CAPSULE_NAME); + DLManagedTensor *managed = (DLManagedTensor *)managed_ptr; + if (managed == NULL) { + Py_DECREF(capsule); + return NULL; + } + dl_tensor = managed->dl_tensor; + readonly = 0; + } + + const int ndim = dl_tensor.ndim; if (ndim > NPY_MAXDIMS) { PyErr_SetString(PyExc_RuntimeError, "maxdims of DLPack tensor is higher than the supported " @@ -312,7 +613,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { return NULL; } - DLDeviceType device_type = managed->dl_tensor.device.device_type; + DLDeviceType device_type = dl_tensor.device.device_type; if (device_type != kDLCPU && device_type != kDLCUDAHost && device_type != kDLROCMHost && @@ -323,7 +624,7 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { return NULL; } - if (managed->dl_tensor.dtype.lanes != 1) { + if (dl_tensor.dtype.lanes != 1) { PyErr_SetString(PyExc_RuntimeError, "Unsupported lanes 
in DLTensor dtype."); Py_DECREF(capsule); @@ -331,9 +632,9 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { } int typenum = -1; - const uint8_t bits = managed->dl_tensor.dtype.bits; + const uint8_t bits = dl_tensor.dtype.bits; const npy_intp itemsize = bits / 8; - switch (managed->dl_tensor.dtype.code) { + switch (dl_tensor.dtype.code) { case kDLBool: if (bits == 8) { typenum = NPY_BOOL; @@ -385,15 +686,14 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { npy_intp strides[NPY_MAXDIMS]; for (int i = 0; i < ndim; ++i) { - shape[i] = managed->dl_tensor.shape[i]; + shape[i] = dl_tensor.shape[i]; // DLPack has elements as stride units, NumPy has bytes. - if (managed->dl_tensor.strides != NULL) { - strides[i] = managed->dl_tensor.strides[i] * itemsize; + if (dl_tensor.strides != NULL) { + strides[i] = dl_tensor.strides[i] * itemsize; } } - char *data = (char *)managed->dl_tensor.data + - managed->dl_tensor.byte_offset; + char *data = (char *)dl_tensor.data + dl_tensor.byte_offset; PyArray_Descr *descr = PyArray_DescrFromType(typenum); if (descr == NULL) { @@ -402,15 +702,27 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { } PyObject *ret = PyArray_NewFromDescr(&PyArray_Type, descr, ndim, shape, - managed->dl_tensor.strides != NULL ? strides : NULL, data, 0, NULL); + dl_tensor.strides != NULL ? 
strides : NULL, data, 0, NULL); if (ret == NULL) { Py_DECREF(capsule); return NULL; } + if (readonly) { + PyArray_CLEARFLAGS((PyArrayObject *)ret, NPY_ARRAY_WRITEABLE); + } - PyObject *new_capsule = PyCapsule_New(managed, + PyObject *new_capsule; + if (versioned) { + new_capsule = PyCapsule_New(managed_ptr, + NPY_DLPACK_VERSIONED_INTERNAL_CAPSULE_NAME, + (PyCapsule_Destructor)array_dlpack_internal_capsule_deleter); + } + else { + new_capsule = PyCapsule_New(managed_ptr, NPY_DLPACK_INTERNAL_CAPSULE_NAME, - array_dlpack_internal_capsule_deleter); + (PyCapsule_Destructor)array_dlpack_internal_capsule_deleter_unversioned); + } + if (new_capsule == NULL) { Py_DECREF(capsule); Py_DECREF(ret); @@ -423,7 +735,10 @@ from_dlpack(PyObject *NPY_UNUSED(self), PyObject *obj) { return NULL; } - if (PyCapsule_SetName(capsule, NPY_DLPACK_USED_CAPSULE_NAME) < 0) { + const char *new_name = ( + versioned ? NPY_DLPACK_VERSIONED_USED_CAPSULE_NAME + : NPY_DLPACK_USED_CAPSULE_NAME); + if (PyCapsule_SetName(capsule, new_name) < 0) { Py_DECREF(capsule); Py_DECREF(ret); return NULL; diff --git a/numpy/_core/src/multiarray/dragon4.c b/numpy/_core/src/multiarray/dragon4.c index 480b78bdbb32..7cd8afbed6d8 100644 --- a/numpy/_core/src/multiarray/dragon4.c +++ b/numpy/_core/src/multiarray/dragon4.c @@ -163,28 +163,7 @@ typedef struct { char repr[16384]; } Dragon4_Scratch; -static int _bigint_static_in_use = 0; -static Dragon4_Scratch _bigint_static; - -static Dragon4_Scratch* -get_dragon4_bigint_scratch(void) { - /* this test+set is not threadsafe, but no matter because we have GIL */ - if (_bigint_static_in_use) { - PyErr_SetString(PyExc_RuntimeError, - "numpy float printing code is not re-entrant. 
" - "Ping the devs to fix it."); - return NULL; - } - _bigint_static_in_use = 1; - - /* in this dummy implementation we only return the static allocation */ - return &_bigint_static; -} - -static void -free_dragon4_bigint_scratch(Dragon4_Scratch *mem){ - _bigint_static_in_use = 0; -} +static NPY_TLS Dragon4_Scratch _bigint_static; /* Copy integer */ static void @@ -2210,11 +2189,11 @@ Format_floatbits(char *buffer, npy_uint32 bufferSize, BigInt *mantissa, */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary16( - Dragon4_Scratch *scratch, npy_half *value, Dragon4_Options *opt) + npy_half *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint16 val = *value; npy_uint32 floatExponent, floatMantissa, floatSign; @@ -2297,12 +2276,12 @@ Dragon4_PrintFloat_IEEE_binary16( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary32( - Dragon4_Scratch *scratch, npy_float32 *value, + npy_float32 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; union { @@ -2390,11 +2369,11 @@ Dragon4_PrintFloat_IEEE_binary32( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary64( - Dragon4_Scratch *scratch, npy_float64 *value, Dragon4_Options *opt) + npy_float64 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; union { @@ -2505,11 +2484,11 @@ typedef struct 
FloatVal128 { */ static npy_uint32 Dragon4_PrintFloat_Intel_extended( - Dragon4_Scratch *scratch, FloatVal128 value, Dragon4_Options *opt) + FloatVal128 value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint32 floatExponent, floatSign; npy_uint64 floatMantissa; @@ -2603,7 +2582,7 @@ Dragon4_PrintFloat_Intel_extended( */ static npy_uint32 Dragon4_PrintFloat_Intel_extended80( - Dragon4_Scratch *scratch, npy_float80 *value, Dragon4_Options *opt) + npy_float80 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2619,7 +2598,7 @@ Dragon4_PrintFloat_Intel_extended80( val128.lo = buf80.integer.a; val128.hi = buf80.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_10_BYTES_LE */ @@ -2627,7 +2606,7 @@ Dragon4_PrintFloat_Intel_extended80( /* Intel's 80-bit IEEE extended precision format, 96-bit storage */ static npy_uint32 Dragon4_PrintFloat_Intel_extended96( - Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt) + npy_float96 *value, Dragon4_Options *opt) { FloatVal128 val128; union { @@ -2643,7 +2622,7 @@ Dragon4_PrintFloat_Intel_extended96( val128.lo = buf96.integer.a; val128.hi = buf96.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE */ @@ -2651,7 +2630,7 @@ Dragon4_PrintFloat_Intel_extended96( /* Motorola Big-endian equivalent of the Intel-extended 96 fp format */ static npy_uint32 Dragon4_PrintFloat_Motorola_extended96( - Dragon4_Scratch *scratch, npy_float96 *value, Dragon4_Options *opt) + npy_float96 *value, Dragon4_Options *opt) { 
FloatVal128 val128; union { @@ -2668,7 +2647,7 @@ Dragon4_PrintFloat_Motorola_extended96( val128.hi = buf96.integer.a >> 16; /* once again we assume the int has same endianness as the float */ - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE */ @@ -2688,7 +2667,7 @@ typedef union FloatUnion128 /* Intel's 80-bit IEEE extended precision format, 128-bit storage */ static npy_uint32 Dragon4_PrintFloat_Intel_extended128( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2698,7 +2677,7 @@ Dragon4_PrintFloat_Intel_extended128( val128.lo = buf128.integer.a; val128.hi = buf128.integer.b; - return Dragon4_PrintFloat_Intel_extended(scratch, val128, opt); + return Dragon4_PrintFloat_Intel_extended(val128, opt); } #endif /* HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE */ @@ -2717,11 +2696,11 @@ Dragon4_PrintFloat_Intel_extended128( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary128( - Dragon4_Scratch *scratch, FloatVal128 val128, Dragon4_Options *opt) + FloatVal128 val128, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; npy_uint32 floatExponent, floatSign; @@ -2802,7 +2781,7 @@ Dragon4_PrintFloat_IEEE_binary128( #if defined(HAVE_LDOUBLE_IEEE_QUAD_LE) static npy_uint32 Dragon4_PrintFloat_IEEE_binary128_le( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2811,7 +2790,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( val128.lo = buf128.integer.a; val128.hi = buf128.integer.b; - return 
Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt); + return Dragon4_PrintFloat_IEEE_binary128(val128, opt); } #endif /* HAVE_LDOUBLE_IEEE_QUAD_LE */ @@ -2822,7 +2801,7 @@ Dragon4_PrintFloat_IEEE_binary128_le( */ static npy_uint32 Dragon4_PrintFloat_IEEE_binary128_be( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { FloatVal128 val128; FloatUnion128 buf128; @@ -2831,7 +2810,7 @@ Dragon4_PrintFloat_IEEE_binary128_be( val128.lo = buf128.integer.b; val128.hi = buf128.integer.a; - return Dragon4_PrintFloat_IEEE_binary128(scratch, val128, opt); + return Dragon4_PrintFloat_IEEE_binary128(val128, opt); } #endif /* HAVE_LDOUBLE_IEEE_QUAD_BE */ @@ -2877,11 +2856,11 @@ Dragon4_PrintFloat_IEEE_binary128_be( */ static npy_uint32 Dragon4_PrintFloat_IBM_double_double( - Dragon4_Scratch *scratch, npy_float128 *value, Dragon4_Options *opt) + npy_float128 *value, Dragon4_Options *opt) { - char *buffer = scratch->repr; - const npy_uint32 bufferSize = sizeof(scratch->repr); - BigInt *bigints = scratch->bigints; + char *buffer = _bigint_static.repr; + const npy_uint32 bufferSize = sizeof(_bigint_static.repr); + BigInt *bigints = _bigint_static.bigints; FloatVal128 val128; FloatUnion128 buf128; @@ -3068,16 +3047,10 @@ PyObject *\ Dragon4_Positional_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ {\ PyObject *ret;\ - Dragon4_Scratch *scratch = get_dragon4_bigint_scratch();\ - if (scratch == NULL) {\ - return NULL;\ - }\ - if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\ - free_dragon4_bigint_scratch(scratch);\ + if (Dragon4_PrintFloat_##format(val, opt) < 0) {\ return NULL;\ }\ - ret = PyUnicode_FromString(scratch->repr);\ - free_dragon4_bigint_scratch(scratch);\ + ret = PyUnicode_FromString(_bigint_static.repr);\ return ret;\ }\ \ @@ -3106,16 +3079,10 @@ PyObject *\ Dragon4_Scientific_##Type##_opt(npy_type *val, Dragon4_Options *opt)\ {\ PyObject *ret;\ - Dragon4_Scratch *scratch = 
get_dragon4_bigint_scratch();\ - if (scratch == NULL) {\ - return NULL;\ - }\ - if (Dragon4_PrintFloat_##format(scratch, val, opt) < 0) {\ - free_dragon4_bigint_scratch(scratch);\ + if (Dragon4_PrintFloat_##format(val, opt) < 0) { \ return NULL;\ }\ - ret = PyUnicode_FromString(scratch->repr);\ - free_dragon4_bigint_scratch(scratch);\ + ret = PyUnicode_FromString(_bigint_static.repr);\ return ret;\ }\ PyObject *\ diff --git a/numpy/_core/src/multiarray/dragon4.h b/numpy/_core/src/multiarray/dragon4.h index 0e29c42e3c09..8986c1672e71 100644 --- a/numpy/_core/src/multiarray/dragon4.h +++ b/numpy/_core/src/multiarray/dragon4.h @@ -38,7 +38,7 @@ #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "numpy/arrayscalars.h" /* Half binary format */ diff --git a/numpy/_core/src/multiarray/dtype_transfer.c b/numpy/_core/src/multiarray/dtype_transfer.c index 513b2e6be478..d7a5e80800b6 100644 --- a/numpy/_core/src/multiarray/dtype_transfer.c +++ b/numpy/_core/src/multiarray/dtype_transfer.c @@ -21,7 +21,7 @@ #include "numpy/npy_math.h" #include "lowlevel_strided_loops.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "ctors.h" diff --git a/numpy/_core/src/multiarray/dtype_traversal.c b/numpy/_core/src/multiarray/dtype_traversal.c index 0402ad2c084d..91b1889b7d1f 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.c +++ b/numpy/_core/src/multiarray/dtype_traversal.c @@ -32,7 +32,7 @@ typedef int get_traverse_func_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *clear_info, NPY_ARRAYMETHOD_FLAGS *flags); @@ -42,7 +42,7 @@ typedef int get_traverse_func_function( static int get_clear_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *clear_info, 
NPY_ARRAYMETHOD_FLAGS *flags) { @@ -99,7 +99,7 @@ PyArray_GetClearFunction( static int get_zerofill_function( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp stride, NPY_traverse_info *zerofill_info, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -136,7 +136,7 @@ get_zerofill_function( static int clear_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { @@ -156,7 +156,7 @@ clear_object_strided_loop( NPY_NO_EXPORT int npy_get_clear_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) @@ -171,7 +171,7 @@ npy_get_clear_object_strided_loop( static int fill_zero_object_strided_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { @@ -188,7 +188,7 @@ fill_zero_object_strided_loop( NPY_NO_EXPORT int npy_object_get_fill_zero_loop(void *NPY_UNUSED(traverse_context), - PyArray_Descr *NPY_UNUSED(descr), + const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, @@ -275,7 +275,7 @@ fields_traverse_data_clone(NpyAuxData *data) static int traverse_fields_function( - void *traverse_context, _PyArray_LegacyDescr *NPY_UNUSED(descr), + void *traverse_context, const _PyArray_LegacyDescr *NPY_UNUSED(descr), char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { @@ -315,7 +315,7 @@ 
traverse_fields_function( static int get_fields_traverse_function( - void *traverse_context, _PyArray_LegacyDescr *dtype, int NPY_UNUSED(aligned), + void *traverse_context, const _PyArray_LegacyDescr *dtype, int NPY_UNUSED(aligned), npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags, get_traverse_func_function *get_traverse_func) @@ -398,13 +398,6 @@ subarray_traverse_data_free(NpyAuxData *data) } -/* - * We seem to be neither using nor exposing this right now, so leave it NULL. - * (The implementation below should be functional.) - */ -#define subarray_traverse_data_clone NULL - -#ifndef subarray_traverse_data_clone /* traverse data copy function */ static NpyAuxData * subarray_traverse_data_clone(NpyAuxData *data) @@ -426,19 +419,18 @@ subarray_traverse_data_clone(NpyAuxData *data) return (NpyAuxData *)newdata; } -#endif static int traverse_subarray_func( - void *traverse_context, PyArray_Descr *NPY_UNUSED(descr), + void *traverse_context, const PyArray_Descr *NPY_UNUSED(descr), char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { subarray_traverse_data *subarr_data = (subarray_traverse_data *)auxdata; PyArrayMethod_TraverseLoop *func = subarr_data->info.func; - PyArray_Descr *sub_descr = subarr_data->info.descr; + const PyArray_Descr *sub_descr = subarr_data->info.descr; npy_intp sub_N = subarr_data->count; NpyAuxData *sub_auxdata = subarr_data->info.auxdata; npy_intp sub_stride = sub_descr->elsize; @@ -456,7 +448,7 @@ traverse_subarray_func( static int get_subarray_traverse_func( - void *traverse_context, PyArray_Descr *dtype, int aligned, + void *traverse_context, const PyArray_Descr *dtype, int aligned, npy_intp size, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags, get_traverse_func_function *get_traverse_func) @@ -469,7 +461,7 @@ get_subarray_traverse_func( auxdata->count = size; auxdata->base.free = 
&subarray_traverse_data_free; - auxdata->base.clone = subarray_traverse_data_clone; + auxdata->base.clone = &subarray_traverse_data_clone; if (get_traverse_func( traverse_context, dtype, aligned, @@ -493,7 +485,7 @@ get_subarray_traverse_func( static int clear_no_op( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), char *NPY_UNUSED(data), npy_intp NPY_UNUSED(size), npy_intp NPY_UNUSED(stride), NpyAuxData *NPY_UNUSED(auxdata)) { @@ -502,7 +494,7 @@ clear_no_op( NPY_NO_EXPORT int npy_get_clear_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) { @@ -569,7 +561,7 @@ npy_get_clear_void_and_legacy_user_dtype_loop( static int zerofill_fields_function( - void *traverse_context, _PyArray_LegacyDescr *descr, + void *traverse_context, const _PyArray_LegacyDescr *descr, char *data, npy_intp N, npy_intp stride, NpyAuxData *auxdata) { @@ -598,7 +590,7 @@ zerofill_fields_function( */ NPY_NO_EXPORT int npy_get_zerofill_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags) { diff --git a/numpy/_core/src/multiarray/dtype_traversal.h b/numpy/_core/src/multiarray/dtype_traversal.h index bd3918ba4b65..5e915ba4d40e 100644 --- a/numpy/_core/src/multiarray/dtype_traversal.h +++ b/numpy/_core/src/multiarray/dtype_traversal.h @@ -7,14 +7,14 @@ NPY_NO_EXPORT int npy_get_clear_object_strided_loop( - void *traverse_context, PyArray_Descr *descr, int aligned, + void *traverse_context, const PyArray_Descr *descr, int aligned, npy_intp 
fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_traversedata, NPY_ARRAYMETHOD_FLAGS *flags); NPY_NO_EXPORT int npy_get_clear_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *descr, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *descr, int aligned, npy_intp fixed_stride, PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_traversedata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -23,14 +23,14 @@ npy_get_clear_void_and_legacy_user_dtype_loop( NPY_NO_EXPORT int npy_object_get_fill_zero_loop( - void *NPY_UNUSED(traverse_context), PyArray_Descr *NPY_UNUSED(descr), + void *NPY_UNUSED(traverse_context), const PyArray_Descr *NPY_UNUSED(descr), int NPY_UNUSED(aligned), npy_intp NPY_UNUSED(fixed_stride), PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **NPY_UNUSED(out_auxdata), NPY_ARRAYMETHOD_FLAGS *flags); NPY_NO_EXPORT int npy_get_zerofill_void_and_legacy_user_dtype_loop( - void *traverse_context, _PyArray_LegacyDescr *dtype, int aligned, + void *traverse_context, const _PyArray_LegacyDescr *dtype, int aligned, npy_intp stride, PyArrayMethod_TraverseLoop **out_func, NpyAuxData **out_auxdata, NPY_ARRAYMETHOD_FLAGS *flags); @@ -40,7 +40,7 @@ npy_get_zerofill_void_and_legacy_user_dtype_loop( typedef struct { PyArrayMethod_TraverseLoop *func; NpyAuxData *auxdata; - PyArray_Descr *descr; + const PyArray_Descr *descr; } NPY_traverse_info; @@ -69,18 +69,22 @@ static inline int NPY_traverse_info_copy( NPY_traverse_info *traverse_info, NPY_traverse_info *original) { - traverse_info->func = NULL; + /* Note that original may be identical to traverse_info! 
*/ if (original->func == NULL) { /* Allow copying also of unused clear info */ + traverse_info->func = NULL; return 0; } - traverse_info->auxdata = NULL; if (original->auxdata != NULL) { traverse_info->auxdata = NPY_AUXDATA_CLONE(original->auxdata); if (traverse_info->auxdata == NULL) { + traverse_info->func = NULL; return -1; } } + else { + traverse_info->auxdata = NULL; + } Py_INCREF(original->descr); traverse_info->descr = original->descr; traverse_info->func = original->func; diff --git a/numpy/_core/src/multiarray/dtypemeta.c b/numpy/_core/src/multiarray/dtypemeta.c index 626b3bde1032..316a61d31da4 100644 --- a/numpy/_core/src/multiarray/dtypemeta.c +++ b/numpy/_core/src/multiarray/dtypemeta.c @@ -9,7 +9,7 @@ #include #include #include -#include "npy_pycompat.h" + #include "npy_import.h" #include "abstractdtypes.h" @@ -26,6 +26,8 @@ #include "templ_common.h" #include "refcount.h" #include "dtype_traversal.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" #include @@ -157,9 +159,8 @@ PyArray_ArrFuncs default_funcs = { /* * Internal version of PyArrayInitDTypeMeta_FromSpec. * - * See the documentation of that function for more details. Does not do any - * error checking. - + * See the documentation of that function for more details. + * * Setting priv to a nonzero value indicates that a dtypemeta is being * initialized from inside NumPy, otherwise this function is being called by * the public implementation. 
@@ -444,12 +445,6 @@ string_unicode_new(PyArray_DTypeMeta *self, PyObject *args, PyObject *kwargs) return NULL; } - PyArray_Descr *res = PyArray_DescrNewFromType(self->type_num); - - if (res == NULL) { - return NULL; - } - if (self->type_num == NPY_UNICODE) { // unicode strings are 4 bytes per character if (npy_mul_sizes_with_overflow(&size, size, 4)) { @@ -466,6 +461,12 @@ string_unicode_new(PyArray_DTypeMeta *self, PyObject *args, PyObject *kwargs) return NULL; } + PyArray_Descr *res = PyArray_DescrNewFromType(self->type_num); + + if (res == NULL) { + return NULL; + } + res->elsize = (int)size; return (PyObject *)res; } @@ -752,7 +753,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->subarray == NULL && descr1->names == NULL && descr2->subarray == NULL && descr2->names == NULL) { if (descr1->elsize != descr2->elsize) { - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "Invalid type promotion with void datatypes of different " "lengths. 
Use the `np.bytes_` datatype instead to pad the " "shorter value with trailing zero bytes."); @@ -764,13 +765,13 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) if (descr1->names != NULL && descr2->names != NULL) { /* If both have fields promoting individual fields may be possible */ - static PyObject *promote_fields_func = NULL; - npy_cache_import("numpy._core._internal", "_promote_fields", - &promote_fields_func); - if (promote_fields_func == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_promote_fields", + &npy_runtime_imports._promote_fields) == -1) { return NULL; } - PyObject *result = PyObject_CallFunctionObjArgs(promote_fields_func, + PyObject *result = PyObject_CallFunctionObjArgs( + npy_runtime_imports._promote_fields, descr1, descr2, NULL); if (result == NULL) { return NULL; @@ -791,7 +792,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return NULL; } if (!cmp) { - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "invalid type promotion with subarray datatypes " "(shape mismatch)."); return NULL; @@ -821,7 +822,7 @@ void_common_instance(_PyArray_LegacyDescr *descr1, _PyArray_LegacyDescr *descr2) return new_descr; } - PyErr_SetString(npy_DTypePromotionError, + PyErr_SetString(npy_static_pydata.DTypePromotionError, "invalid type promotion with structured datatype(s)."); return NULL; } @@ -838,22 +839,13 @@ python_builtins_are_known_scalar_types( * This is necessary only for python scalar classes which we discover * as valid DTypes. 
*/ - if (pytype == &PyFloat_Type) { - return 1; - } - if (pytype == &PyLong_Type) { - return 1; - } - if (pytype == &PyBool_Type) { - return 1; - } - if (pytype == &PyComplex_Type) { - return 1; - } - if (pytype == &PyUnicode_Type) { - return 1; - } - if (pytype == &PyBytes_Type) { + if (pytype == &PyFloat_Type || + pytype == &PyLong_Type || + pytype == &PyBool_Type || + pytype == &PyComplex_Type || + pytype == &PyUnicode_Type || + pytype == &PyBytes_Type) + { return 1; } return 0; @@ -920,12 +912,15 @@ static PyArray_DTypeMeta * default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) { assert(cls->type_num < NPY_NTYPES_LEGACY); - if (NPY_UNLIKELY(NPY_DT_is_abstract(other))) { + if (NPY_UNLIKELY(!NPY_DT_is_legacy(other))) { /* - * The abstract complex has a lower priority than the concrete inexact - * types to ensure the correct promotion with integers. + * Deal with the non-legacy types we understand: python scalars. + * These may have lower priority than the concrete inexact types, + * but can change the type of the result (complex, float, int). + * If our own DType is not numerical or has lower priority (e.g. + * integer but abstract one is float), signal not implemented. 
*/ - if (other == &PyArray_PyComplexAbstractDType) { + if (other == &PyArray_PyComplexDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num)) { Py_INCREF(cls); return cls; @@ -940,14 +935,14 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return NPY_DT_NewRef(&PyArray_CLongDoubleDType); } } - else if (other == &PyArray_PyFloatAbstractDType) { + else if (other == &PyArray_PyFloatDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num) || PyTypeNum_ISFLOAT(cls->type_num)) { Py_INCREF(cls); return cls; } } - else if (other == &PyArray_PyIntAbstractDType) { + else if (other == &PyArray_PyLongDType) { if (PyTypeNum_ISCOMPLEX(cls->type_num) || PyTypeNum_ISFLOAT(cls->type_num) || PyTypeNum_ISINTEGER(cls->type_num) @@ -956,8 +951,10 @@ default_builtin_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other) return cls; } } + Py_INCREF(Py_NotImplemented); + return (PyArray_DTypeMeta *)Py_NotImplemented; } - if (!NPY_DT_is_legacy(other) || other->type_num > cls->type_num) { + if (other->type_num > cls->type_num) { /* * Let the more generic (larger type number) DType handle this * (note that half is after all others, which works out here.) @@ -1072,8 +1069,9 @@ object_common_dtype( * @returns 0 on success, -1 on failure. 
*/ NPY_NO_EXPORT int -dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, - PyArray_ArrFuncs *arr_funcs, const char *name, const char *alias) +dtypemeta_wrap_legacy_descriptor( + _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, + PyTypeObject *dtype_super_class, const char *name, const char *alias) { int has_type_set = Py_TYPE(descr) == &PyArrayDescr_Type; @@ -1127,7 +1125,7 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, .tp_name = NULL, /* set below */ .tp_basicsize = sizeof(_PyArray_LegacyDescr), .tp_flags = Py_TPFLAGS_DEFAULT, - .tp_base = &PyArrayDescr_Type, + .tp_base = NULL, /* set below */ .tp_new = (newfunc)legacy_dtype_default_new, .tp_doc = ( "DType class corresponding to the scalar type and dtype of " @@ -1140,11 +1138,12 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, /* Further fields are not common between DTypes */ }; memcpy(dtype_class, &prototype, sizeof(PyArray_DTypeMeta)); - /* Fix name of the Type*/ + /* Fix name and superclass of the Type*/ ((PyTypeObject *)dtype_class)->tp_name = name; + ((PyTypeObject *)dtype_class)->tp_base = dtype_super_class, dtype_class->dt_slots = dt_slots; - /* Let python finish the initialization (probably unnecessary) */ + /* Let python finish the initialization */ if (PyType_Ready((PyTypeObject *)dtype_class) < 0) { Py_DECREF(dtype_class); return -1; @@ -1240,14 +1239,13 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, /* And it to the types submodule if it is a builtin dtype */ if (!PyTypeNum_ISUSERDEF(descr->type_num)) { - static PyObject *add_dtype_helper = NULL; - npy_cache_import("numpy.dtypes", "_add_dtype_helper", &add_dtype_helper); - if (add_dtype_helper == NULL) { + if (npy_cache_import_runtime("numpy.dtypes", "_add_dtype_helper", + &npy_runtime_imports._add_dtype_helper) == -1) { return -1; } if (PyObject_CallFunction( - add_dtype_helper, + npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)dtype_class, alias) == NULL) { return -1; 
} @@ -1258,22 +1256,22 @@ dtypemeta_wrap_legacy_descriptor(_PyArray_LegacyDescr *descr, static PyObject * -dtypemeta_get_abstract(PyArray_DTypeMeta *self) { +dtypemeta_get_abstract(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_abstract(self)); } static PyObject * -dtypemeta_get_legacy(PyArray_DTypeMeta *self) { +dtypemeta_get_legacy(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_legacy(self)); } static PyObject * -dtypemeta_get_parametric(PyArray_DTypeMeta *self) { +dtypemeta_get_parametric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_parametric(self)); } static PyObject * -dtypemeta_get_is_numeric(PyArray_DTypeMeta *self) { +dtypemeta_get_is_numeric(PyArray_DTypeMeta *self, void *NPY_UNUSED(ignored)) { return PyBool_FromLong(NPY_DT_is_numeric(self)); } @@ -1403,13 +1401,13 @@ PyArray_DTypeMeta *_Void_dtype = NULL; * This function is exposed with an underscore "privately" because the * public version is a static inline function which only calls the function * on 2.x but directly accesses the `descr` struct on 1.x. - * Once 1.x backwards compatibility is gone, it shoudl be exported without + * Once 1.x backwards compatibility is gone, it should be exported without * the underscore directly. * Internally, we define a private inline function `PyDataType_GetArrFuncs` * for convenience as we are allowed to access the `DType` slots directly. 
*/ NPY_NO_EXPORT PyArray_ArrFuncs * -_PyDataType_GetArrFuncs(PyArray_Descr *descr) +_PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return PyDataType_GetArrFuncs(descr); } diff --git a/numpy/_core/src/multiarray/dtypemeta.h b/numpy/_core/src/multiarray/dtypemeta.h index 667f9280eb13..344b440b38e8 100644 --- a/numpy/_core/src/multiarray/dtypemeta.h +++ b/numpy/_core/src/multiarray/dtypemeta.h @@ -155,8 +155,8 @@ python_builtins_are_known_scalar_types( NPY_NO_EXPORT int dtypemeta_wrap_legacy_descriptor( - _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, - const char *name, const char *alias); + _PyArray_LegacyDescr *descr, PyArray_ArrFuncs *arr_funcs, + PyTypeObject *dtype_super_class, const char *name, const char *alias); NPY_NO_EXPORT void initialize_legacy_dtypemeta_aliases(_PyArray_LegacyDescr **_builtin_descrs); @@ -261,7 +261,7 @@ extern PyArray_DTypeMeta PyArray_StringDType; /* Internal version see dtypmeta.c for more information. */ static inline PyArray_ArrFuncs * -PyDataType_GetArrFuncs(PyArray_Descr *descr) +PyDataType_GetArrFuncs(const PyArray_Descr *descr) { return &NPY_DT_SLOTS(NPY_DTYPE(descr))->f; } diff --git a/numpy/_core/src/multiarray/einsum.c.src b/numpy/_core/src/multiarray/einsum.c.src index cf84c5a7629c..81d3f3e1d79b 100644 --- a/numpy/_core/src/multiarray/einsum.c.src +++ b/numpy/_core/src/multiarray/einsum.c.src @@ -16,7 +16,7 @@ #define _MULTIARRAYMODULE #include #include -#include + #include //PyArray_AssignRawScalar #include diff --git a/numpy/_core/src/multiarray/flagsobject.c b/numpy/_core/src/multiarray/flagsobject.c index 4751b2a8bfed..8257727030c0 100644 --- a/numpy/_core/src/multiarray/flagsobject.c +++ b/numpy/_core/src/multiarray/flagsobject.c @@ -12,7 +12,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "array_assign.h" #include "common.h" diff --git a/numpy/_core/src/multiarray/getset.c b/numpy/_core/src/multiarray/getset.c index d53fc53601d6..8482b6006e3e 100644 --- 
a/numpy/_core/src/multiarray/getset.c +++ b/numpy/_core/src/multiarray/getset.c @@ -9,7 +9,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "npy_import.h" #include "common.h" @@ -25,6 +25,8 @@ #include "alloc.h" #include "npy_buffer.h" #include "shape.h" +#include "multiarraymodule.h" +#include "array_api_standard.h" /******************* array attribute get and set routines ******************/ @@ -385,16 +387,16 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored)) /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(newtype)) { - static PyObject *checkfunc = NULL; PyObject *safe; - npy_cache_import("numpy._core._internal", "_view_is_safe", &checkfunc); - if (checkfunc == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_view_is_safe", + &npy_runtime_imports._view_is_safe) == -1) { goto fail; } - safe = PyObject_CallFunction(checkfunc, "OO", - PyArray_DESCR(self), newtype); + safe = PyObject_CallFunction(npy_runtime_imports._view_is_safe, + "OO", PyArray_DESCR(self), newtype); if (safe == NULL) { goto fail; } @@ -883,12 +885,6 @@ array_itemset(PyArrayObject *self, PyObject *args) return NULL; } -static PyObject * -array_device(PyArrayObject *self, void *NPY_UNUSED(ignored)) -{ - return PyUnicode_FromString("cpu"); -} - NPY_NO_EXPORT PyGetSetDef array_getsetlist[] = { {"ndim", (getter)array_ndim_get, diff --git a/numpy/_core/src/multiarray/hashdescr.c b/numpy/_core/src/multiarray/hashdescr.c index 925179e30a53..f570caf1588f 100644 --- a/numpy/_core/src/multiarray/hashdescr.c +++ b/numpy/_core/src/multiarray/hashdescr.c @@ -8,7 +8,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "hashdescr.h" diff --git a/numpy/_core/src/multiarray/item_selection.c b/numpy/_core/src/multiarray/item_selection.c index 7f7d8394d6f3..4d98ce0c350c 100644 --- 
a/numpy/_core/src/multiarray/item_selection.c +++ b/numpy/_core/src/multiarray/item_selection.c @@ -13,9 +13,9 @@ #include "npy_config.h" -#include "npy_pycompat.h" -#include "multiarraymodule.h" + +#include "npy_static_data.h" #include "common.h" #include "dtype_transfer.h" #include "dtypemeta.h" @@ -243,9 +243,12 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, if (self == NULL) { return NULL; } - indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, - NPY_INTP, - 0, 0); + + indices = (PyArrayObject *)PyArray_FromAny(indices0, + PyArray_DescrFromType(NPY_INTP), + 0, 0, + NPY_ARRAY_SAME_KIND_CASTING | NPY_ARRAY_DEFAULT, + NULL); if (indices == NULL) { goto fail; } @@ -394,6 +397,11 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, goto fail; } ni = PyArray_SIZE(indices); + if ((ni > 0) && (PyArray_Size((PyObject *)self) == 0)) { + PyErr_SetString(PyExc_IndexError, + "cannot replace elements of an empty array"); + goto fail; + } Py_INCREF(PyArray_DESCR(self)); values = (PyArrayObject *)PyArray_FromAny(values0, PyArray_DESCR(self), 0, 0, NPY_ARRAY_DEFAULT | NPY_ARRAY_FORCECAST, NULL); @@ -414,9 +422,8 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, Py_INCREF(PyArray_DESCR(self)); obj = (PyArrayObject *)PyArray_FromArray(self, PyArray_DESCR(self), flags); - if (obj != self) { - copied = 1; - } + copied = 1; + assert(self != obj); self = obj; } max_item = PyArray_SIZE(self); @@ -2255,10 +2262,10 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2) } /* Handle negative axes with standard Python indexing rules */ - if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str_axis1) < 0) { + if (check_and_adjust_axis_msg(&axis1, ndim, npy_interned_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str_axis2) < 0) { + if (check_and_adjust_axis_msg(&axis2, ndim, npy_interned_str.axis2) < 0) { return NULL; } if (axis1 == axis2) { @@ 
-2792,6 +2799,23 @@ NPY_NO_EXPORT PyObject * PyArray_Nonzero(PyArrayObject *self) { int i, ndim = PyArray_NDIM(self); + if (ndim == 0) { + char const* msg; + if (PyArray_ISBOOL(self)) { + msg = + "Calling nonzero on 0d arrays is not allowed. " + "Use np.atleast_1d(scalar).nonzero() instead. " + "If the context of this error is of the form " + "`arr[nonzero(cond)]`, just use `arr[cond]`."; + } else { + msg = + "Calling nonzero on 0d arrays is not allowed. " + "Use np.atleast_1d(scalar).nonzero() instead."; + } + PyErr_SetString(PyExc_ValueError, msg); + return NULL; + } + PyArrayObject *ret = NULL; PyObject *ret_tuple; npy_intp ret_dims[2]; @@ -2813,42 +2837,6 @@ PyArray_Nonzero(PyArrayObject *self) nonzero = PyDataType_GetArrFuncs(dtype)->nonzero; needs_api = PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI); - /* Special case - nonzero(zero_d) is nonzero(atleast_1d(zero_d)) */ - if (ndim == 0) { - char const* msg; - if (PyArray_ISBOOL(self)) { - msg = - "Calling nonzero on 0d arrays is deprecated, as it behaves " - "surprisingly. Use `atleast_1d(cond).nonzero()` if the old " - "behavior was intended. If the context of this warning is of " - "the form `arr[nonzero(cond)]`, just use `arr[cond]`."; - } - else { - msg = - "Calling nonzero on 0d arrays is deprecated, as it behaves " - "surprisingly. 
Use `atleast_1d(arr).nonzero()` if the old " - "behavior was intended."; - } - if (DEPRECATE(msg) < 0) { - return NULL; - } - - static npy_intp const zero_dim_shape[1] = {1}; - static npy_intp const zero_dim_strides[1] = {0}; - - Py_INCREF(PyArray_DESCR(self)); /* array creation steals reference */ - PyArrayObject *self_1d = (PyArrayObject *)PyArray_NewFromDescrAndBase( - Py_TYPE(self), PyArray_DESCR(self), - 1, zero_dim_shape, zero_dim_strides, PyArray_BYTES(self), - PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self); - if (self_1d == NULL) { - return NULL; - } - ret_tuple = PyArray_Nonzero(self_1d); - Py_DECREF(self_1d); - return ret_tuple; - } - /* * First count the number of non-zeros in 'self'. */ diff --git a/numpy/_core/src/multiarray/iterators.c b/numpy/_core/src/multiarray/iterators.c index 68881a1f004f..2806670d3e07 100644 --- a/numpy/_core/src/multiarray/iterators.c +++ b/numpy/_core/src/multiarray/iterators.c @@ -11,7 +11,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "arrayobject.h" #include "iterators.h" diff --git a/numpy/_core/src/multiarray/mapping.c b/numpy/_core/src/multiarray/mapping.c index f17e4ffa65c1..4a6c1f093769 100644 --- a/numpy/_core/src/multiarray/mapping.c +++ b/numpy/_core/src/multiarray/mapping.c @@ -1580,7 +1580,7 @@ array_subscript(PyArrayObject *self, PyObject *op) if (PyArray_GetDTypeTransferFunction(is_aligned, itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + PyArray_DESCR(self), PyArray_DESCR((PyArrayObject *)result), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto finish; } @@ -1960,6 +1960,10 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) tmp_arr = (PyArrayObject *)op; } + if (tmp_arr && solve_may_share_memory(self, tmp_arr, 1) != 0) { + Py_SETREF(tmp_arr, (PyArrayObject *)PyArray_NewCopy(tmp_arr, NPY_ANYORDER)); + } + /* * Special case for very simple 1-d fancy indexing, which however * is quite common. 
This saves not only a lot of setup time in the @@ -1992,9 +1996,9 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) npy_intp itemsize = PyArray_ITEMSIZE(self); int is_aligned = IsUintAligned(self) && IsUintAligned(tmp_arr); - if (PyArray_GetDTypeTransferFunction(is_aligned, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + if (PyArray_GetDTypeTransferFunction( + is_aligned, itemsize, itemsize, + PyArray_DESCR(tmp_arr), PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } @@ -2030,6 +2034,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) goto fail; } + int allocated_array = 0; if (tmp_arr == NULL) { /* Fill extra op, need to swap first */ tmp_arr = mit->extra_op; @@ -2043,6 +2048,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) if (PyArray_CopyObject(tmp_arr, op) < 0) { goto fail; } + allocated_array = 1; } if (PyArray_MapIterCheckIndices(mit) < 0) { @@ -2086,10 +2092,12 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op) /* May need a generic copy function (only for refs and odd sizes) */ NPY_ARRAYMETHOD_FLAGS transfer_flags; npy_intp itemsize = PyArray_ITEMSIZE(self); - - if (PyArray_GetDTypeTransferFunction(1, - itemsize, itemsize, - PyArray_DESCR(self), PyArray_DESCR(self), + // TODO: the heuristic used here to determine the src_dtype might be subtly wrong + // for non-REFCHK user DTypes. See gh-27057 for the prior discussion about this. + if (PyArray_GetDTypeTransferFunction( + 1, itemsize, itemsize, + allocated_array ? 
PyArray_DESCR(mit->extra_op) : PyArray_DESCR(self), + PyArray_DESCR(self), 0, &cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } diff --git a/numpy/_core/src/multiarray/methods.c b/numpy/_core/src/multiarray/methods.c index b61cbed4c957..2a950d6ca5d1 100644 --- a/numpy/_core/src/multiarray/methods.c +++ b/numpy/_core/src/multiarray/methods.c @@ -29,10 +29,12 @@ #include "strfuncs.h" #include "array_assign.h" #include "npy_dlpack.h" +#include "npy_static_data.h" #include "multiarraymodule.h" #include "methods.h" #include "alloc.h" +#include "array_api_standard.h" #include @@ -111,13 +113,13 @@ npy_forward_method( * initialization is not thread-safe, but relies on the CPython GIL to * be correct. */ -#define NPY_FORWARD_NDARRAY_METHOD(name) \ - static PyObject *callable = NULL; \ - npy_cache_import("numpy._core._methods", name, &callable); \ - if (callable == NULL) { \ - return NULL; \ - } \ - return npy_forward_method(callable, (PyObject *)self, args, len_args, kwnames) +#define NPY_FORWARD_NDARRAY_METHOD(name) \ + if (npy_cache_import_runtime("numpy._core._methods", #name, \ + &npy_runtime_imports.name) == -1) { \ + return NULL; \ + } \ + return npy_forward_method(npy_runtime_imports.name, \ + (PyObject *)self, args, len_args, kwnames) static PyObject * @@ -181,14 +183,16 @@ array_put(PyArrayObject *self, PyObject *args, PyObject *kwds) static PyObject * array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds) { - static char *keywords[] = {"order", NULL}; + static char *keywords[] = {"order", "copy", NULL}; PyArray_Dims newshape; PyObject *ret; NPY_ORDER order = NPY_CORDER; + NPY_COPYMODE copy = NPY_COPY_IF_NEEDED; Py_ssize_t n = PyTuple_Size(args); - if (!NpyArg_ParseKeywords(kwds, "|O&", keywords, - PyArray_OrderConverter, &order)) { + if (!NpyArg_ParseKeywords(kwds, "|$O&O&", keywords, + PyArray_OrderConverter, &order, + PyArray_CopyConverter, ©)) { return NULL; } @@ -210,7 +214,7 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject 
*kwds) goto fail; } } - ret = PyArray_Newshape(self, &newshape, order); + ret = _reshape_with_copy_arg(self, &newshape, order, copy); npy_free_cache_dim_obj(newshape); return ret; @@ -354,14 +358,14 @@ static PyObject * array_max(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_amax"); + NPY_FORWARD_NDARRAY_METHOD(_amax); } static PyObject * array_min(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_amin"); + NPY_FORWARD_NDARRAY_METHOD(_amin); } static PyObject * @@ -385,7 +389,6 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) { PyObject *ret = NULL; PyObject *safe; - static PyObject *checkfunc = NULL; int self_elsize, typed_elsize; if (self == NULL) { @@ -402,15 +405,16 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset) /* check that we are not reinterpreting memory containing Objects. */ if (_may_have_objects(PyArray_DESCR(self)) || _may_have_objects(typed)) { - npy_cache_import("numpy._core._internal", "_getfield_is_safe", - &checkfunc); - if (checkfunc == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_getfield_is_safe", + &npy_runtime_imports._getfield_is_safe) == -1) { Py_DECREF(typed); return NULL; } /* only returns True or raises */ - safe = PyObject_CallFunction(checkfunc, "OOi", PyArray_DESCR(self), + safe = PyObject_CallFunction(npy_runtime_imports._getfield_is_safe, + "OOi", PyArray_DESCR(self), typed, offset); if (safe == NULL) { Py_DECREF(typed); @@ -811,8 +815,8 @@ array_astype(PyArrayObject *self, /* * If the memory layout matches and, data types are equivalent, - * and it's not a subtype if subok is False, then we - * can skip the copy. + * it's not a subtype if subok is False, and if the cast says + * view are possible, we can skip the copy. 
*/ if (forcecopy != NPY_AS_TYPE_COPY_ALWAYS && (order == NPY_KEEPORDER || @@ -823,11 +827,15 @@ array_astype(PyArrayObject *self, PyArray_IS_C_CONTIGUOUS(self)) || (order == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(self))) && - (subok || PyArray_CheckExact(self)) && - PyArray_EquivTypes(dtype, PyArray_DESCR(self))) { - Py_DECREF(dtype); - Py_INCREF(self); - return (PyObject *)self; + (subok || PyArray_CheckExact(self))) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(dtype, PyArray_DESCR(self), + &view_offset, NPY_NO_CASTING, 1); + if (is_safe && (view_offset != NPY_MIN_INTP)) { + Py_DECREF(dtype); + Py_INCREF(self); + return (PyObject *)self; + } } if (!PyArray_CanCastArrayTo(self, dtype, casting)) { @@ -980,8 +988,7 @@ array_getarray(PyArrayObject *self, PyObject *args, PyObject *kwds) Py_DECREF(self); return ret; } else { // copy == NPY_COPY_NEVER - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating an array from given array."); + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); Py_DECREF(self); return NULL; } @@ -1039,7 +1046,7 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } Py_DECREF(out_kwd_obj); /* check where if it exists */ - where_obj = PyDict_GetItemWithError(kwds, npy_ma_str_where); + where_obj = PyDict_GetItemWithError(kwds, npy_interned_str.where); if (where_obj == NULL) { if (PyErr_Occurred()) { return -1; @@ -1113,7 +1120,14 @@ array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kw &func, &types, &args, &kwargs)) { return NULL; } - + if (!PyTuple_CheckExact(args)) { + PyErr_SetString(PyExc_TypeError, "args must be a tuple."); + return NULL; + } + if (!PyDict_CheckExact(kwargs)) { + PyErr_SetString(PyExc_TypeError, "kwargs must be a dict."); + return NULL; + } types = PySequence_Fast( types, "types argument to ndarray.__array_function__ must be iterable"); @@ -2239,17 +2253,20 @@ array_setstate(PyArrayObject *self, PyObject *args) NPY_NO_EXPORT int 
PyArray_Dump(PyObject *self, PyObject *file, int protocol) { - static PyObject *method = NULL; PyObject *ret; - npy_cache_import("numpy._core._methods", "_dump", &method); - if (method == NULL) { + if (npy_cache_import_runtime( + "numpy._core._methods", "_dump", + &npy_runtime_imports._dump) == -1) { return -1; } + if (protocol < 0) { - ret = PyObject_CallFunction(method, "OO", self, file); + ret = PyObject_CallFunction( + npy_runtime_imports._dump, "OO", self, file); } else { - ret = PyObject_CallFunction(method, "OOi", self, file, protocol); + ret = PyObject_CallFunction( + npy_runtime_imports._dump, "OOi", self, file, protocol); } if (ret == NULL) { return -1; @@ -2262,16 +2279,16 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol) NPY_NO_EXPORT PyObject * PyArray_Dumps(PyObject *self, int protocol) { - static PyObject *method = NULL; - npy_cache_import("numpy._core._methods", "_dumps", &method); - if (method == NULL) { + if (npy_cache_import_runtime("numpy._core._methods", "_dumps", + &npy_runtime_imports._dumps) == -1) { return NULL; } if (protocol < 0) { - return PyObject_CallFunction(method, "O", self); + return PyObject_CallFunction(npy_runtime_imports._dumps, "O", self); } else { - return PyObject_CallFunction(method, "Oi", self, protocol); + return PyObject_CallFunction( + npy_runtime_imports._dumps, "Oi", self, protocol); } } @@ -2280,7 +2297,7 @@ static PyObject * array_dump(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_dump"); + NPY_FORWARD_NDARRAY_METHOD(_dump); } @@ -2288,7 +2305,7 @@ static PyObject * array_dumps(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_dumps"); + NPY_FORWARD_NDARRAY_METHOD(_dumps); } @@ -2340,14 +2357,14 @@ static PyObject * array_mean(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_mean"); + 
NPY_FORWARD_NDARRAY_METHOD(_mean); } static PyObject * array_sum(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_sum"); + NPY_FORWARD_NDARRAY_METHOD(_sum); } @@ -2377,7 +2394,7 @@ static PyObject * array_prod(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_prod"); + NPY_FORWARD_NDARRAY_METHOD(_prod); } static PyObject * @@ -2437,7 +2454,7 @@ static PyObject * array_any(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_any"); + NPY_FORWARD_NDARRAY_METHOD(_any); } @@ -2445,21 +2462,21 @@ static PyObject * array_all(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_all"); + NPY_FORWARD_NDARRAY_METHOD(_all); } static PyObject * array_stddev(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_std"); + NPY_FORWARD_NDARRAY_METHOD(_std); } static PyObject * array_variance(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_var"); + NPY_FORWARD_NDARRAY_METHOD(_var); } static PyObject * @@ -2540,7 +2557,7 @@ static PyObject * array_clip(PyArrayObject *self, PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - NPY_FORWARD_NDARRAY_METHOD("_clip"); + NPY_FORWARD_NDARRAY_METHOD(_clip); } @@ -2795,72 +2812,6 @@ array_class_getitem(PyObject *cls, PyObject *args) return Py_GenericAlias(cls, args); } -static PyObject * -array_array_namespace(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"api_version", NULL}; - PyObject *array_api_version = Py_None; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|$O:__array_namespace__", kwlist, - &array_api_version)) { - return NULL; - } - - if (array_api_version != Py_None) { - if 
(!PyUnicode_Check(array_api_version)) - { - PyErr_Format(PyExc_ValueError, - "Only None and strings are allowed as the Array API version, " - "but received: %S.", array_api_version); - return NULL; - } else if (PyUnicode_CompareWithASCIIString(array_api_version, "2021.12") != 0 && - PyUnicode_CompareWithASCIIString(array_api_version, "2022.12") != 0) - { - PyErr_Format(PyExc_ValueError, - "Version \"%U\" of the Array API Standard is not supported.", - array_api_version); - return NULL; - } - } - - PyObject *numpy_module = PyImport_ImportModule("numpy"); - if (numpy_module == NULL){ - return NULL; - } - - return numpy_module; -} - -static PyObject * -array_to_device(PyArrayObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"", "stream", NULL}; - char *device = ""; - PyObject *stream = Py_None; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "s|$O:to_device", kwlist, - &device, - &stream)) { - return NULL; - } - - if (stream != Py_None) { - PyErr_SetString(PyExc_ValueError, - "The stream argument in to_device() " - "is not supported"); - return NULL; - } - - if (strcmp(device, "cpu") != 0) { - PyErr_Format(PyExc_ValueError, - "Unsupported device: %s.", device); - return NULL; - } - - Py_INCREF(self); - return (PyObject *)self; -} - NPY_NO_EXPORT PyMethodDef array_methods[] = { /* for subtypes */ @@ -2918,7 +2869,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = { (PyCFunction) array_format, METH_VARARGS, NULL}, - /* for typing; requires python >= 3.9 */ {"__class_getitem__", (PyCFunction)array_class_getitem, METH_CLASS | METH_O, NULL}, diff --git a/numpy/_core/src/multiarray/methods.h b/numpy/_core/src/multiarray/methods.h index 9d06794de2aa..f49e0205894d 100644 --- a/numpy/_core/src/multiarray/methods.h +++ b/numpy/_core/src/multiarray/methods.h @@ -1,6 +1,7 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ #define NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ +#include "npy_static_data.h" #include "npy_import.h" extern NPY_NO_EXPORT PyMethodDef 
array_methods[]; @@ -13,22 +14,12 @@ extern NPY_NO_EXPORT PyMethodDef array_methods[]; static inline PyObject * NpyPath_PathlikeToFspath(PyObject *file) { - static PyObject *os_PathLike = NULL; - static PyObject *os_fspath = NULL; - npy_cache_import("os", "PathLike", &os_PathLike); - if (os_PathLike == NULL) { - return NULL; - } - npy_cache_import("os", "fspath", &os_fspath); - if (os_fspath == NULL) { - return NULL; - } - - if (!PyObject_IsInstance(file, os_PathLike)) { + if (!PyObject_IsInstance(file, npy_static_pydata.os_PathLike)) { Py_INCREF(file); return file; } - return PyObject_CallFunctionObjArgs(os_fspath, file, NULL); + return PyObject_CallFunctionObjArgs(npy_static_pydata.os_fspath, + file, NULL); } #endif /* NUMPY_CORE_SRC_MULTIARRAY_METHODS_H_ */ diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index 3c153adb83a8..e02743693212 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -23,11 +23,13 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" +#include "multiarraymodule.h" #include "numpy/npy_math.h" #include "npy_argparse.h" #include "npy_config.h" #include "npy_pycompat.h" #include "npy_import.h" +#include "npy_static_data.h" #include "convert_datatype.h" #include "legacy_dtype_implementation.h" @@ -63,7 +65,6 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; #include "ctors.h" #include "array_assign.h" #include "common.h" -#include "multiarraymodule.h" #include "cblasfuncs.h" #include "vdot.h" #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ @@ -97,26 +98,46 @@ NPY_NO_EXPORT PyObject * _umath_strings_richcompare( PyArrayObject *self, PyArrayObject *other, int cmp_op, int rstrip); -/* - * global variable to determine if legacy printing is enabled, accessible from - * C. 
For simplicity the mode is encoded as an integer where INT_MAX means no - * legacy mode, and '113'/'121' means 1.13/1.21 legacy mode; and 0 maps to - * INT_MAX. We can upgrade this if we have more complex requirements in the - * future. - */ -int npy_legacy_print_mode = INT_MAX; - -static PyObject * -set_legacy_print_mode(PyObject *NPY_UNUSED(self), PyObject *args) -{ - if (!PyArg_ParseTuple(args, "i", &npy_legacy_print_mode)) { - return NULL; +NPY_NO_EXPORT int +get_legacy_print_mode(void) { + /* Get the C value of the legacy printing mode. + * + * It is stored as a Python context variable so we access it via the C + * API. For simplicity the mode is encoded as an integer where INT_MAX + * means no legacy mode, and '113'/'121'/'125' means 1.13/1.21/1.25 legacy + * mode; and 0 maps to INT_MAX. We can upgrade this if we have more + * complex requirements in the future. + */ + PyObject *format_options = NULL; + PyContextVar_Get(npy_static_pydata.format_options, NULL, &format_options); + if (format_options == NULL) { + PyErr_SetString(PyExc_SystemError, + "NumPy internal error: unable to get format_options " + "context variable"); + return -1; } - if (!npy_legacy_print_mode) { - npy_legacy_print_mode = INT_MAX; + PyObject *legacy_print_mode = NULL; + if (PyDict_GetItemRef(format_options, npy_interned_str.legacy, + &legacy_print_mode) == -1) { + return -1; } - Py_RETURN_NONE; + Py_DECREF(format_options); + if (legacy_print_mode == NULL) { + PyErr_SetString(PyExc_SystemError, + "NumPy internal error: unable to get legacy print " + "mode"); + return -1; + } + Py_ssize_t ret = PyLong_AsSsize_t(legacy_print_mode); + Py_DECREF(legacy_print_mode); + if (error_converting(ret)) { + return -1; + } + if (ret > INT_MAX) { + return INT_MAX; + } + return (int)ret; } @@ -136,7 +157,7 @@ PyArray_GetPriority(PyObject *obj, double default_) return NPY_SCALAR_PRIORITY; } - ret = PyArray_LookupSpecial_OnInstance(obj, npy_ma_str_array_priority); + ret = 
PyArray_LookupSpecial_OnInstance(obj, npy_interned_str.array_priority); if (ret == NULL) { if (PyErr_Occurred()) { /* TODO[gh-14801]: propagate crashes during attribute access? */ @@ -467,7 +488,6 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis, if (ret == NULL) { return NULL; } - assert(PyArray_DESCR(ret) == descr); } /* @@ -1449,23 +1469,6 @@ PyArray_EquivTypes(PyArray_Descr *type1, PyArray_Descr *type2) return 1; } - if (Py_TYPE(Py_TYPE(type1)) == &PyType_Type) { - /* - * 2021-12-17: This case is nonsense and should be removed eventually! - * - * boost::python has/had a bug effectively using EquivTypes with - * `type(arbitrary_obj)`. That is clearly wrong as that cannot be a - * `PyArray_Descr *`. We assume that `type(type(type(arbitrary_obj))` - * is always in practice `type` (this is the type of the metaclass), - * but for our descriptors, `type(type(descr))` is DTypeMeta. - * - * In that case, we just return False. There is a possibility that - * this actually _worked_ effectively (returning 1 sometimes). - * We ignore that possibility for simplicity; it really is not our bug. - */ - return 0; - } - /* * Do not use PyArray_CanCastTypeTo because it supports legacy flexible * dtypes as input. @@ -1586,8 +1589,7 @@ _array_fromobject_generic( } else { if (copy == NPY_COPY_NEVER) { - PyErr_SetString(PyExc_ValueError, - "Unable to avoid copy while creating a new array."); + PyErr_SetString(PyExc_ValueError, npy_no_copy_err_msg); goto finish; } ret = (PyArrayObject *)PyArray_NewCopy(oparr, order); @@ -1609,7 +1611,10 @@ _array_fromobject_generic( /* One more chance for faster exit if user specified the dtype. 
*/ oldtype = PyArray_DESCR(oparr); - if (PyArray_EquivTypes(oldtype, dtype)) { + npy_intp view_offset; + npy_intp is_safe = PyArray_SafeCast(oldtype, dtype, &view_offset, NPY_NO_CASTING, 1); + npy_intp view_safe = (is_safe && (view_offset != NPY_MIN_INTP)); + if (view_safe) { if (copy != NPY_COPY_ALWAYS && STRIDING_OK(oparr, order)) { if (oldtype == dtype) { Py_INCREF(op); @@ -1797,8 +1802,10 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { PyObject *op; + NPY_COPYMODE copy = NPY_COPY_IF_NEEDED; npy_dtype_info dt_info = {NULL, NULL}; NPY_ORDER order = NPY_KEEPORDER; + NPY_DEVICE device = NPY_DEVICE_CPU; PyObject *like = Py_None; NPY_PREPARE_ARGPARSER; @@ -1807,6 +1814,8 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), "a", NULL, &op, "|dtype", &PyArray_DTypeOrDescrConverterOptional, &dt_info, "|order", &PyArray_OrderConverter, &order, + "$device", &PyArray_DeviceConverterOptional, &device, + "$copy", &PyArray_CopyConverter, ©, "$like", NULL, &like, NULL, NULL, NULL) < 0) { Py_XDECREF(dt_info.descr); @@ -1828,7 +1837,7 @@ array_asanyarray(PyObject *NPY_UNUSED(ignored), } PyObject *res = _array_fromobject_generic( - op, dt_info.descr, dt_info.dtype, NPY_COPY_IF_NEEDED, order, NPY_TRUE, 0); + op, dt_info.descr, dt_info.dtype, copy, order, NPY_TRUE, 0); Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); return res; @@ -1920,29 +1929,64 @@ array_asfortranarray(PyObject *NPY_UNUSED(ignored), static PyObject * -array_copyto(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds) +array_copyto(PyObject *NPY_UNUSED(ignored), + PyObject *const *args, Py_ssize_t len_args, PyObject *kwnames) { - static char *kwlist[] = {"dst", "src", "casting", "where", NULL}; - PyObject *wheremask_in = NULL; - PyArrayObject *dst = NULL, *src = NULL, *wheremask = NULL; + PyObject *dst_obj, *src_obj, *wheremask_in = NULL; + PyArrayObject *src = NULL, *wheremask = NULL; NPY_CASTING casting = NPY_SAME_KIND_CASTING; + 
NPY_PREPARE_ARGPARSER; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O&|O&O:copyto", kwlist, - &PyArray_Type, &dst, - &PyArray_Converter, &src, - &PyArray_CastingConverter, &casting, - &wheremask_in)) { + if (npy_parse_arguments("copyto", args, len_args, kwnames, + "dst", NULL, &dst_obj, + "src", NULL, &src_obj, + "|casting", &PyArray_CastingConverter, &casting, + "|where", NULL, &wheremask_in, + NULL, NULL, NULL) < 0) { + goto fail; + } + + if (!PyArray_Check(dst_obj)) { + PyErr_Format(PyExc_TypeError, + "copyto() argument 1 must be a numpy.ndarray, not %s", + Py_TYPE(dst_obj)->tp_name); goto fail; } + PyArrayObject *dst = (PyArrayObject *)dst_obj; + + src = (PyArrayObject *)PyArray_FromAny(src_obj, NULL, 0, 0, 0, NULL); + if (src == NULL) { + goto fail; + } + PyArray_DTypeMeta *DType = NPY_DTYPE(PyArray_DESCR(src)); + Py_INCREF(DType); + if (npy_mark_tmp_array_if_pyscalar(src_obj, src, &DType)) { + /* The user passed a Python scalar */ + PyArray_Descr *descr = npy_find_descr_for_scalar( + src_obj, PyArray_DESCR(src), DType, + NPY_DTYPE(PyArray_DESCR(dst))); + Py_DECREF(DType); + if (descr == NULL) { + goto fail; + } + int res = npy_update_operand_for_scalar(&src, src_obj, descr, casting); + Py_DECREF(descr); + if (res < 0) { + goto fail; + } + } + else { + Py_DECREF(DType); + } if (wheremask_in != NULL) { /* Get the boolean where mask */ - PyArray_Descr *dtype = PyArray_DescrFromType(NPY_BOOL); - if (dtype == NULL) { + PyArray_Descr *descr = PyArray_DescrFromType(NPY_BOOL); + if (descr == NULL) { goto fail; } wheremask = (PyArrayObject *)PyArray_FromAny(wheremask_in, - dtype, 0, 0, 0, NULL); + descr, 0, 0, 0, NULL); if (wheremask == NULL) { goto fail; } @@ -2013,14 +2057,11 @@ array_empty(PyObject *NPY_UNUSED(ignored), ret = (PyArrayObject *)PyArray_Empty_int( shape.len, shape.ptr, dt_info.descr, dt_info.dtype, is_f_order); - npy_free_cache_dim_obj(shape); - return (PyObject *)ret; - fail: Py_XDECREF(dt_info.descr); Py_XDECREF(dt_info.dtype); 
npy_free_cache_dim_obj(shape); - return NULL; + return (PyObject *)ret; } static PyObject * @@ -3191,31 +3232,6 @@ array__reconstruct(PyObject *NPY_UNUSED(dummy), PyObject *args) return NULL; } -static PyObject * -array_set_string_function(PyObject *NPY_UNUSED(self), PyObject *args, - PyObject *kwds) -{ - PyObject *op = NULL; - int repr = 1; - static char *kwlist[] = {"f", "repr", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) { - return NULL; - } - /* reset the array_repr function to built-in */ - if (op == Py_None) { - op = NULL; - } - if (op != NULL && !PyCallable_Check(op)) { - PyErr_SetString(PyExc_TypeError, - "Argument must be callable."); - return NULL; - } - PyArray_SetStringFunction(op, repr); - Py_RETURN_NONE; -} - - static PyObject * array_set_datetimeparse_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args), PyObject *NPY_UNUSED(kwds)) @@ -3273,7 +3289,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) return NULL; } - NPY_cast_info cast_info = {.func = NULL}; + NPY_cast_info x_cast_info = {.func = NULL}; + NPY_cast_info y_cast_info = {.func = NULL}; ax = (PyArrayObject*)PyArray_FROM_O(x); if (ax == NULL) { @@ -3297,13 +3314,33 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) NPY_ITER_READONLY | NPY_ITER_ALIGNED, NPY_ITER_READONLY | NPY_ITER_ALIGNED }; + common_dt = PyArray_ResultType(2, &op_in[2], 0, NULL); if (common_dt == NULL) { goto fail; } + npy_intp itemsize = common_dt->elsize; + + // If x and y don't have references, we ask the iterator to create buffers + // using the common data type of x and y and then do fast trivial copies + // in the loop below. + // Otherwise trivial copies aren't possible and we handle the cast item by item + // in the loop. 
+ PyArray_Descr *x_dt, *y_dt; + int trivial_copy_loop = !PyDataType_REFCHK(common_dt) && + ((itemsize == 16) || (itemsize == 8) || (itemsize == 4) || + (itemsize == 2) || (itemsize == 1)); + if (trivial_copy_loop) { + x_dt = common_dt; + y_dt = common_dt; + } + else { + x_dt = PyArray_DESCR(op_in[2]); + y_dt = PyArray_DESCR(op_in[3]); + } /* `PyArray_DescrFromType` cannot fail for simple builtin types: */ - PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), - common_dt, common_dt}; + PyArray_Descr * op_dt[4] = {common_dt, PyArray_DescrFromType(NPY_BOOL), x_dt, y_dt}; + NpyIter * iter; NPY_BEGIN_THREADS_DEF; @@ -3317,26 +3354,27 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) /* Get the result from the iterator object array */ ret = (PyObject*)NpyIter_GetOperandArray(iter)[0]; - - npy_intp itemsize = common_dt->elsize; - - int has_ref = PyDataType_REFCHK(common_dt); + PyArray_Descr *ret_dt = PyArray_DESCR((PyArrayObject *)ret); NPY_ARRAYMETHOD_FLAGS transfer_flags = 0; - npy_intp transfer_strides[2] = {itemsize, itemsize}; + npy_intp x_strides[2] = {x_dt->elsize, itemsize}; + npy_intp y_strides[2] = {y_dt->elsize, itemsize}; npy_intp one = 1; - if (has_ref || ((itemsize != 16) && (itemsize != 8) && (itemsize != 4) && - (itemsize != 2) && (itemsize != 1))) { + if (!trivial_copy_loop) { // The iterator has NPY_ITER_ALIGNED flag so no need to check alignment // of the input arrays. - // - // There's also no need to set up a cast for y, since the iterator - // ensures both casts are identical. 
if (PyArray_GetDTypeTransferFunction( - 1, itemsize, itemsize, common_dt, common_dt, 0, - &cast_info, &transfer_flags) != NPY_SUCCEED) { + 1, x_strides[0], x_strides[1], + PyArray_DESCR(op_in[2]), ret_dt, 0, + &x_cast_info, &transfer_flags) != NPY_SUCCEED) { + goto fail; + } + if (PyArray_GetDTypeTransferFunction( + 1, y_strides[0], y_strides[1], + PyArray_DESCR(op_in[3]), ret_dt, 0, + &y_cast_info, &transfer_flags) != NPY_SUCCEED) { goto fail; } } @@ -3368,19 +3406,19 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) npy_intp ystride = strides[3]; /* constant sizes so compiler replaces memcpy */ - if (!has_ref && itemsize == 16) { + if (trivial_copy_loop && itemsize == 16) { INNER_WHERE_LOOP(16); } - else if (!has_ref && itemsize == 8) { + else if (trivial_copy_loop && itemsize == 8) { INNER_WHERE_LOOP(8); } - else if (!has_ref && itemsize == 4) { + else if (trivial_copy_loop && itemsize == 4) { INNER_WHERE_LOOP(4); } - else if (!has_ref && itemsize == 2) { + else if (trivial_copy_loop && itemsize == 2) { INNER_WHERE_LOOP(2); } - else if (!has_ref && itemsize == 1) { + else if (trivial_copy_loop && itemsize == 1) { INNER_WHERE_LOOP(1); } else { @@ -3389,18 +3427,18 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) if (*csrc) { char *args[2] = {xsrc, dst}; - if (cast_info.func( - &cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { + if (x_cast_info.func( + &x_cast_info.context, args, &one, + x_strides, x_cast_info.auxdata) < 0) { goto fail; } } else { char *args[2] = {ysrc, dst}; - if (cast_info.func( - &cast_info.context, args, &one, - transfer_strides, cast_info.auxdata) < 0) { + if (y_cast_info.func( + &y_cast_info.context, args, &one, + y_strides, y_cast_info.auxdata) < 0) { goto fail; } } @@ -3420,7 +3458,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_DECREF(ax); Py_DECREF(ay); Py_DECREF(common_dt); - NPY_cast_info_xfree(&cast_info); + NPY_cast_info_xfree(&x_cast_info); + 
NPY_cast_info_xfree(&y_cast_info); if (NpyIter_Deallocate(iter) != NPY_SUCCEED) { Py_DECREF(ret); @@ -3434,7 +3473,8 @@ PyArray_Where(PyObject *condition, PyObject *x, PyObject *y) Py_XDECREF(ax); Py_XDECREF(ay); Py_XDECREF(common_dt); - NPY_cast_info_xfree(&cast_info); + NPY_cast_info_xfree(&x_cast_info); + NPY_cast_info_xfree(&y_cast_info); return NULL; } @@ -3503,6 +3543,36 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), if (PyArray_Check(from_obj)) { ret = PyArray_CanCastArrayTo((PyArrayObject *)from_obj, d2, casting); } + else if (PyArray_IsScalar(from_obj, Generic)) { + /* + * TODO: `PyArray_IsScalar` should not be required for new dtypes. + * weak-promotion branch is in practice identical to dtype one. + */ + if (get_npy_promotion_state() == NPY_USE_WEAK_PROMOTION) { + PyObject *descr = PyObject_GetAttr(from_obj, npy_interned_str.dtype); + if (descr == NULL) { + goto finish; + } + if (!PyArray_DescrCheck(descr)) { + Py_DECREF(descr); + PyErr_SetString(PyExc_TypeError, + "numpy_scalar.dtype did not return a dtype instance."); + goto finish; + } + ret = PyArray_CanCastTypeTo((PyArray_Descr *)descr, d2, casting); + Py_DECREF(descr); + } + else { + /* need to convert to object to consider old value-based logic */ + PyArrayObject *arr; + arr = (PyArrayObject *)PyArray_FROM_O(from_obj); + if (arr == NULL) { + goto finish; + } + ret = PyArray_CanCastArrayTo(arr, d2, casting); + Py_DECREF(arr); + } + } else if (PyArray_IsPythonNumber(from_obj)) { PyErr_SetString(PyExc_TypeError, "can_cast() does not support Python ints, floats, and " @@ -3511,15 +3581,6 @@ array_can_cast_safely(PyObject *NPY_UNUSED(self), "explicitly allow them again in the future."); goto finish; } - else if (PyArray_IsScalar(from_obj, Generic)) { - PyArrayObject *arr; - arr = (PyArrayObject *)PyArray_FROM_O(from_obj); - if (arr == NULL) { - goto finish; - } - ret = PyArray_CanCastArrayTo(arr, d2, casting); - Py_DECREF(arr); - } /* Otherwise use CanCastTypeTo */ else { if 
(!PyArray_DescrConverter2(from_obj, &d1) || d1 == NULL) { @@ -4260,11 +4321,8 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_ } else if (result == MEM_OVERLAP_TOO_HARD) { if (raise_exceptions) { - static PyObject *too_hard_cls = NULL; - npy_cache_import("numpy.exceptions", "TooHardError", &too_hard_cls); - if (too_hard_cls) { - PyErr_SetString(too_hard_cls, "Exceeded max_work"); - } + PyErr_SetString(npy_static_pydata.TooHardError, + "Exceeded max_work"); return NULL; } else { @@ -4330,8 +4388,8 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) if (res < 0) { return NULL; } - int old_value = numpy_warn_if_no_mem_policy; - numpy_warn_if_no_mem_policy = res; + int old_value = npy_thread_unsafe_state.warn_if_no_mem_policy; + npy_thread_unsafe_state.warn_if_no_mem_policy = res; if (old_value) { Py_RETURN_TRUE; } @@ -4343,8 +4401,6 @@ _set_numpy_warn_if_no_mem_policy(PyObject *NPY_UNUSED(self), PyObject *arg) static PyObject * _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { - static int initialized = 0; - #if !defined(PYPY_VERSION) if (PyThreadState_Get()->interp != PyInterpreterState_Main()) { if (PyErr_WarnEx(PyExc_UserWarning, @@ -4360,11 +4416,11 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } /* No need to give the other warning in a sub-interpreter as well... */ - initialized = 1; + npy_thread_unsafe_state.reload_guard_initialized = 1; Py_RETURN_NONE; } #endif - if (initialized) { + if (npy_thread_unsafe_state.reload_guard_initialized) { if (PyErr_WarnEx(PyExc_UserWarning, "The NumPy module was reloaded (imported a second time). 
" "This can in some cases result in small but subtle issues " @@ -4372,7 +4428,7 @@ _reload_guard(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args)) { return NULL; } } - initialized = 1; + npy_thread_unsafe_state.reload_guard_initialized = 1; Py_RETURN_NONE; } @@ -4387,9 +4443,6 @@ static struct PyMethodDef array_module_methods[] = { {"_reconstruct", (PyCFunction)array__reconstruct, METH_VARARGS, NULL}, - {"set_string_function", - (PyCFunction)array_set_string_function, - METH_VARARGS|METH_KEYWORDS, NULL}, {"set_datetimeparse_function", (PyCFunction)array_set_datetimeparse_function, METH_VARARGS|METH_KEYWORDS, NULL}, @@ -4413,7 +4466,7 @@ static struct PyMethodDef array_module_methods[] = { METH_FASTCALL | METH_KEYWORDS, NULL}, {"copyto", (PyCFunction)array_copyto, - METH_VARARGS|METH_KEYWORDS, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {"nested_iters", (PyCFunction)NpyIter_NestedIters, METH_VARARGS|METH_KEYWORDS, NULL}, @@ -4551,8 +4604,6 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"normalize_axis_index", (PyCFunction)normalize_axis_index, METH_FASTCALL | METH_KEYWORDS, NULL}, - {"set_legacy_print_mode", (PyCFunction)set_legacy_print_mode, - METH_VARARGS, NULL}, {"_discover_array_parameters", (PyCFunction)_discover_array_parameters, METH_FASTCALL | METH_KEYWORDS, NULL}, {"_get_castingimpl", (PyCFunction)_get_castingimpl, @@ -4598,7 +4649,7 @@ static struct PyMethodDef array_module_methods[] = { METH_NOARGS, "Give a warning on reload and big warning in sub-interpreters."}, {"from_dlpack", (PyCFunction)from_dlpack, - METH_O, NULL}, + METH_FASTCALL | METH_KEYWORDS, NULL}, {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -4713,7 +4764,7 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict)) DUAL_INHERIT(CDouble, Complex, ComplexFloating); SINGLE_INHERIT(CLongDouble, ComplexFloating); - DUAL_INHERIT2(String, String, Character); + DUAL_INHERIT2(String, Bytes, Character); DUAL_INHERIT2(Unicode, Unicode, Character); 
SINGLE_INHERIT(Void, Flexible); @@ -4769,150 +4820,22 @@ set_flaginfo(PyObject *d) return; } -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_current_allocator = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_function = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_struct = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_interface = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_priority = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_implementation = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_like = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_numpy = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_where = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_preserve = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_convert_if_no_array = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_cpu = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_err_msg_substr = NULL; +// static variables are automatically zero-initialized +NPY_VISIBILITY_HIDDEN npy_thread_unsafe_state_struct npy_thread_unsafe_state; static int -intern_strings(void) -{ - npy_ma_str_current_allocator = PyUnicode_InternFromString("current_allocator"); - if (npy_ma_str_current_allocator == NULL) { - return -1; - } - npy_ma_str_array = PyUnicode_InternFromString("__array__"); - if (npy_ma_str_array == NULL) { - return -1; - } - npy_ma_str_array_function = PyUnicode_InternFromString("__array_function__"); - if (npy_ma_str_array_function == NULL) { - return -1; - } - npy_ma_str_array_struct = PyUnicode_InternFromString("__array_struct__"); - if (npy_ma_str_array_struct == NULL) 
{ - return -1; - } - npy_ma_str_array_priority = PyUnicode_InternFromString("__array_priority__"); - if (npy_ma_str_array_priority == NULL) { - return -1; - } - npy_ma_str_array_interface = PyUnicode_InternFromString("__array_interface__"); - if (npy_ma_str_array_interface == NULL) { - return -1; - } - npy_ma_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_ma_str_array_wrap == NULL) { - return -1; - } - npy_ma_str_array_finalize = PyUnicode_InternFromString("__array_finalize__"); - if (npy_ma_str_array_finalize == NULL) { - return -1; - } - npy_ma_str_implementation = PyUnicode_InternFromString("_implementation"); - if (npy_ma_str_implementation == NULL) { - return -1; - } - npy_ma_str_axis1 = PyUnicode_InternFromString("axis1"); - if (npy_ma_str_axis1 == NULL) { - return -1; - } - npy_ma_str_axis2 = PyUnicode_InternFromString("axis2"); - if (npy_ma_str_axis2 == NULL) { - return -1; - } - npy_ma_str_like = PyUnicode_InternFromString("like"); - if (npy_ma_str_like == NULL) { - return -1; - } - npy_ma_str_numpy = PyUnicode_InternFromString("numpy"); - if (npy_ma_str_numpy == NULL) { - return -1; - } - npy_ma_str_where = PyUnicode_InternFromString("where"); - if (npy_ma_str_where == NULL) { - return -1; - } - /* scalar policies */ - npy_ma_str_convert = PyUnicode_InternFromString("convert"); - if (npy_ma_str_convert == NULL) { - return -1; - } - npy_ma_str_preserve = PyUnicode_InternFromString("preserve"); - if (npy_ma_str_preserve == NULL) { - return -1; - } - npy_ma_str_convert_if_no_array = PyUnicode_InternFromString("convert_if_no_array"); - if (npy_ma_str_convert_if_no_array == NULL) { - return -1; - } - npy_ma_str_cpu = PyUnicode_InternFromString("cpu"); - if (npy_ma_str_cpu == NULL) { - return -1; - } - npy_ma_str_array_err_msg_substr = PyUnicode_InternFromString( - "__array__() got an unexpected keyword argument 'copy'"); - if (npy_ma_str_array_err_msg_substr == NULL) { - return -1; - } - return 0; -} - - -/* - * Initializes global 
constants. At some points these need to be cleaned - * up, and sometimes we also import them where they are needed. But for - * some things, adding an `npy_cache_import` everywhere seems inconvenient. - * - * These globals should not need the C-layer at all and will be imported - * before anything on the C-side is initialized. - */ -static int -initialize_static_globals(void) -{ - assert(npy_DTypePromotionError == NULL); - npy_cache_import( - "numpy.exceptions", "DTypePromotionError", - &npy_DTypePromotionError); - if (npy_DTypePromotionError == NULL) { - return -1; - } - - assert(npy_UFuncNoLoopError == NULL); - npy_cache_import( - "numpy._core._exceptions", "_UFuncNoLoopError", - &npy_UFuncNoLoopError); - if (npy_UFuncNoLoopError == NULL) { - return -1; - } - +initialize_thread_unsafe_state(void) { char *env = getenv("NUMPY_WARN_IF_NO_MEM_POLICY"); if ((env != NULL) && (strncmp(env, "1", 1) == 0)) { - numpy_warn_if_no_mem_policy = 1; + npy_thread_unsafe_state.warn_if_no_mem_policy = 1; } else { - numpy_warn_if_no_mem_policy = 0; + npy_thread_unsafe_state.warn_if_no_mem_policy = 0; } return 0; } - static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "_multiarray_umath", @@ -4975,6 +4898,14 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + if (initialize_thread_unsafe_state() < 0) { + goto err; + } + + if (init_import_mutex() < 0) { + goto err; + } + if (init_extobj() < 0) { goto err; } @@ -5174,6 +5105,23 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + // initialize static references to ndarray.__array_*__ special methods + npy_static_pydata.ndarray_array_finalize = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_finalize__"); + if (npy_static_pydata.ndarray_array_finalize == NULL) { + goto err; + } + npy_static_pydata.ndarray_array_ufunc = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_ufunc__"); + if (npy_static_pydata.ndarray_array_ufunc == NULL) { + goto err; + } + 
npy_static_pydata.ndarray_array_function = PyObject_GetAttrString( + (PyObject *)&PyArray_Type, "__array_function__"); + if (npy_static_pydata.ndarray_array_function == NULL) { + goto err; + } + /* * Initialize np.dtypes.StringDType * @@ -5183,14 +5131,15 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { * init_string_dtype() but that needs to happen after * the legacy dtypemeta classes are available. */ - static PyObject *add_dtype_helper = NULL; - npy_cache_import("numpy.dtypes", "_add_dtype_helper", &add_dtype_helper); - if (add_dtype_helper == NULL) { + + if (npy_cache_import_runtime( + "numpy.dtypes", "_add_dtype_helper", + &npy_runtime_imports._add_dtype_helper) == -1) { goto err; } if (PyObject_CallFunction( - add_dtype_helper, + npy_runtime_imports._add_dtype_helper, "Os", (PyObject *)&PyArray_StringDType, NULL) == NULL) { goto err; } @@ -5199,7 +5148,8 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { /* * Initialize the default PyDataMem_Handler capsule singleton. */ - PyDataMem_DefaultHandler = PyCapsule_New(&default_handler, "mem_handler", NULL); + PyDataMem_DefaultHandler = PyCapsule_New( + &default_handler, MEM_HANDLER_CAPSULE_NAME, NULL); if (PyDataMem_DefaultHandler == NULL) { goto err; } @@ -5212,6 +5162,19 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } + // initialize static reference to a zero-like array + npy_static_pydata.zero_pyint_like_arr = PyArray_ZEROS( + 0, NULL, NPY_DEFAULT_INT, NPY_FALSE); + if (npy_static_pydata.zero_pyint_like_arr == NULL) { + goto err; + } + ((PyArrayObject_fields *)npy_static_pydata.zero_pyint_like_arr)->flags |= + (NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); + + if (verify_static_structs_initialized() < 0) { + goto err; + } + /* * Export the API tables */ @@ -5234,6 +5197,11 @@ PyMODINIT_FUNC PyInit__multiarray_umath(void) { goto err; } +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + 
return m; err: diff --git a/numpy/_core/src/multiarray/multiarraymodule.h b/numpy/_core/src/multiarray/multiarraymodule.h index ba03d367eeb8..de234a8495d3 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.h +++ b/numpy/_core/src/multiarray/multiarraymodule.h @@ -1,24 +1,88 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_current_allocator; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_function; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_struct; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_priority; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_interface; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_implementation; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_like; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_numpy; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_where; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_preserve; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_convert_if_no_array; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_cpu; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_err_msg_substr; +/* + * A struct storing thread-unsafe global state for the _multiarray_umath + * module. We should refactor so the global state is thread-safe, + * e.g. by adding locking. + */ +typedef struct npy_thread_unsafe_state_struct { + /* + * Cached references to objects obtained via an import. All of these are + * can be initialized at any time by npy_cache_import. 
+ * + * Currently these are not initialized in a thread-safe manner but the + * failure mode is a reference leak for references to imported immortal + * modules so it will never lead to a crash unless users are doing something + * janky that we don't support like reloading. + * + * TODO: maybe make each entry a struct that looks like: + * + * struct { + * atomic_int initialized; + * PyObject *value; + * } + * + * so the initialization is thread-safe and the only possible lock + * contention happens before the cache is initialized, not on every single + * read. + */ + PyObject *_add_dtype_helper; + PyObject *_all; + PyObject *_amax; + PyObject *_amin; + PyObject *_any; + PyObject *array_function_errmsg_formatter; + PyObject *array_ufunc_errmsg_formatter; + PyObject *_clip; + PyObject *_commastring; + PyObject *_convert_to_stringdtype_kwargs; + PyObject *_default_array_repr; + PyObject *_default_array_str; + PyObject *_dump; + PyObject *_dumps; + PyObject *_getfield_is_safe; + PyObject *internal_gcd_func; + PyObject *_mean; + PyObject *NO_NEP50_WARNING; + PyObject *npy_ctypes_check; + PyObject *numpy_matrix; + PyObject *_prod; + PyObject *_promote_fields; + PyObject *_std; + PyObject *_sum; + PyObject *_ufunc_doc_signature_formatter; + PyObject *_var; + PyObject *_view_is_safe; + PyObject *_void_scalar_to_string; + + /* + * Used to test the internal-only scaled float test dtype + */ + npy_bool get_sfloat_dtype_initialized; + + /* + * controls the global madvise hugepage setting + */ + int madvise_hugepage; + + /* + * used to detect module reloading in the reload guard + */ + int reload_guard_initialized; + + /* + * Holds the user-defined setting for whether or not to warn + * if there is no memory policy set + */ + int warn_if_no_mem_policy; + +} npy_thread_unsafe_state_struct; + + +NPY_VISIBILITY_HIDDEN extern npy_thread_unsafe_state_struct npy_thread_unsafe_state; + +NPY_NO_EXPORT int +get_legacy_print_mode(void); #endif /* 
NUMPY_CORE_SRC_MULTIARRAY_MULTIARRAYMODULE_H_ */ diff --git a/numpy/_core/src/multiarray/nditer_impl.h b/numpy/_core/src/multiarray/nditer_impl.h index 0332d78ec913..790ddcb11f83 100644 --- a/numpy/_core/src/multiarray/nditer_impl.h +++ b/numpy/_core/src/multiarray/nditer_impl.h @@ -18,7 +18,7 @@ #include #include "numpy/arrayobject.h" -#include "npy_pycompat.h" + #include "convert_datatype.h" #include "lowlevel_strided_loops.h" diff --git a/numpy/_core/src/multiarray/nditer_pywrap.c b/numpy/_core/src/multiarray/nditer_pywrap.c index 94dd526ceb6c..ad20194f308f 100644 --- a/numpy/_core/src/multiarray/nditer_pywrap.c +++ b/numpy/_core/src/multiarray/nditer_pywrap.c @@ -15,7 +15,7 @@ #include "numpy/arrayobject.h" #include "npy_config.h" -#include "npy_pycompat.h" + #include "alloc.h" #include "common.h" #include "conversion_utils.h" diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c new file mode 100644 index 000000000000..38f8b5ebd119 --- /dev/null +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -0,0 +1,263 @@ +/* numpy static data structs and initialization */ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _UMATHMODULE +#define _MULTIARRAYMODULE + +#define PY_SSIZE_T_CLEAN +#include +#include + +#include "numpy/ndarraytypes.h" +#include "numpy/npy_common.h" +#include "numpy/arrayobject.h" +#include "npy_import.h" +#include "npy_static_data.h" +#include "extobj.h" + +// static variables are zero-filled by default, no need to explicitly do so +NPY_VISIBILITY_HIDDEN npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN npy_static_cdata_struct npy_static_cdata; + +#define INTERN_STRING(struct_member, string) \ + assert(npy_interned_str.struct_member == NULL); \ + npy_interned_str.struct_member = PyUnicode_InternFromString(string); \ + if (npy_interned_str.struct_member == NULL) { \ + return -1; \ + } \ + +NPY_NO_EXPORT int 
+intern_strings(void) +{ + INTERN_STRING(current_allocator, "current_allocator"); + INTERN_STRING(array, "__array__"); + INTERN_STRING(array_function, "__array_function__"); + INTERN_STRING(array_struct, "__array_struct__"); + INTERN_STRING(array_priority, "__array_priority__"); + INTERN_STRING(array_interface, "__array_interface__"); + INTERN_STRING(array_ufunc, "__array_ufunc__"); + INTERN_STRING(array_wrap, "__array_wrap__"); + INTERN_STRING(array_finalize, "__array_finalize__"); + INTERN_STRING(implementation, "_implementation"); + INTERN_STRING(axis1, "axis1"); + INTERN_STRING(axis2, "axis2"); + INTERN_STRING(item, "item"); + INTERN_STRING(like, "like"); + INTERN_STRING(numpy, "numpy"); + INTERN_STRING(where, "where"); + INTERN_STRING(convert, "convert"); + INTERN_STRING(preserve, "preserve"); + INTERN_STRING(convert_if_no_array, "convert_if_no_array"); + INTERN_STRING(cpu, "cpu"); + INTERN_STRING(dtype, "dtype"); + INTERN_STRING( + array_err_msg_substr, + "__array__() got an unexpected keyword argument 'copy'"); + INTERN_STRING(out, "out"); + INTERN_STRING(errmode_strings[0], "ignore"); + INTERN_STRING(errmode_strings[1], "warn"); + INTERN_STRING(errmode_strings[2], "raise"); + INTERN_STRING(errmode_strings[3], "call"); + INTERN_STRING(errmode_strings[4], "print"); + INTERN_STRING(errmode_strings[5], "log"); + INTERN_STRING(__dlpack__, "__dlpack__"); + INTERN_STRING(pyvals_name, "UFUNC_PYVALS_NAME"); + INTERN_STRING(legacy, "legacy"); + return 0; +} + +#define IMPORT_GLOBAL(base_path, name, object) \ + assert(object == NULL); \ + object = npy_import(base_path, name); \ + if (object == NULL) { \ + return -1; \ + } + + +/* + * Initializes global constants. + * + * All global constants should live inside the npy_static_pydata + * struct. 
+ * + * Not all entries in the struct are initialized here, some are + * initialized later but care must be taken in those cases to initialize + * the constant in a thread-safe manner, ensuring it is initialized + * exactly once. + * + * Anything initialized here is initialized during module import which + * the python interpreter ensures is done in a single thread. + * + * Anything imported here should not need the C-layer at all and will be + * imported before anything on the C-side is initialized. + */ +NPY_NO_EXPORT int +initialize_static_globals(void) +{ + /* + * Initialize contents of npy_static_pydata struct + * + * This struct holds cached references to python objects + * that we want to keep alive for the lifetime of the + * module for performance reasons + */ + + IMPORT_GLOBAL("math", "floor", + npy_static_pydata.math_floor_func); + + IMPORT_GLOBAL("math", "ceil", + npy_static_pydata.math_ceil_func); + + IMPORT_GLOBAL("math", "trunc", + npy_static_pydata.math_trunc_func); + + IMPORT_GLOBAL("math", "gcd", + npy_static_pydata.math_gcd_func); + + IMPORT_GLOBAL("numpy.exceptions", "AxisError", + npy_static_pydata.AxisError); + + IMPORT_GLOBAL("numpy.exceptions", "ComplexWarning", + npy_static_pydata.ComplexWarning); + + IMPORT_GLOBAL("numpy.exceptions", "DTypePromotionError", + npy_static_pydata.DTypePromotionError); + + IMPORT_GLOBAL("numpy.exceptions", "TooHardError", + npy_static_pydata.TooHardError); + + IMPORT_GLOBAL("numpy.exceptions", "VisibleDeprecationWarning", + npy_static_pydata.VisibleDeprecationWarning); + + IMPORT_GLOBAL("numpy._globals", "_CopyMode", + npy_static_pydata._CopyMode); + + IMPORT_GLOBAL("numpy._globals", "_NoValue", + npy_static_pydata._NoValue); + + IMPORT_GLOBAL("numpy._core._exceptions", "_ArrayMemoryError", + npy_static_pydata._ArrayMemoryError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncBinaryResolutionError", + npy_static_pydata._UFuncBinaryResolutionError); + + IMPORT_GLOBAL("numpy._core._exceptions", 
"_UFuncInputCastingError", + npy_static_pydata._UFuncInputCastingError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncNoLoopError", + npy_static_pydata._UFuncNoLoopError); + + IMPORT_GLOBAL("numpy._core._exceptions", "_UFuncOutputCastingError", + npy_static_pydata._UFuncOutputCastingError); + + IMPORT_GLOBAL("numpy._core.printoptions", "format_options", + npy_static_pydata.format_options); + + IMPORT_GLOBAL("os", "fspath", + npy_static_pydata.os_fspath); + + IMPORT_GLOBAL("os", "PathLike", + npy_static_pydata.os_PathLike); + + // default_truediv_type_tup + PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); + npy_static_pydata.default_truediv_type_tup = + PyTuple_Pack(3, tmp, tmp, tmp); + Py_DECREF(tmp); + if (npy_static_pydata.default_truediv_type_tup == NULL) { + return -1; + } + + npy_static_pydata.kwnames_is_copy = Py_BuildValue("(s)", "copy"); + if (npy_static_pydata.kwnames_is_copy == NULL) { + return -1; + } + + npy_static_pydata.one_obj = PyLong_FromLong((long) 1); + if (npy_static_pydata.one_obj == NULL) { + return -1; + } + + npy_static_pydata.zero_obj = PyLong_FromLong((long) 0); + if (npy_static_pydata.zero_obj == NULL) { + return -1; + } + + /* + * Initialize contents of npy_static_cdata struct + * + * Note that some entries are initialized elsewhere. Care + * must be taken to ensure all entries are initialized during + * module initialization and immutable thereafter. + * + * This struct holds global static caches. These are set + * up this way for performance reasons. + */ + + PyObject *flags = PySys_GetObject("flags"); /* borrowed object */ + if (flags == NULL) { + PyErr_SetString(PyExc_AttributeError, "cannot get sys.flags"); + return -1; + } + PyObject *level = PyObject_GetAttrString(flags, "optimize"); + if (level == NULL) { + return -1; + } + npy_static_cdata.optimize = PyLong_AsLong(level); + Py_DECREF(level); + + /* + * see unpack_bits for how this table is used. 
+ * + * LUT for bigendian bitorder, littleendian is handled via + * byteswapping in the loop. + * + * 256 8 byte blocks representing 8 bits expanded to 1 or 0 bytes + */ + npy_intp j; + for (j=0; j < 256; j++) { + npy_intp k; + for (k=0; k < 8; k++) { + npy_uint8 v = (j & (1 << k)) == (1 << k); + npy_static_cdata.unpack_lookup_big[j].bytes[7 - k] = v; + } + } + + return 0; +} + + +/* + * Verifies all entries in npy_interned_str and npy_static_pydata are + * non-NULL. + * + * Called at the end of initialization for _multiarray_umath. Some + * entries are initialized outside of this file because they depend on + * items that are initialized late in module initialization but they + * should all be initialized by the time this function is called. + */ +NPY_NO_EXPORT int +verify_static_structs_initialized(void) { + // verify all entries in npy_interned_str are filled in + for (int i=0; i < (sizeof(npy_interned_str_struct)/sizeof(PyObject *)); i++) { + if (*(((PyObject **)&npy_interned_str) + i) == NULL) { + PyErr_Format( + PyExc_SystemError, + "NumPy internal error: NULL entry detected in " + "npy_interned_str at index %d", i); + return -1; + } + } + + // verify all entries in npy_static_pydata are filled in + for (int i=0; i < (sizeof(npy_static_pydata_struct)/sizeof(PyObject *)); i++) { + if (*(((PyObject **)&npy_static_pydata) + i) == NULL) { + PyErr_Format( + PyExc_SystemError, + "NumPy internal error: NULL entry detected in " + "npy_static_pydata at index %d", i); + return -1; + } + } + return 0; +} diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h new file mode 100644 index 000000000000..277e4be1eaff --- /dev/null +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -0,0 +1,170 @@ +#ifndef NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ +#define NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ + +NPY_NO_EXPORT int +initialize_static_globals(void); + +NPY_NO_EXPORT int +intern_strings(void); + +NPY_NO_EXPORT int 
+verify_static_structs_initialized(void); + +typedef struct npy_interned_str_struct { + PyObject *current_allocator; + PyObject *array; + PyObject *array_function; + PyObject *array_struct; + PyObject *array_priority; + PyObject *array_interface; + PyObject *array_wrap; + PyObject *array_finalize; + PyObject *array_ufunc; + PyObject *implementation; + PyObject *axis1; + PyObject *axis2; + PyObject *item; + PyObject *like; + PyObject *numpy; + PyObject *where; + PyObject *convert; + PyObject *preserve; + PyObject *convert_if_no_array; + PyObject *cpu; + PyObject *dtype; + PyObject *array_err_msg_substr; + PyObject *out; + PyObject *errmode_strings[6]; + PyObject *__dlpack__; + PyObject *pyvals_name; + PyObject *legacy; +} npy_interned_str_struct; + +/* + * A struct that stores static global data used throughout + * _multiarray_umath, mostly to cache results that would be + * prohibitively expensive to compute at runtime in a tight loop. + * + * All items in this struct should be initialized during module + * initialization and thereafter should be immutable. Mutating items in + * this struct after module initialization is likely not thread-safe. + */ + +typedef struct npy_static_pydata_struct { + /* + * Used in ufunc_type_resolution.c to avoid reconstructing a tuple + * storing the default true division return types. + */ + PyObject *default_truediv_type_tup; + + /* + * Used to set up the default extobj context variable + */ + PyObject *default_extobj_capsule; + + /* + * The global ContextVar to store the extobject. It is exposed to Python + * as `_extobj_contextvar`. 
+ */ + PyObject *npy_extobj_contextvar; + + /* + * A reference to ndarray's implementations for __array_*__ special methods + */ + PyObject *ndarray_array_ufunc; + PyObject *ndarray_array_finalize; + PyObject *ndarray_array_function; + + /* + * References to the '1' and '0' PyLong objects + */ + PyObject *one_obj; + PyObject *zero_obj; + + /* + * Reference to an np.array(0, dtype=np.long) instance + */ + PyObject *zero_pyint_like_arr; + + /* + * References to items obtained via an import at module initialization + */ + PyObject *AxisError; + PyObject *ComplexWarning; + PyObject *DTypePromotionError; + PyObject *TooHardError; + PyObject *VisibleDeprecationWarning; + PyObject *_CopyMode; + PyObject *_NoValue; + PyObject *_ArrayMemoryError; + PyObject *_UFuncBinaryResolutionError; + PyObject *_UFuncInputCastingError; + PyObject *_UFuncNoLoopError; + PyObject *_UFuncOutputCastingError; + PyObject *math_floor_func; + PyObject *math_ceil_func; + PyObject *math_trunc_func; + PyObject *math_gcd_func; + PyObject *os_PathLike; + PyObject *os_fspath; + PyObject *format_options; + + /* + * Used in the __array__ internals to avoid building a tuple inline + */ + PyObject *kwnames_is_copy; + + /* + * Used in __imatmul__ to avoid building tuples inline + */ + PyObject *axes_1d_obj_kwargs; + PyObject *axes_2d_obj_kwargs; + + /* + * Used for CPU feature detection and dispatch + */ + PyObject *cpu_dispatch_registry; + + /* + * references to ArrayMethod implementations that are cached + * to avoid repeatedly creating them + */ + PyObject *VoidToGenericMethod; + PyObject *GenericToVoidMethod; + PyObject *ObjectToGenericMethod; + PyObject *GenericToObjectMethod; +} npy_static_pydata_struct; + + +typedef struct npy_static_cdata_struct { + /* + * stores sys.flags.optimize as a long, which is used in the add_docstring + * implementation + */ + long optimize; + + /* + * LUT used by unpack_bits + */ + union { + npy_uint8 bytes[8]; + npy_uint64 uint64; + } unpack_lookup_big[256]; + + /* + * A 
look-up table to recover integer type numbers from type characters. + * + * See the _MAX_LETTER and LETTER_TO_NUM macros in arraytypes.c.src. + * + * The smallest type number is ?, the largest is bounded by 'z'. + * + * This is initialized alongside the built-in dtypes + */ + npy_int16 _letter_to_num['z' + 1 - '?']; +} npy_static_cdata_struct; + +NPY_VISIBILITY_HIDDEN extern npy_interned_str_struct npy_interned_str; +NPY_VISIBILITY_HIDDEN extern npy_static_pydata_struct npy_static_pydata; +NPY_VISIBILITY_HIDDEN extern npy_static_cdata_struct npy_static_cdata; + +#endif // NUMPY_CORE_SRC_MULTIARRAY_STATIC_DATA_H_ diff --git a/numpy/_core/src/multiarray/number.c b/numpy/_core/src/multiarray/number.c index d42d23a281ea..f537d2b68e41 100644 --- a/numpy/_core/src/multiarray/number.c +++ b/numpy/_core/src/multiarray/number.c @@ -23,6 +23,10 @@ **************** Implement Number Protocol **************************** *************************************************************************/ +// this is not in the global data struct to avoid needing to include the +// definition of the NumericOps struct in multiarraymodule.h +// +// it is filled in during module initialization in a thread-safe manner NPY_NO_EXPORT NumericOps n_ops; /* NB: static objects initialized to zero */ /* @@ -60,24 +64,25 @@ array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2); * Those not present will not be changed */ -/* FIXME - macro contains a return */ -#define SET(op) temp = _PyDict_GetItemStringWithError(dict, #op); \ - if (temp == NULL && PyErr_Occurred()) { \ +/* FIXME - macro contains returns */ +#define SET(op) \ + res = PyDict_GetItemStringRef(dict, #op, &temp); \ + if (res == -1) { \ return -1; \ } \ - else if (temp != NULL) { \ + else if (res == 1) { \ if (!(PyCallable_Check(temp))) { \ + Py_DECREF(temp); \ return -1; \ } \ - Py_INCREF(temp); \ - Py_XDECREF(n_ops.op); \ - n_ops.op = temp; \ + Py_XSETREF(n_ops.op, temp); \ } NPY_NO_EXPORT int _PyArray_SetNumericOps(PyObject 
*dict) { PyObject *temp = NULL; + int res; SET(add); SET(subtract); SET(multiply); @@ -117,6 +122,20 @@ _PyArray_SetNumericOps(PyObject *dict) SET(conjugate); SET(matmul); SET(clip); + + // initialize static globals needed for matmul + npy_static_pydata.axes_1d_obj_kwargs = Py_BuildValue( + "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); + if (npy_static_pydata.axes_1d_obj_kwargs == NULL) { + return -1; + } + + npy_static_pydata.axes_2d_obj_kwargs = Py_BuildValue( + "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); + if (npy_static_pydata.axes_2d_obj_kwargs == NULL) { + return -1; + } + return 0; } @@ -267,15 +286,15 @@ array_matrix_multiply(PyObject *m1, PyObject *m2) static PyObject * array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) { - static PyObject *AxisError_cls = NULL; - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return NULL; - } - INPLACE_GIVE_UP_IF_NEEDED(self, other, nb_inplace_matrix_multiply, array_inplace_matrix_multiply); + PyObject *args = PyTuple_Pack(3, self, other, self); + if (args == NULL) { + return NULL; + } + PyObject *kwargs; + /* * Unlike `matmul(a, b, out=a)` we ensure that the result is not broadcast * if the result without `out` would have less dimensions than `a`. @@ -285,33 +304,11 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * The error here will be confusing, but for now, we enforce this by * passing the correct `axes=`. 
*/ - static PyObject *axes_1d_obj_kwargs = NULL; - static PyObject *axes_2d_obj_kwargs = NULL; - if (NPY_UNLIKELY(axes_1d_obj_kwargs == NULL)) { - axes_1d_obj_kwargs = Py_BuildValue( - "{s, [(i), (i, i), (i)]}", "axes", -1, -2, -1, -1); - if (axes_1d_obj_kwargs == NULL) { - return NULL; - } - } - if (NPY_UNLIKELY(axes_2d_obj_kwargs == NULL)) { - axes_2d_obj_kwargs = Py_BuildValue( - "{s, [(i, i), (i, i), (i, i)]}", "axes", -2, -1, -2, -1, -2, -1); - if (axes_2d_obj_kwargs == NULL) { - return NULL; - } - } - - PyObject *args = PyTuple_Pack(3, self, other, self); - if (args == NULL) { - return NULL; - } - PyObject *kwargs; if (PyArray_NDIM(self) == 1) { - kwargs = axes_1d_obj_kwargs; + kwargs = npy_static_pydata.axes_1d_obj_kwargs; } else { - kwargs = axes_2d_obj_kwargs; + kwargs = npy_static_pydata.axes_2d_obj_kwargs; } PyObject *res = PyObject_Call(n_ops.matmul, args, kwargs); Py_DECREF(args); @@ -321,7 +318,7 @@ array_inplace_matrix_multiply(PyArrayObject *self, PyObject *other) * AxisError should indicate that the axes argument didn't work out * which should mean the second operand not being 2 dimensional. 
*/ - if (PyErr_ExceptionMatches(AxisError_cls)) { + if (PyErr_ExceptionMatches(npy_static_pydata.AxisError)) { PyErr_SetString(PyExc_ValueError, "inplace matrix multiplication requires the first operand to " "have at least one and the second at least two dimensions."); diff --git a/numpy/_core/src/multiarray/public_dtype_api.c b/numpy/_core/src/multiarray/public_dtype_api.c index 73ab8a6b9f92..9b2d7a393842 100644 --- a/numpy/_core/src/multiarray/public_dtype_api.c +++ b/numpy/_core/src/multiarray/public_dtype_api.c @@ -71,7 +71,9 @@ PyArrayInitDTypeMeta_FromSpec( return -1; } - dtypemeta_initialize_struct_from_spec(DType, spec, 0); + if (dtypemeta_initialize_struct_from_spec(DType, spec, 0) < 0) { + return -1; + } if (NPY_DT_SLOTS(DType)->setitem == NULL || NPY_DT_SLOTS(DType)->getitem == NULL) { @@ -169,10 +171,15 @@ _fill_dtype_api(void *full_api_table[]) api_table[33] = &PyArray_ObjectDType; api_table[34] = &PyArray_VoidDType; /* Abstract */ - api_table[35] = &PyArray_PyIntAbstractDType; - api_table[36] = &PyArray_PyFloatAbstractDType; - api_table[37] = &PyArray_PyComplexAbstractDType; + api_table[35] = &PyArray_PyLongDType; + api_table[36] = &PyArray_PyFloatDType; + api_table[37] = &PyArray_PyComplexDType; api_table[38] = &PyArray_DefaultIntDType; /* Non-legacy DTypes that are built in to NumPy */ api_table[39] = &PyArray_StringDType; + + /* Abstract ones added directly: */ + full_api_table[366] = &PyArray_IntAbstractDType; + full_api_table[367] = &PyArray_FloatAbstractDType; + full_api_table[368] = &PyArray_ComplexAbstractDType; } diff --git a/numpy/_core/src/multiarray/refcount.c b/numpy/_core/src/multiarray/refcount.c index df16452ab283..571b50372684 100644 --- a/numpy/_core/src/multiarray/refcount.c +++ b/numpy/_core/src/multiarray/refcount.c @@ -18,10 +18,10 @@ #include "iterators.h" #include "dtypemeta.h" #include "refcount.h" - #include "npy_config.h" +#include "templ_common.h" /* for npy_mul_sizes_with_overflow */ + -#include "npy_pycompat.h" /* * 
Helper function to clear a strided memory (normally or always contiguous) @@ -56,6 +56,53 @@ PyArray_ClearBuffer( } +/* + * Helper function to zero an array buffer. + * + * Here "zeroing" means an abstract zeroing operation, implementing the + * the behavior of `np.zeros`. E.g. for an of references this is more + * complicated than zero-filling the buffer. + * + * Failure (returns -1) indicates some sort of programming or logical + * error and should not happen for a data type that has been set up + * correctly. In principle a sufficiently weird dtype might run out of + * memory but in practice this likely won't happen. + */ +NPY_NO_EXPORT int +PyArray_ZeroContiguousBuffer( + PyArray_Descr *descr, char *data, + npy_intp stride, npy_intp size, int aligned) +{ + NPY_traverse_info zero_info; + NPY_traverse_info_init(&zero_info); + /* Flags unused: float errors do not matter and we do not release GIL */ + NPY_ARRAYMETHOD_FLAGS flags_unused; + PyArrayMethod_GetTraverseLoop *get_fill_zero_loop = + NPY_DT_SLOTS(NPY_DTYPE(descr))->get_fill_zero_loop; + if (get_fill_zero_loop != NULL) { + if (get_fill_zero_loop( + NULL, descr, aligned, descr->elsize, &(zero_info.func), + &(zero_info.auxdata), &flags_unused) < 0) { + return -1; + } + } + else { + assert(zero_info.func == NULL); + } + if (zero_info.func == NULL) { + /* the multiply here should never overflow, since we already + checked if the new array size doesn't overflow */ + memset(data, 0, size*stride); + return 0; + } + + int res = zero_info.func( + NULL, descr, data, size, stride, zero_info.auxdata); + NPY_traverse_info_xfree(&zero_info); + return res; +} + + /* * Helper function to clear whole array. It seems plausible that we should * be able to get away with assuming the array is contiguous. 
diff --git a/numpy/_core/src/multiarray/refcount.h b/numpy/_core/src/multiarray/refcount.h index d9f472b2697e..41c428f321e4 100644 --- a/numpy/_core/src/multiarray/refcount.h +++ b/numpy/_core/src/multiarray/refcount.h @@ -6,6 +6,11 @@ PyArray_ClearBuffer( PyArray_Descr *descr, char *data, npy_intp stride, npy_intp size, int aligned); +NPY_NO_EXPORT int +PyArray_ZeroContiguousBuffer( + PyArray_Descr *descr, char *data, + npy_intp stride, npy_intp size, int aligned); + NPY_NO_EXPORT int PyArray_ClearArray(PyArrayObject *arr); diff --git a/numpy/_core/src/multiarray/scalarapi.c b/numpy/_core/src/multiarray/scalarapi.c index a4ed781142ad..9ca83d8a57f5 100644 --- a/numpy/_core/src/multiarray/scalarapi.c +++ b/numpy/_core/src/multiarray/scalarapi.c @@ -12,7 +12,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "array_coercion.h" #include "ctors.h" diff --git a/numpy/_core/src/multiarray/scalartypes.c.src b/numpy/_core/src/multiarray/scalartypes.c.src index 17098af5d3a6..689e16730cc0 100644 --- a/numpy/_core/src/multiarray/scalartypes.c.src +++ b/numpy/_core/src/multiarray/scalartypes.c.src @@ -21,6 +21,7 @@ #include "ctors.h" #include "dtypemeta.h" #include "usertypes.h" +#include "number.h" #include "numpyos.h" #include "can_cast_table.h" #include "common.h" @@ -33,14 +34,14 @@ #include "dragon4.h" #include "npy_longdouble.h" #include "npy_buffer.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" +#include "array_api_standard.h" #include #include "binop_override.h" -/* determines if legacy mode is enabled, global set in multiarraymodule.c */ -extern int npy_legacy_print_mode; - /* * used for allocating a single scalar, so use the default numpy * memory allocators instead of the (maybe) user overrides @@ -121,19 +122,6 @@ gentype_free(PyObject *v) } -static PyObject * -gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo) -{ - if (modulo != Py_None) { - /* modular exponentiation is not implemented (gh-8804) */ - 
Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; - } - - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_power, gentype_power); - return PyArray_Type.tp_as_number->nb_power(m1, m2, Py_None); -} - static PyObject * gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, char *str) @@ -165,33 +153,216 @@ gentype_generic_method(PyObject *self, PyObject *args, PyObject *kwds, } } -static PyObject * -gentype_add(PyObject *m1, PyObject* m2) -{ - /* special case str.__radd__, which should not call array_add */ - if (PyBytes_Check(m1) || PyUnicode_Check(m1)) { - Py_INCREF(Py_NotImplemented); - return Py_NotImplemented; + +/* + * Helper function to deal with binary operator deferral. Must be passed a + * valid self (a generic scalar) and an other item. + * May fill self_item and/or other_arr (but not both) with non-NULL values. + * + * Why this dance? When the other object is a exactly Python scalar something + * awkward happens historically in NumPy. + * NumPy doesn't define a result, but the ufunc would cast to `astype(object)` + * which is the same as `scalar.item()`. And that operation converts e.g. + * float32 or float64 to Python floats. + * It then retries. And because it is a builtin type now the operation may + * succeed. + * + * This retrying pass only makes sense if the other object is a Python + * scalar (otherwise we fill in `other_arr` which can be used to call the + * ufunc). + * Additionally, if `self.item()` has the same type as `self` we would end up + * in an infinite recursion. + * + * So the result of this function means the following: + * - < 0 error return. + * - self_op is filled in: Retry the Python operator. + * - other_op is filled in: Use the array operator (goes into ufuncs) + * (This may be the original generic if it is one.) + * - neither is filled in: Return NotImplemented. + * + * It is not possible for both to be filled. If `other` is also a generics, + * it is returned. 
+ */ +static inline int +find_binary_operation_path( + PyObject *self, PyObject *other, PyObject **self_op, PyObject **other_op) +{ + *other_op = NULL; + *self_op = NULL; + + if (PyArray_IsScalar(other, Generic) || + PyLong_CheckExact(other) || + PyFloat_CheckExact(other) || + PyComplex_CheckExact(other) || + PyBool_Check(other) || + PyArray_Check(other)) { + /* + * The other operand is ready for the operation already. Must pass on + * on float/long/complex mainly for weak promotion (NEP 50). + */ + *other_op = Py_NewRef(other); + return 0; + } + /* + * If other has __array_ufunc__ always use ufunc. If array-ufunc was None + * we already deferred. And any custom object with array-ufunc cannot call + * our ufuncs without preventing recursion. + * It may be nice to avoid double lookup in `BINOP_GIVE_UP_IF_NEEDED`. + */ + PyObject *attr = PyArray_LookupSpecial(other, npy_interned_str.array_ufunc); + if (attr != NULL) { + Py_DECREF(attr); + *other_op = Py_NewRef(other); + return 0; + } + else if (PyErr_Occurred()) { + PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */ + } + + /* + * Now check `other`. We want to know whether it is an object scalar + * and the easiest way is by converting to an array here. + */ + int was_scalar; + PyArrayObject *arr = (PyArrayObject *)PyArray_FromAny_int( + other, NULL, NULL, 0, 0, 0, NULL, &was_scalar); + if (arr == NULL) { + return -1; + } + + if (!was_scalar || PyArray_DESCR(arr)->type_num != NPY_OBJECT) { + /* + * The array is OK for usage and we can simply forward it. There + * is a theoretical subtlety here: If the other object implements + * `__array_wrap__`, we may ignore that. However, this only matters + * if the other object has the identical `__array_priority__` and + * additionally already deferred back to us. + * (`obj + scalar` and `scalar + obj` are not symmetric.) + * + * NOTE: Future NumPy may need to distinguish scalars here, one option + * could be marking the array. 
+ */ + *other_op = (PyObject *)arr; + return 0; + } + Py_DECREF(arr); + + /* + * If we are here, we need to operate on Python scalars. In general + * that would just fails since NumPy doesn't know the other object! + * + * However, NumPy (historically) made this often work magically because + * ufuncs for object dtype end up casting to object with `.item()`. This in + * turn ofthen returns a Python type (e.g. float for float32, float64)! + * Retrying then succeeds. So if (and only if) `self.item()` returns a new + * type, we can safely attempt the operation (again) with that. + */ + PyObject *self_item = PyObject_CallMethodNoArgs(self, npy_interned_str.item); + if (self_item == NULL) { + return -1; + } + if (Py_TYPE(self_item) != Py_TYPE(self)) { + /* self_item can be used to retry the operation */ + *self_op = self_item; + return 0; } - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_add, gentype_add); - return PyArray_Type.tp_as_number->nb_add(m1, m2); + /* The operation can't work and we will return NotImplemented */ + Py_DECREF(self_item); + return 0; } + +/* + * These are defined below as they require special handling, we still define + * a _gen version here. `power` is special as it has three arguments. + */ +static PyObject * +gentype_add(PyObject *m1, PyObject *m2); + +static PyObject * +gentype_multiply(PyObject *m1, PyObject *m2); + + /**begin repeat * - * #name = subtract, remainder, divmod, lshift, rshift, - * and, xor, or, floor_divide, true_divide# + * #name = add, multiply, subtract, remainder, divmod, + * lshift, rshift, and, xor, or, floor_divide, true_divide# + * #ufunc = add, multiply, subtract, remainder, divmod, + * left_shift, right_shift, bitwise_and, bitwise_xor, bitwise_or, + * floor_divide, true_divide# + * #func = Add, Multiply, Subtract, Remainder, Divmod, + * Lshift, Rshift, And, Xor, Or, FloorDivide, TrueDivide# + * #suff = _gen, _gen,,,,,,,,,,# */ +/* NOTE: We suffix the name for functions requiring special handling first. 
*/ static PyObject * -gentype_@name@(PyObject *m1, PyObject *m2) +gentype_@name@@suff@(PyObject *m1, PyObject *m2) { BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_@name@, gentype_@name@); - return PyArray_Type.tp_as_number->nb_@name@(m1, m2); + + PyObject *self = NULL; + PyObject *other = NULL; + PyObject *self_op, *other_op; + + if (!PyArray_IsScalar(m2, Generic)) { + self = m1; + other = m2; + } + else { + self = m2; + other = m1; + } + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { + return NULL; + } + if (self_op != NULL) { + PyObject *res; + if (self == m1) { + res = PyNumber_@func@(self_op, m2); + } + else { + res = PyNumber_@func@(m1, self_op); + } + Py_DECREF(self_op); + return res; + } + else if (other_op != NULL) { + /* Call the corresponding ufunc (with the array) */ + PyObject *res; + if (self == m1) { + res = PyArray_GenericBinaryFunction(m1, other_op, n_ops.@ufunc@); + } + else { + res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.@ufunc@); + } + Py_DECREF(other_op); + return res; + } + else { + assert(other_op == NULL); + Py_RETURN_NOTIMPLEMENTED; + } } /**end repeat**/ -/* Get a nested slot, or NULL if absent */ +/* + * The following operators use the above, but require specialization. + */ + +static PyObject * +gentype_add(PyObject *m1, PyObject *m2) +{ + /* special case str.__radd__, which should not call array_add */ + if (PyBytes_Check(m1) || PyUnicode_Check(m1)) { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + + return gentype_add_gen(m1, m2); +} + +/* Get a nested slot, or NULL if absent (for multiply implementation) */ #define GET_NESTED_SLOT(type, group, slot) \ ((type)->group == NULL ? 
NULL : (type)->group->slot) @@ -220,11 +391,75 @@ gentype_multiply(PyObject *m1, PyObject *m2) Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } - /* All normal cases are handled by PyArray's multiply */ - BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_multiply, gentype_multiply); - return PyArray_Type.tp_as_number->nb_multiply(m1, m2); + + return gentype_multiply_gen(m1, m2); } + +/* + * NOTE: The three argument nature of power requires code duplication here. + */ +static PyObject * +gentype_power(PyObject *m1, PyObject *m2, PyObject *modulo) +{ + if (modulo != Py_None) { + /* modular exponentiation is not implemented (gh-8804) */ + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + + BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_power, gentype_power); + + PyObject *self = NULL; + PyObject *other = NULL; + PyObject *self_op, *other_op; + + if (!PyArray_IsScalar(m2, Generic)) { + self = m1; + other = m2; + } + else { + self = m2; + other = m1; + } + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { + return NULL; + } + if (self_op != NULL) { + PyObject *res; + if (self == m1) { + res = PyNumber_Power(self_op, m2, Py_None); + } + else { + res = PyNumber_Power(m1, self_op, Py_None); + } + Py_DECREF(self_op); + return res; + } + else if (other_op != NULL) { + /* Call the corresponding ufunc (with the array) + * NOTE: As of NumPy 2.0 there are inconsistencies in array_power + * calling it would fail a (niche) test because an array is + * returned in one of the fast-paths. 
+ * (once NumPy propagates 0-D arrays, this is irrelevant) + */ + PyObject *res; + if (self == m1) { + res = PyArray_GenericBinaryFunction(m1, other_op, n_ops.power); + } + else { + res = PyArray_GenericBinaryFunction(other_op, m2, n_ops.power); + } + Py_DECREF(other_op); + return res; + } + else { + assert(other_op == NULL); + Py_RETURN_NOTIMPLEMENTED; + } +} + + /**begin repeat * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, * LONG, ULONG, LONGLONG, ULONGLONG# @@ -337,16 +572,20 @@ genint_type_repr(PyObject *self) if (value_string == NULL) { return NULL; } - if (npy_legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 125) { return value_string; } int num = _typenum_fromtypeobj((PyObject *)Py_TYPE(self), 0); PyObject *repr; - if (num == 0) { + if (num == NPY_NOTYPE) { /* Not a builtin scalar (presumably), just use the name */ - repr = PyUnicode_FromFormat("%S(%S)", Py_TYPE(self)->tp_name, value_string); + repr = PyUnicode_FromFormat("%s(%S)", Py_TYPE(self)->tp_name, value_string); Py_DECREF(value_string); return repr; } @@ -374,7 +613,11 @@ genbool_type_str(PyObject *self) static PyObject * genbool_type_repr(PyObject *self) { - if (npy_legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 125) { return genbool_type_str(self); } return PyUnicode_FromString( @@ -500,7 +743,11 @@ stringtype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.bytes_(%S)", ret)); } #endif /* IS_repr */ @@ -547,7 +794,11 @@ unicodetype_@form@(PyObject *self) if (ret == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = 
get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(ret, PyUnicode_FromFormat("np.str_(%S)", ret)); } #endif /* IS_repr */ @@ -609,14 +860,14 @@ _void_to_hex(const char* argbuf, const Py_ssize_t arglen, static PyObject * _void_scalar_to_string(PyObject *obj, int repr) { - static PyObject *tostring_func = NULL; - npy_cache_import("numpy._core.arrayprint", - "_void_scalar_to_string", &tostring_func); - if (tostring_func == NULL) { + if (npy_cache_import_runtime( + "numpy._core.arrayprint", "_void_scalar_to_string", + &npy_runtime_imports._void_scalar_to_string) == -1) { return NULL; } PyObject *is_repr = repr ? Py_True : Py_False; - return PyObject_CallFunctionObjArgs(tostring_func, obj, is_repr, NULL); + return PyObject_CallFunctionObjArgs( + npy_runtime_imports._void_scalar_to_string, obj, is_repr, NULL); } static PyObject * @@ -627,7 +878,11 @@ voidtype_repr(PyObject *self) /* Python helper checks for the legacy mode printing */ return _void_scalar_to_string(self, 1); } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { return _void_to_hex(s->obval, s->descr->elsize, "np.void(b'", "\\x", "')"); } else { @@ -679,7 +934,11 @@ datetimetype_repr(PyObject *self) */ if ((scal->obmeta.num == 1 && scal->obmeta.base != NPY_FR_h) || scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.datetime64('%s')", iso); } else { @@ -691,7 +950,11 @@ datetimetype_repr(PyObject *self) if (meta == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = 
PyUnicode_FromFormat("np.datetime64('%s','%S')", iso, meta); } else { @@ -735,7 +998,11 @@ timedeltatype_repr(PyObject *self) /* The metadata unit */ if (scal->obmeta.base == NPY_FR_GENERIC) { - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S)", val); } else { @@ -748,7 +1015,11 @@ timedeltatype_repr(PyObject *self) Py_DECREF(val); return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { ret = PyUnicode_FromFormat("np.timedelta64(%S,'%S')", val, meta); } else { @@ -1050,7 +1321,11 @@ static PyObject * npy_bool sign) { - if (npy_legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 113) { return legacy_@name@_format@kind@(val); } @@ -1081,7 +1356,11 @@ static PyObject * if (string == NULL) { return NULL; } - if (npy_legacy_print_mode > 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { Py_SETREF(string, PyUnicode_FromFormat("@repr_format@", string)); } #endif /* IS_repr */ @@ -1096,7 +1375,11 @@ c@name@type_@kind@(PyObject *self) npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@); TrimMode trim = TrimMode_DptZeros; - if (npy_legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 113) { return legacy_c@name@_format@kind@(val); } @@ -1109,7 +1392,11 @@ c@name@type_@kind@(PyObject *self) #ifdef IS_str ret = PyUnicode_FromFormat("%Sj", istr); #else /* IS_repr */ - if (npy_legacy_print_mode <= 125) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { 
+ return NULL; + } + if (legacy_print_mode <= 125) { ret = PyUnicode_FromFormat("%Sj", istr); } else { @@ -1157,7 +1444,11 @@ c@name@type_@kind@(PyObject *self) #ifdef IS_str string = PyUnicode_FromFormat("(%S%Sj)", rstr, istr); #else /* IS_repr */ - if (npy_legacy_print_mode > 125) { + legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode > 125) { string = PyUnicode_FromFormat("@crepr_format@", rstr, istr); } else { @@ -1182,7 +1473,11 @@ halftype_@kind@(PyObject *self) float floatval = npy_half_to_float(val); float absval; - if (npy_legacy_print_mode <= 113) { + int legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (legacy_print_mode <= 113) { return legacy_float_format@kind@(floatval); } @@ -1198,10 +1493,16 @@ halftype_@kind@(PyObject *self) #ifdef IS_str return string; #else - if (string == NULL || npy_legacy_print_mode <= 125) { + legacy_print_mode = get_legacy_print_mode(); + if (legacy_print_mode == -1) { + return NULL; + } + if (string == NULL || legacy_print_mode <= 125) { return string; } - return PyUnicode_FromFormat("np.float16(%S)", string); + PyObject *res = PyUnicode_FromFormat("np.float16(%S)", string); + Py_DECREF(string); + return res; #endif } @@ -1263,8 +1564,6 @@ static PyNumberMethods gentype_as_number = { static PyObject * gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) { - PyObject *arr, *ret; - /* * If the other object is None, False is always right. This avoids * the array None comparison, at least until deprecation it is fixed. 
@@ -1285,17 +1584,35 @@ gentype_richcompare(PyObject *self, PyObject *other, int cmp_op) RICHCMP_GIVE_UP_IF_NEEDED(self, other); - arr = PyArray_FromScalar(self, NULL); - if (arr == NULL) { + PyObject *self_op; + PyObject *other_op; + if (find_binary_operation_path(self, other, &self_op, &other_op) < 0) { return NULL; } - /* - * Call via PyObject_RichCompare to ensure that other.__eq__ - * has a chance to run when necessary - */ - ret = PyObject_RichCompare(arr, other, cmp_op); - Py_DECREF(arr); - return ret; + + /* We can always just call RichCompare again */ + if (other_op != NULL) { + /* If we use richcompare again, need to ensure that one op is array */ + self_op = PyArray_FromScalar(self, NULL); + if (self_op == NULL) { + Py_DECREF(other_op); + return NULL; + } + PyObject *res = PyObject_RichCompare(self_op, other_op, cmp_op); + Py_DECREF(self_op); + Py_DECREF(other_op); + return res; + } + else if (self_op != NULL) { + /* Try again, since other is an object scalar and this one mutated */ + PyObject *res = PyObject_RichCompare(self_op, other, cmp_op); + Py_DECREF(self_op); + return res; + } + else { + /* Comparison with arbitrary objects cannot be defined. 
*/ + Py_RETURN_NOTIMPLEMENTED; + } } static PyObject * @@ -1676,6 +1993,9 @@ static PyGetSetDef gentype_getsets[] = { {"ptp", (getter)gentype_ptp, (setter)0, NULL, NULL}, + {"device", + (getter)array_device, + (setter)0, NULL, NULL}, {"__array_interface__", (getter)gentype_interface_get, NULL, @@ -2449,6 +2769,15 @@ static PyMethodDef gentype_methods[] = { {"setflags", (PyCFunction)gentype_setflags, METH_VARARGS | METH_KEYWORDS, NULL}, + + /* For Array API compatibility */ + {"__array_namespace__", + (PyCFunction)array_array_namespace, + METH_VARARGS | METH_KEYWORDS, NULL}, + {"to_device", + (PyCFunction)array_to_device, + METH_VARARGS | METH_KEYWORDS, NULL}, + {NULL, NULL, 0, NULL} /* sentinel */ }; @@ -3034,13 +3363,7 @@ object_arrtype_alloc(PyTypeObject *type, Py_ssize_t items) * Object scalars should not actually exist, if they exist we should * consider it to be a bug. */ - static PyObject *visibleDeprecationWarning = NULL; - npy_cache_import("numpy", "VisibleDeprecationWarning", - &visibleDeprecationWarning); - if (visibleDeprecationWarning == NULL) { - return NULL; - } - if (PyErr_WarnEx(visibleDeprecationWarning, + if (PyErr_WarnEx(npy_static_pydata.VisibleDeprecationWarning, "Creating a NumPy object scalar. NumPy object scalars should " "never be created. If you see this message please inform the " "NumPy developers. 
Since this message should never be shown " diff --git a/numpy/_core/src/multiarray/sequence.c b/numpy/_core/src/multiarray/sequence.c index 7bdd64d27e5f..4c94bb798072 100644 --- a/numpy/_core/src/multiarray/sequence.c +++ b/numpy/_core/src/multiarray/sequence.c @@ -10,7 +10,7 @@ #include "npy_config.h" -#include "npy_pycompat.h" + #include "common.h" #include "mapping.h" diff --git a/numpy/_core/src/multiarray/shape.c b/numpy/_core/src/multiarray/shape.c index ede7a617e00b..340fe7289ac8 100644 --- a/numpy/_core/src/multiarray/shape.c +++ b/numpy/_core/src/multiarray/shape.c @@ -7,22 +7,17 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" - #include "numpy/npy_math.h" #include "npy_config.h" - -#include "npy_pycompat.h" - #include "arraywrap.h" #include "ctors.h" - #include "shape.h" - -#include "multiarraymodule.h" /* for interned strings */ +#include "npy_static_data.h" /* for interned strings */ #include "templ_common.h" /* for npy_mul_sizes_with_overflow */ #include "common.h" /* for convert_shape_to_string */ #include "alloc.h" +#include "refcount.h" static int _fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr); @@ -31,9 +26,6 @@ static int _attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims, npy_intp *newstrides, int is_f_order); -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype); - /*NUMPY_API * Resize (reallocate data). Only works if nothing else is referencing this * array and it is contiguous. 
If refcheck is 0, then the reference count is @@ -141,20 +133,14 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, } if (newnbytes > oldnbytes && PyArray_ISWRITEABLE(self)) { - /* Fill new memory with zeros */ - if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_ITEM_REFCOUNT)) { - PyObject *zero = PyLong_FromLong(0); - char *optr; - optr = PyArray_BYTES(self) + oldnbytes; - npy_intp n_new = newsize - oldsize; - for (npy_intp i = 0; i < n_new; i++) { - _putzero((char *)optr, zero, PyArray_DESCR(self)); - optr += elsize; - } - Py_DECREF(zero); - } - else{ - memset(PyArray_BYTES(self) + oldnbytes, 0, newnbytes - oldnbytes); + /* Fill new memory with zeros (PyLong zero for object arrays) */ + npy_intp stride = elsize; + npy_intp size = newsize - oldsize; + char *data = PyArray_BYTES(self) + oldnbytes; + int aligned = PyArray_ISALIGNED(self); + if (PyArray_ZeroContiguousBuffer(PyArray_DESCR(self), data, + stride, size, aligned) < 0) { + return NULL; } } @@ -201,6 +187,14 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck, NPY_NO_EXPORT PyObject * PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, NPY_ORDER order) +{ + return _reshape_with_copy_arg(self, newdims, order, NPY_COPY_IF_NEEDED); +} + + +NPY_NO_EXPORT PyObject * +_reshape_with_copy_arg(PyArrayObject *array, PyArray_Dims *newdims, + NPY_ORDER order, NPY_COPYMODE copy) { npy_intp i; npy_intp *dimensions = newdims->ptr; @@ -212,7 +206,7 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, int flags; if (order == NPY_ANYORDER) { - order = PyArray_ISFORTRAN(self) ? NPY_FORTRANORDER : NPY_CORDER; + order = PyArray_ISFORTRAN(array) ? 
NPY_FORTRANORDER : NPY_CORDER; } else if (order == NPY_KEEPORDER) { PyErr_SetString(PyExc_ValueError, @@ -220,56 +214,74 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, return NULL; } /* Quick check to make sure anything actually needs to be done */ - if (ndim == PyArray_NDIM(self)) { + if (ndim == PyArray_NDIM(array) && copy != NPY_COPY_ALWAYS) { same = NPY_TRUE; i = 0; while (same && i < ndim) { - if (PyArray_DIM(self,i) != dimensions[i]) { + if (PyArray_DIM(array, i) != dimensions[i]) { same=NPY_FALSE; } i++; } if (same) { - return PyArray_View(self, NULL, NULL); + return PyArray_View(array, NULL, NULL); } } /* * fix any -1 dimensions and check new-dimensions against old size */ - if (_fix_unknown_dimension(newdims, self) < 0) { + if (_fix_unknown_dimension(newdims, array) < 0) { return NULL; } /* - * sometimes we have to create a new copy of the array - * in order to get the right orientation and - * because we can't just reuse the buffer with the - * data in the order it is in. + * Memory order doesn't depend on a copy/no-copy context. + * 'order' argument is always honored. */ - Py_INCREF(self); - if (((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(self)) || - (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(self)))) { - int success = 0; - success = _attempt_nocopy_reshape(self, ndim, dimensions, - newstrides, order); - if (success) { - /* no need to copy the array after all */ - strides = newstrides; + if (copy == NPY_COPY_ALWAYS) { + PyObject *newcopy = PyArray_NewCopy(array, order); + if (newcopy == NULL) { + return NULL; } - else { - PyObject *newcopy; - newcopy = PyArray_NewCopy(self, order); - Py_DECREF(self); - if (newcopy == NULL) { + array = (PyArrayObject *)newcopy; + } + else { + /* + * sometimes we have to create a new copy of the array + * in order to get the right orientation and + * because we can't just reuse the buffer with the + * data in the order it is in. 
+ */ + Py_INCREF(array); + if (((order == NPY_CORDER && !PyArray_IS_C_CONTIGUOUS(array)) || + (order == NPY_FORTRANORDER && !PyArray_IS_F_CONTIGUOUS(array)))) { + int success = 0; + success = _attempt_nocopy_reshape(array, ndim, dimensions, + newstrides, order); + if (success) { + /* no need to copy the array after all */ + strides = newstrides; + } + else if (copy == NPY_COPY_NEVER) { + PyErr_SetString(PyExc_ValueError, + "Unable to avoid creating a copy while reshaping."); + Py_DECREF(array); return NULL; } - self = (PyArrayObject *)newcopy; + else { + PyObject *newcopy = PyArray_NewCopy(array, order); + Py_DECREF(array); + if (newcopy == NULL) { + return NULL; + } + array = (PyArrayObject *)newcopy; + } } } /* We always have to interpret the contiguous buffer correctly */ /* Make sure the flags argument is set. */ - flags = PyArray_FLAGS(self); + flags = PyArray_FLAGS(array); if (ndim > 1) { if (order == NPY_FORTRANORDER) { flags &= ~NPY_ARRAY_C_CONTIGUOUS; @@ -281,18 +293,17 @@ PyArray_Newshape(PyArrayObject *self, PyArray_Dims *newdims, } } - Py_INCREF(PyArray_DESCR(self)); + Py_INCREF(PyArray_DESCR(array)); ret = (PyArrayObject *)PyArray_NewFromDescr_int( - Py_TYPE(self), PyArray_DESCR(self), - ndim, dimensions, strides, PyArray_DATA(self), - flags, (PyObject *)self, (PyObject *)self, + Py_TYPE(array), PyArray_DESCR(array), + ndim, dimensions, strides, PyArray_DATA(array), + flags, (PyObject *)array, (PyObject *)array, _NPY_ARRAY_ENSURE_DTYPE_IDENTITY); - Py_DECREF(self); + Py_DECREF(array); return (PyObject *)ret; } - /* For backward compatibility -- Not recommended */ /*NUMPY_API @@ -313,41 +324,6 @@ PyArray_Reshape(PyArrayObject *self, PyObject *shape) } -static void -_putzero(char *optr, PyObject *zero, PyArray_Descr *dtype) -{ - if (!PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)) { - memset(optr, 0, dtype->elsize); - } - else if (PyDataType_HASFIELDS(dtype)) { - PyObject *key, *value, *title = NULL; - PyArray_Descr *new; - int offset; - Py_ssize_t pos = 
0; - while (PyDict_Next(PyDataType_FIELDS(dtype), &pos, &key, &value)) { - if (NPY_TITLE_KEY(key, value)) { - continue; - } - if (!PyArg_ParseTuple(value, "Oi|O", &new, &offset, &title)) { - return; - } - _putzero(optr + offset, zero, new); - } - } - else { - npy_intp i; - npy_intp nsize = dtype->elsize / sizeof(zero); - - for (i = 0; i < nsize; i++) { - Py_INCREF(zero); - memcpy(optr, &zero, sizeof(zero)); - optr += sizeof(zero); - } - } - return; -} - - /* * attempt to reshape an array without copying data * @@ -643,10 +619,10 @@ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2) int n = PyArray_NDIM(ap); int i; - if (check_and_adjust_axis_msg(&a1, n, npy_ma_str_axis1) < 0) { + if (check_and_adjust_axis_msg(&a1, n, npy_interned_str.axis1) < 0) { return NULL; } - if (check_and_adjust_axis_msg(&a2, n, npy_ma_str_axis2) < 0) { + if (check_and_adjust_axis_msg(&a2, n, npy_interned_str.axis2) < 0) { return NULL; } diff --git a/numpy/_core/src/multiarray/shape.h b/numpy/_core/src/multiarray/shape.h index d1c1ce723459..a9b91feb0b4a 100644 --- a/numpy/_core/src/multiarray/shape.h +++ b/numpy/_core/src/multiarray/shape.h @@ -1,6 +1,8 @@ #ifndef NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ #define NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ +#include "conversion_utils.h" + /* * Creates a sorted stride perm matching the KEEPORDER behavior * of the NpyIter object. 
Because this operates based on multiple @@ -27,4 +29,8 @@ PyArray_SqueezeSelected(PyArrayObject *self, npy_bool *axis_flags); NPY_NO_EXPORT PyObject * PyArray_MatrixTranspose(PyArrayObject *ap); +NPY_NO_EXPORT PyObject * +_reshape_with_copy_arg(PyArrayObject *array, PyArray_Dims *newdims, + NPY_ORDER order, NPY_COPYMODE copy); + #endif /* NUMPY_CORE_SRC_MULTIARRAY_SHAPE_H_ */ diff --git a/numpy/_core/src/multiarray/strfuncs.c b/numpy/_core/src/multiarray/strfuncs.c index 8b9966373466..efe5c8a4fdd8 100644 --- a/numpy/_core/src/multiarray/strfuncs.c +++ b/numpy/_core/src/multiarray/strfuncs.c @@ -7,12 +7,9 @@ #include "numpy/arrayobject.h" #include "npy_pycompat.h" #include "npy_import.h" +#include "multiarraymodule.h" #include "strfuncs.h" -static PyObject *PyArray_StrFunction = NULL; -static PyObject *PyArray_ReprFunction = NULL; - - static void npy_PyErr_SetStringChained(PyObject *type, const char *message) { @@ -30,68 +27,44 @@ npy_PyErr_SetStringChained(PyObject *type, const char *message) NPY_NO_EXPORT void PyArray_SetStringFunction(PyObject *op, int repr) { - if (repr) { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_ReprFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_ReprFunction = op; - } - else { - /* Dispose of previous callback */ - Py_XDECREF(PyArray_StrFunction); - /* Add a reference to new callback */ - Py_XINCREF(op); - /* Remember new callback */ - PyArray_StrFunction = op; - } + PyErr_SetString(PyExc_ValueError, "PyArray_SetStringFunction was removed"); } NPY_NO_EXPORT PyObject * array_repr(PyArrayObject *self) { - static PyObject *repr = NULL; - - if (PyArray_ReprFunction != NULL) { - return PyObject_CallFunctionObjArgs(PyArray_ReprFunction, self, NULL); - } - /* * We need to do a delayed import here as initialization on module load * leads to circular import problems. 
*/ - npy_cache_import("numpy._core.arrayprint", "_default_array_repr", &repr); - if (repr == NULL) { + if (npy_cache_import_runtime("numpy._core.arrayprint", "_default_array_repr", + &npy_runtime_imports._default_array_repr) == -1) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__repr__"); return NULL; } - return PyObject_CallFunctionObjArgs(repr, self, NULL); + return PyObject_CallFunctionObjArgs( + npy_runtime_imports._default_array_repr, self, NULL); } NPY_NO_EXPORT PyObject * array_str(PyArrayObject *self) { - static PyObject *str = NULL; - - if (PyArray_StrFunction != NULL) { - return PyObject_CallFunctionObjArgs(PyArray_StrFunction, self, NULL); - } - /* * We need to do a delayed import here as initialization on module load leads * to circular import problems. */ - npy_cache_import("numpy._core.arrayprint", "_default_array_str", &str); - if (str == NULL) { + if (npy_cache_import_runtime( + "numpy._core.arrayprint", "_default_array_str", + &npy_runtime_imports._default_array_str) == -1) { npy_PyErr_SetStringChained(PyExc_RuntimeError, "Unable to configure default ndarray.__str__"); return NULL; } - return PyObject_CallFunctionObjArgs(str, self, NULL); + return PyObject_CallFunctionObjArgs( + npy_runtime_imports._default_array_str, self, NULL); } diff --git a/numpy/_core/src/multiarray/stringdtype/casts.c b/numpy/_core/src/multiarray/stringdtype/casts.c index e6819e3212dd..44ae6c92d128 100644 --- a/numpy/_core/src/multiarray/stringdtype/casts.c +++ b/numpy/_core/src/multiarray/stringdtype/casts.c @@ -79,7 +79,10 @@ string_to_string_resolve_descriptors(PyObject *NPY_UNUSED(self), return NPY_UNSAFE_CASTING; } - *view_offset = 0; + // views are only legal between descriptors that share allocators (e.g. 
the same object) + if (descr0->allocator == descr1->allocator) { + *view_offset = 0; + }; return NPY_NO_CASTING; } @@ -156,7 +159,7 @@ unicode_to_string(PyArrayMethod_Context *context, char *const data[], npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(sdescr); @@ -392,6 +395,7 @@ string_to_bool(PyArrayMethod_Context *context, char *const data[], npy_string_allocator *allocator = NpyString_acquire_allocator(descr); int has_null = descr->na_object != NULL; int has_string_na = descr->has_string_na; + int has_nan_na = descr->has_nan_na; const npy_static_string *default_string = &descr->default_string; npy_intp N = dimensions[0]; @@ -412,8 +416,13 @@ string_to_bool(PyArrayMethod_Context *context, char *const data[], } else if (is_null) { if (has_null && !has_string_na) { - // numpy treats NaN as truthy, following python - *out = NPY_TRUE; + if (has_nan_na) { + // numpy treats NaN as truthy, following python + *out = NPY_TRUE; + } + else { + *out = NPY_FALSE; + } } else { *out = (npy_bool)(default_string->size == 0); @@ -571,6 +580,9 @@ string_to_pylong(char *in, int has_null, { PyObject *val_obj = non_nullable_string_to_pystring( in, has_null, default_string, allocator); + if (val_obj == NULL) { + return NULL; + } // interpret as an integer in base 10 PyObject *pylong_value = PyLong_FromUnicodeObject(val_obj, 10); Py_DECREF(val_obj); @@ -882,6 +894,7 @@ string_to_pyfloat(char *in, int has_null, goto fail; \ } \ double dval = PyFloat_AS_DOUBLE(pyfloat_value); \ + Py_DECREF(pyfloat_value); \ npy_##typename fval = (double_to_float)(dval); \ \ if (NPY_UNLIKELY(isinf_name(fval) && !(npy_isinf(dval)))) { \ @@ -1669,7 +1682,7 @@ void_to_string(PyArrayMethod_Context *context, char *const data[], 
npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); @@ -1799,7 +1812,7 @@ bytes_to_string(PyArrayMethod_Context *context, char *const data[], npy_intp const dimensions[], npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) { - PyArray_Descr **descrs = context->descriptors; + PyArray_Descr *const *descrs = context->descriptors; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)descrs[1]; npy_string_allocator *allocator = NpyString_acquire_allocator(descr); diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 807184c3c26a..81a846bf6d96 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -17,6 +17,7 @@ #include "gil_utils.h" #include "conversion_utils.h" #include "npy_import.h" +#include "multiarraymodule.h" /* * Internal helper to create new instances @@ -33,7 +34,6 @@ new_stringdtype_instance(PyObject *na_object, int coerce) char *default_string_buf = NULL; char *na_name_buf = NULL; - char array_owned = 0; npy_string_allocator *allocator = NpyString_new_allocator(PyMem_RawMalloc, PyMem_RawFree, PyMem_RawRealloc); @@ -138,7 +138,7 @@ new_stringdtype_instance(PyObject *na_object, int coerce) if (na_name_buf != NULL) { PyMem_RawFree(na_name_buf); } - if (allocator != NULL && array_owned != 2) { + if (allocator != NULL) { NpyString_free_allocator(allocator); } return NULL; @@ -185,6 +185,30 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) return na_eq_cmp(sna, ona); } +// Currently this can only return 0 or -1, the latter indicating that the +// error indicator is set. 
Pass in out_na if you want to figure out which +// na is valid. +NPY_NO_EXPORT int +stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na) { + if ((na1 != NULL) && (na2 != NULL)) { + int na_eq = na_eq_cmp(na1, na2); + + if (na_eq < 0) { + return -1; + } + else if (na_eq == 0) { + PyErr_Format(PyExc_TypeError, + "Cannot find a compatible null string value for " + "null strings '%R' and '%R'", na1, na2); + return -1; + } + } + if (out_na != NULL) { + *out_na = na1 ? na1 : na2; + } + return 0; +} + /* * This is used to determine the correct dtype to return when dealing * with a mix of different dtypes (for example when creating an array @@ -193,18 +217,18 @@ _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona) static PyArray_StringDTypeObject * common_instance(PyArray_StringDTypeObject *dtype1, PyArray_StringDTypeObject *dtype2) { - int eq = _eq_comparison(dtype1->coerce, dtype2->coerce, dtype1->na_object, - dtype2->na_object); + PyObject *out_na_object = NULL; - if (eq <= 0) { - PyErr_SetString( - PyExc_ValueError, - "Cannot find common instance for unequal dtype instances"); + if (stringdtype_compatible_na( + dtype1->na_object, dtype2->na_object, &out_na_object) == -1) { + PyErr_Format(PyExc_TypeError, + "Cannot find common instance for incompatible dtypes " + "'%R' and '%R'", (PyObject *)dtype1, (PyObject *)dtype2); return NULL; } return (PyArray_StringDTypeObject *)new_stringdtype_instance( - dtype1->na_object, dtype1->coerce); + out_na_object, dtype1->coerce && dtype1->coerce); } /* @@ -280,30 +304,22 @@ stringdtype_setitem(PyArray_StringDTypeObject *descr, PyObject *obj, char **data { npy_packed_static_string *sdata = (npy_packed_static_string *)dataptr; - int is_cmp = 0; - // borrow reference PyObject *na_object = descr->na_object; - // Note there are two different na_object != NULL checks here. - // - // Do not refactor this! 
- // // We need the result of the comparison after acquiring the allocator, but // cannot use functions requiring the GIL when the allocator is acquired, // so we do the comparison before acquiring the allocator. - if (na_object != NULL) { - is_cmp = na_eq_cmp(obj, na_object); - if (is_cmp == -1) { - return -1; - } + int na_cmp = na_eq_cmp(obj, na_object); + if (na_cmp == -1) { + return -1; } npy_string_allocator *allocator = NpyString_acquire_allocator(descr); if (na_object != NULL) { - if (is_cmp) { + if (na_cmp) { if (NpyString_pack_null(allocator, sdata) < 0) { PyErr_SetString(PyExc_MemoryError, "Failed to pack null string during StringDType " @@ -400,8 +416,23 @@ stringdtype_getitem(PyArray_StringDTypeObject *descr, char **dataptr) // PyArray_NonzeroFunc // Unicode strings are nonzero if their length is nonzero. npy_bool -nonzero(void *data, void *NPY_UNUSED(arr)) +nonzero(void *data, void *arr) { + PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)PyArray_DESCR(arr); + int has_null = descr->na_object != NULL; + int has_nan_na = descr->has_nan_na; + int has_string_na = descr->has_string_na; + if (has_null && NpyString_isnull((npy_packed_static_string *)data)) { + if (!has_string_na) { + if (has_nan_na) { + // numpy treats NaN as truthy, following python + return 1; + } + else { + return 0; + } + } + } return NpyString_size((npy_packed_static_string *)data) != 0; } @@ -517,7 +548,7 @@ stringdtype_ensure_canonical(PyArray_StringDTypeObject *self) static int stringdtype_clear_loop(void *NPY_UNUSED(traverse_context), - PyArray_Descr *descr, char *data, npy_intp size, + const PyArray_Descr *descr, char *data, npy_intp size, npy_intp stride, NpyAuxData *NPY_UNUSED(auxdata)) { PyArray_StringDTypeObject *sdescr = (PyArray_StringDTypeObject *)descr; @@ -554,88 +585,36 @@ stringdtype_get_clear_loop(void *NPY_UNUSED(traverse_context), } static int -stringdtype_is_known_scalar_type(PyArray_DTypeMeta *NPY_UNUSED(cls), 
+stringdtype_is_known_scalar_type(PyArray_DTypeMeta *cls, PyTypeObject *pytype) { - if (pytype == &PyFloat_Type) { - return 1; - } - if (pytype == &PyLong_Type) { - return 1; - } - if (pytype == &PyBool_Type) { - return 1; - } - if (pytype == &PyComplex_Type) { - return 1; - } - if (pytype == &PyUnicode_Type) { + if (python_builtins_are_known_scalar_types(cls, pytype)) { return 1; } - if (pytype == &PyBytes_Type) { - return 1; - } - if (pytype == &PyBoolArrType_Type) { - return 1; - } - if (pytype == &PyByteArrType_Type) { - return 1; - } - if (pytype == &PyShortArrType_Type) { - return 1; - } - if (pytype == &PyIntArrType_Type) { - return 1; - } - if (pytype == &PyLongArrType_Type) { - return 1; - } - if (pytype == &PyLongLongArrType_Type) { - return 1; - } - if (pytype == &PyUByteArrType_Type) { - return 1; - } - if (pytype == &PyUShortArrType_Type) { - return 1; - } - if (pytype == &PyUIntArrType_Type) { - return 1; - } - if (pytype == &PyULongArrType_Type) { - return 1; - } - if (pytype == &PyULongLongArrType_Type) { - return 1; - } - if (pytype == &PyHalfArrType_Type) { - return 1; - } - if (pytype == &PyFloatArrType_Type) { - return 1; - } - if (pytype == &PyDoubleArrType_Type) { - return 1; - } - if (pytype == &PyLongDoubleArrType_Type) { - return 1; - } - if (pytype == &PyCFloatArrType_Type) { - return 1; - } - if (pytype == &PyCDoubleArrType_Type) { - return 1; - } - if (pytype == &PyCLongDoubleArrType_Type) { - return 1; - } - if (pytype == &PyIntpArrType_Type) { - return 1; - } - if (pytype == &PyUIntpArrType_Type) { - return 1; - } - if (pytype == &PyDatetimeArrType_Type) { + // accept every built-in numpy dtype + else if (pytype == &PyBoolArrType_Type || + pytype == &PyByteArrType_Type || + pytype == &PyShortArrType_Type || + pytype == &PyIntArrType_Type || + pytype == &PyLongArrType_Type || + pytype == &PyLongLongArrType_Type || + pytype == &PyUByteArrType_Type || + pytype == &PyUShortArrType_Type || + pytype == &PyUIntArrType_Type || + pytype == 
&PyULongArrType_Type || + pytype == &PyULongLongArrType_Type || + pytype == &PyHalfArrType_Type || + pytype == &PyFloatArrType_Type || + pytype == &PyDoubleArrType_Type || + pytype == &PyLongDoubleArrType_Type || + pytype == &PyCFloatArrType_Type || + pytype == &PyCDoubleArrType_Type || + pytype == &PyCLongDoubleArrType_Type || + pytype == &PyIntpArrType_Type || + pytype == &PyUIntpArrType_Type || + pytype == &PyDatetimeArrType_Type || + pytype == &PyTimedeltaArrType_Type) + { return 1; } return 0; @@ -696,7 +675,7 @@ stringdtype_dealloc(PyArray_StringDTypeObject *self) { Py_XDECREF(self->na_object); // this can be null if an error happens while initializing an instance - if (self->allocator != NULL && self->array_owned != 2) { + if (self->allocator != NULL) { NpyString_free_allocator(self->allocator); } PyMem_RawFree((char *)self->na_name.buf); @@ -729,8 +708,6 @@ stringdtype_repr(PyArray_StringDTypeObject *self) return ret; } -static PyObject *_convert_to_stringdtype_kwargs = NULL; - // implementation of __reduce__ magic method to reconstruct a StringDType // object from the serialized data in the pickle. 
Uses the python // _convert_to_stringdtype_kwargs for convenience because this isn't @@ -738,19 +715,21 @@ static PyObject *_convert_to_stringdtype_kwargs = NULL; static PyObject * stringdtype__reduce__(PyArray_StringDTypeObject *self, PyObject *NPY_UNUSED(args)) { - npy_cache_import("numpy._core._internal", "_convert_to_stringdtype_kwargs", - &_convert_to_stringdtype_kwargs); - - if (_convert_to_stringdtype_kwargs == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_convert_to_stringdtype_kwargs", + &npy_runtime_imports._convert_to_stringdtype_kwargs) == -1) { return NULL; } if (self->na_object != NULL) { - return Py_BuildValue("O(iO)", _convert_to_stringdtype_kwargs, - self->coerce, self->na_object); + return Py_BuildValue( + "O(iO)", npy_runtime_imports._convert_to_stringdtype_kwargs, + self->coerce, self->na_object); } - return Py_BuildValue("O(i)", _convert_to_stringdtype_kwargs, self->coerce); + return Py_BuildValue( + "O(i)", npy_runtime_imports._convert_to_stringdtype_kwargs, + self->coerce); } static PyMethodDef PyArray_StringDType_methods[] = { @@ -922,8 +901,7 @@ load_new_string(npy_packed_static_string *out, npy_static_string *out_ss, "Failed to allocate string in %s", err_context); return -1; } - int is_null = NpyString_load(allocator, out_pss, out_ss); - if (is_null == -1) { + if (NpyString_load(allocator, out_pss, out_ss) == -1) { npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", err_context); return -1; diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.h b/numpy/_core/src/multiarray/stringdtype/dtype.h index 278513fe8f12..2c2719602c32 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.h +++ b/numpy/_core/src/multiarray/stringdtype/dtype.h @@ -49,6 +49,9 @@ stringdtype_finalize_descr(PyArray_Descr *dtype); NPY_NO_EXPORT int _eq_comparison(int scoerce, int ocoerce, PyObject *sna, PyObject *ona); +NPY_NO_EXPORT int +stringdtype_compatible_na(PyObject *na1, PyObject *na2, PyObject **out_na); + #ifdef 
__cplusplus } #endif diff --git a/numpy/_core/src/multiarray/stringdtype/static_string.c b/numpy/_core/src/multiarray/stringdtype/static_string.c index 85f499c3c3ae..4d33479409cd 100644 --- a/numpy/_core/src/multiarray/stringdtype/static_string.c +++ b/numpy/_core/src/multiarray/stringdtype/static_string.c @@ -131,7 +131,11 @@ struct npy_string_allocator { npy_string_free_func free; npy_string_realloc_func realloc; npy_string_arena arena; +#if PY_VERSION_HEX < 0x30d00b3 PyThread_type_lock *allocator_lock; +#else + PyMutex allocator_lock; +#endif }; static void @@ -245,18 +249,22 @@ NpyString_new_allocator(npy_string_malloc_func m, npy_string_free_func f, if (allocator == NULL) { return NULL; } +#if PY_VERSION_HEX < 0x30d00b3 PyThread_type_lock *allocator_lock = PyThread_allocate_lock(); if (allocator_lock == NULL) { f(allocator); PyErr_SetString(PyExc_MemoryError, "Unable to allocate thread lock"); return NULL; } + allocator->allocator_lock = allocator_lock; +#else + memset(&allocator->allocator_lock, 0, sizeof(PyMutex)); +#endif allocator->malloc = m; allocator->free = f; allocator->realloc = r; // arena buffer gets allocated in arena_malloc allocator->arena = NEW_ARENA; - allocator->allocator_lock = allocator_lock; return allocator; } @@ -269,9 +277,11 @@ NpyString_free_allocator(npy_string_allocator *allocator) if (allocator->arena.buffer != NULL) { f(allocator->arena.buffer); } +#if PY_VERSION_HEX < 0x30d00b3 if (allocator->allocator_lock != NULL) { PyThread_free_lock(allocator->allocator_lock); } +#endif f(allocator); } @@ -286,11 +296,15 @@ NpyString_free_allocator(npy_string_allocator *allocator) * allocator mutex is held, as doing so may cause deadlocks. 
*/ NPY_NO_EXPORT npy_string_allocator * -NpyString_acquire_allocator(PyArray_StringDTypeObject *descr) +NpyString_acquire_allocator(const PyArray_StringDTypeObject *descr) { +#if PY_VERSION_HEX < 0x30d00b3 if (!PyThread_acquire_lock(descr->allocator->allocator_lock, NOWAIT_LOCK)) { PyThread_acquire_lock(descr->allocator->allocator_lock, WAIT_LOCK); } +#else + PyMutex_Lock(&descr->allocator->allocator_lock); +#endif return descr->allocator; } @@ -318,7 +332,7 @@ NpyString_acquire_allocator(PyArray_StringDTypeObject *descr) */ NPY_NO_EXPORT void NpyString_acquire_allocators(size_t n_descriptors, - PyArray_Descr *descrs[], + PyArray_Descr *const descrs[], npy_string_allocator *allocators[]) { for (size_t i=0; iallocator_lock); +#else + PyMutex_Unlock(&allocator->allocator_lock); +#endif } /*NUMPY_API diff --git a/numpy/_core/src/multiarray/temp_elide.c b/numpy/_core/src/multiarray/temp_elide.c index 289040673571..662a2fa52b06 100644 --- a/numpy/_core/src/multiarray/temp_elide.c +++ b/numpy/_core/src/multiarray/temp_elide.c @@ -124,22 +124,22 @@ check_callers(int * cannot) * TODO some calls go over scalarmath in umath but we cannot get the base * address of it from multiarraymodule as it is not linked against it */ - static int init = 0; + NPY_TLS static int init = 0; /* * measured DSO object memory start and end, if an address is located * inside these bounds it is part of that library so we don't need to call * dladdr on it (assuming linear memory) */ - static void * pos_python_start; - static void * pos_python_end; - static void * pos_ma_start; - static void * pos_ma_end; + NPY_TLS static void * pos_python_start; + NPY_TLS static void * pos_python_end; + NPY_TLS static void * pos_ma_start; + NPY_TLS static void * pos_ma_end; /* known address storage to save dladdr calls */ - static void * py_addr[64]; - static void * pyeval_addr[64]; - static npy_intp n_py_addr = 0; - static npy_intp n_pyeval = 0; + NPY_TLS static void * py_addr[64]; + NPY_TLS static void * 
pyeval_addr[64]; + NPY_TLS static npy_intp n_py_addr = 0; + NPY_TLS static npy_intp n_pyeval = 0; void *buffer[NPY_MAX_STACKSIZE]; int i, nptrs; diff --git a/numpy/_core/src/multiarray/textreading/rows.c b/numpy/_core/src/multiarray/textreading/rows.c index 19c07b18fb51..214c5c499ad8 100644 --- a/numpy/_core/src/multiarray/textreading/rows.c +++ b/numpy/_core/src/multiarray/textreading/rows.c @@ -6,6 +6,7 @@ #define _MULTIARRAYMODULE #include "numpy/arrayobject.h" #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" #include "alloc.h" #include @@ -58,13 +59,16 @@ create_conv_funcs( PyObject *key, *value; Py_ssize_t pos = 0; + int error = 0; + Py_BEGIN_CRITICAL_SECTION(converters); while (PyDict_Next(converters, &pos, &key, &value)) { Py_ssize_t column = PyNumber_AsSsize_t(key, PyExc_IndexError); if (column == -1 && PyErr_Occurred()) { PyErr_Format(PyExc_TypeError, "keys of the converters dictionary must be integers; " "got %.100R", key); - goto error; + error = 1; + break; } if (usecols != NULL) { /* @@ -92,7 +96,8 @@ create_conv_funcs( PyErr_Format(PyExc_ValueError, "converter specified for column %zd, which is invalid " "for the number of fields %zd.", column, num_fields); - goto error; + error = 1; + break; } if (column < 0) { column += num_fields; @@ -102,11 +107,18 @@ create_conv_funcs( PyErr_Format(PyExc_TypeError, "values of the converters dictionary must be callable, " "but the value associated with key %R is not", key); - goto error; + error = 1; + break; } Py_INCREF(value); conv_funcs[column] = value; } + Py_END_CRITICAL_SECTION(); + + if (error) { + goto error; + } + return conv_funcs; error: @@ -480,6 +492,12 @@ read_rows(stream *s, ((PyArrayObject_fields *)data_array)->dimensions[0] = row_count; } + /* + * If row_size is too big, F_CONTIGUOUS is always set + * as array was created for only one row of data. + * We just update the contiguous flags here. 
+ */ + PyArray_UpdateFlags(data_array, NPY_ARRAY_F_CONTIGUOUS); return data_array; error: diff --git a/numpy/_core/src/multiarray/usertypes.c b/numpy/_core/src/multiarray/usertypes.c index 92325247a60c..8d90f5cc968f 100644 --- a/numpy/_core/src/multiarray/usertypes.c +++ b/numpy/_core/src/multiarray/usertypes.c @@ -34,7 +34,7 @@ maintainer email: oliphant.travis@ieee.org #include "common.h" -#include "npy_pycompat.h" + #include "usertypes.h" #include "dtypemeta.h" @@ -306,7 +306,8 @@ PyArray_RegisterDataType(PyArray_DescrProto *descr_proto) descr->type_num = typenum; /* update prototype to notice duplicate registration */ descr_proto->type_num = typenum; - if (dtypemeta_wrap_legacy_descriptor(descr, descr_proto->f, name, NULL) < 0) { + if (dtypemeta_wrap_legacy_descriptor( + descr, descr_proto->f, &PyArrayDescr_Type, name, NULL) < 0) { descr->type_num = -1; NPY_NUMUSERTYPES--; /* Override the type, it might be wrong and then decref crashes */ diff --git a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp index 1c06eb5755c7..194a81e2d7e9 100644 --- a/numpy/_core/src/npysort/highway_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort.dispatch.cpp @@ -2,31 +2,23 @@ #define VQSORT_ONLY_STATIC 1 #include "hwy/contrib/sort/vqsort-inl.h" +#if VQSORT_ENABLED + +#define DISPATCH_VQSORT(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, intptr_t size) \ +{ \ + hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); \ +} \ + namespace np { namespace highway { namespace qsort_simd { -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t *arr, intptr_t size) -{ - 
hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(float *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(double *arr, intptr_t size) -{ - hwy::HWY_NAMESPACE::VQSortStatic(arr, size, hwy::SortAscending()); -} + DISPATCH_VQSORT(int32_t) + DISPATCH_VQSORT(uint32_t) + DISPATCH_VQSORT(int64_t) + DISPATCH_VQSORT(uint64_t) + DISPATCH_VQSORT(double) + DISPATCH_VQSORT(float) } } } // np::highway::qsort_simd + +#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/highway_qsort.hpp b/numpy/_core/src/npysort/highway_qsort.hpp index e08fb3629ec8..ba3fe4920594 100644 --- a/numpy/_core/src/npysort/highway_qsort.hpp +++ b/numpy/_core/src/npysort/highway_qsort.hpp @@ -1,8 +1,22 @@ #ifndef NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP #define NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#include "hwy/highway.h" + #include "common.hpp" +// This replicates VQSORT_ENABLED from hwy/contrib/sort/shared-inl.h +// without checking the scalar target as this is not built within the dynamic +// dispatched sources. 
+#if (HWY_COMPILER_MSVC && !HWY_IS_DEBUG_BUILD) || \ + (HWY_ARCH_ARM_V7 && HWY_IS_DEBUG_BUILD) || \ + (HWY_ARCH_ARM_A64 && HWY_COMPILER_GCC_ACTUAL && HWY_IS_ASAN) || \ + (HWY_ARCH_ARM_A64 && HWY_COMPILER_CLANG && \ + (HWY_IS_HWASAN || HWY_IS_MSAN || HWY_IS_TSAN || HWY_IS_ASAN)) +#define NPY_DISABLE_HIGHWAY_SORT +#endif + +#ifndef NPY_DISABLE_HIGHWAY_SORT namespace np { namespace highway { namespace qsort_simd { #ifndef NPY_DISABLE_OPTIMIZATION @@ -21,3 +35,4 @@ NPY_CPU_DISPATCH_DECLARE(template void QSelect, (T* arr, npy_intp n } } } // np::highway::qsort_simd #endif // NUMPY_SRC_COMMON_NPYSORT_HWY_SIMD_QSORT_HPP +#endif // NPY_DISABLE_HIGHWAY_SORT diff --git a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp index 35b6cc58c7e8..d069cb6373d0 100644 --- a/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/highway_qsort_16bit.dispatch.cpp @@ -4,6 +4,8 @@ #include "quicksort.hpp" +#if VQSORT_ENABLED + namespace np { namespace highway { namespace qsort_simd { template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, intptr_t size) @@ -24,3 +26,5 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, intptr_t size) } } } } // np::highway::qsort_simd + +#endif // VQSORT_ENABLED diff --git a/numpy/_core/src/npysort/quicksort.cpp b/numpy/_core/src/npysort/quicksort.cpp index 15e5668f599d..aca748056f39 100644 --- a/numpy/_core/src/npysort/quicksort.cpp +++ b/numpy/_core/src/npysort/quicksort.cpp @@ -84,7 +84,7 @@ inline bool quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #else + #elif !defined(NPY_DISABLE_HIGHWAY_SORT) #include "highway_qsort_16bit.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif @@ -95,7 +95,7 @@ inline bool 
quicksort_dispatch(T *start, npy_intp num) #if defined(NPY_CPU_AMD64) || defined(NPY_CPU_X86) // x86 32-bit and 64-bit #include "x86_simd_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::qsort_simd::template QSort, ); - #else + #elif !defined(NPY_DISABLE_HIGHWAY_SORT) #include "highway_qsort.dispatch.h" NPY_CPU_DISPATCH_CALL_XB(dispfunc = np::highway::qsort_simd::template QSort, ); #endif diff --git a/numpy/_core/src/npysort/x86-simd-sort b/numpy/_core/src/npysort/x86-simd-sort index 868696d22ad8..9a1b616d5cd4 160000 --- a/numpy/_core/src/npysort/x86-simd-sort +++ b/numpy/_core/src/npysort/x86-simd-sort @@ -1 +1 @@ -Subproject commit 868696d22ad84c5cd46bf9c2a4dac65e60a9213a +Subproject commit 9a1b616d5cd4eaf49f7664fb86ccc1d18bad2b8d diff --git a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp index 3083d6a8bf23..04bb03532719 100644 --- a/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_argsort.dispatch.cpp @@ -1,87 +1,26 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SKX) -#include "x86-simd-sort/src/avx512-64bit-argsort.hpp" -#elif defined(NPY_HAVE_AVX2) -#include "x86-simd-sort/src/avx2-32bit-half.hpp" -#include "x86-simd-sort/src/avx2-32bit-qsort.hpp" -#include "x86-simd-sort/src/avx2-64bit-qsort.hpp" -#include "x86-simd-sort/src/xss-common-argsort.h" -#endif +#include "x86-simd-sort/src/x86simdsort-static-incl.h" -namespace { -template -void x86_argsort(T* arr, size_t* arg, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_argsort(arr, arg, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_argsort(arr, arg, num, true); -#endif -} - -template -void x86_argselect(T* arr, size_t* arg, npy_intp kth, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_argselect(arr, arg, kth, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_argselect(arr, arg, kth, num, true); -#endif -} -} // anonymous +#define 
DISPATCH_ARG_METHODS(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(TYPE* arr, npy_intp* arg, npy_intp num, npy_intp kth) \ +{ \ + x86simdsortStatic::argselect(arr, reinterpret_cast(arg), kth, num, true); \ +} \ +template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(TYPE* arr, npy_intp *arg, npy_intp size) \ +{ \ + x86simdsortStatic::argsort(arr, reinterpret_cast(arg), size, true); \ +} \ namespace np { namespace qsort_simd { -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint32_t *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(int64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(uint64_t*arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(float *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSelect)(double *arr, npy_intp* arg, npy_intp num, npy_intp kth) -{ - x86_argselect(arr, reinterpret_cast(arg), kth, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int32_t *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint32_t *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(int64_t *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(uint64_t *arr, npy_intp *arg, npy_intp size) 
-{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(float *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} -template<> void NPY_CPU_DISPATCH_CURFX(ArgQSort)(double *arr, npy_intp *arg, npy_intp size) -{ - x86_argsort(arr, reinterpret_cast(arg), size); -} + DISPATCH_ARG_METHODS(uint32_t) + DISPATCH_ARG_METHODS(int32_t) + DISPATCH_ARG_METHODS(float) + DISPATCH_ARG_METHODS(uint64_t) + DISPATCH_ARG_METHODS(int64_t) + DISPATCH_ARG_METHODS(double) }} // namespace np::simd diff --git a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp index ea4516408c56..c4505f058857 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort.dispatch.cpp @@ -1,89 +1,25 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SKX) - #include "x86-simd-sort/src/avx512-32bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-64bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-64bit-argsort.hpp" -#elif defined(NPY_HAVE_AVX2) - #include "x86-simd-sort/src/avx2-32bit-qsort.hpp" - #include "x86-simd-sort/src/avx2-64bit-qsort.hpp" -#endif +#include "x86-simd-sort/src/x86simdsort-static-incl.h" -namespace { -template -void x86_qsort(T* arr, npy_intp num) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_qsort(arr, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_qsort(arr, num, true); -#endif -} - -template -void x86_qselect(T* arr, npy_intp num, npy_intp kth) -{ -#if defined(NPY_HAVE_AVX512_SKX) - avx512_qselect(arr, kth, num, true); -#elif defined(NPY_HAVE_AVX2) - avx2_qselect(arr, kth, num, true); -#endif -} -} // anonymous +#define DISPATCH_SORT_METHODS(TYPE) \ +template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(TYPE *arr, npy_intp num, npy_intp kth) \ +{ \ + x86simdsortStatic::qselect(arr, kth, num, true); \ +} \ +template<> void NPY_CPU_DISPATCH_CURFX(QSort)(TYPE *arr, npy_intp 
num) \ +{ \ + x86simdsortStatic::qsort(arr, num, true); \ +} \ namespace np { namespace qsort_simd { -#if defined(NPY_HAVE_AVX512_SKX) || defined(NPY_HAVE_AVX2) -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int32_t *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint32_t *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int64_t*arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint64_t*arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(float *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(double *arr, npy_intp num, npy_intp kth) -{ - x86_qselect(arr, num, kth); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int32_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint32_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int64_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint64_t *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(float *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -template<> void NPY_CPU_DISPATCH_CURFX(QSort)(double *arr, npy_intp num) -{ - x86_qsort(arr, num); -} -#endif // NPY_HAVE_AVX512_SKX || NPY_HAVE_AVX2 - + DISPATCH_SORT_METHODS(uint32_t) + DISPATCH_SORT_METHODS(int32_t) + DISPATCH_SORT_METHODS(float) + DISPATCH_SORT_METHODS(uint64_t) + DISPATCH_SORT_METHODS(int64_t) + DISPATCH_SORT_METHODS(double) }} // namespace np::qsort_simd #endif // __CYGWIN__ diff --git a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp index 
8222fc77cae3..063e713c5256 100644 --- a/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp +++ b/numpy/_core/src/npysort/x86_simd_qsort_16bit.dispatch.cpp @@ -1,11 +1,13 @@ #include "x86_simd_qsort.hpp" #ifndef __CYGWIN__ -#if defined(NPY_HAVE_AVX512_SPR) - #include "x86-simd-sort/src/avx512fp16-16bit-qsort.hpp" - #include "x86-simd-sort/src/avx512-16bit-qsort.hpp" -#elif defined(NPY_HAVE_AVX512_ICL) - #include "x86-simd-sort/src/avx512-16bit-qsort.hpp" +#include "x86-simd-sort/src/x86simdsort-static-incl.h" +/* + * MSVC doesn't set the macro __AVX512VBMI2__ which is required for the 16-bit + * functions and therefore we need to manually include this file here + */ +#ifdef _MSC_VER +#include "x86-simd-sort/src/avx512-16bit-qsort.hpp" #endif namespace np { namespace qsort_simd { @@ -13,24 +15,23 @@ namespace np { namespace qsort_simd { /* * QSelect dispatch functions: */ -#if defined(NPY_HAVE_AVX512_ICL) || defined(NPY_HAVE_AVX512_SPR) template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(Half *arr, npy_intp num, npy_intp kth) { #if defined(NPY_HAVE_AVX512_SPR) - avx512_qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); + x86simdsortStatic::qselect(reinterpret_cast<_Float16*>(arr), kth, num, true); #else - avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true); + avx512_qselect_fp16(reinterpret_cast(arr), kth, num, true, false); #endif } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(uint16_t *arr, npy_intp num, npy_intp kth) { - avx512_qselect(arr, kth, num); + x86simdsortStatic::qselect(arr, kth, num); } template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_intp kth) { - avx512_qselect(arr, kth, num); + x86simdsortStatic::qselect(arr, kth, num); } /* @@ -39,20 +40,19 @@ template<> void NPY_CPU_DISPATCH_CURFX(QSelect)(int16_t *arr, npy_intp num, npy_ template<> void NPY_CPU_DISPATCH_CURFX(QSort)(Half *arr, npy_intp size) { #if defined(NPY_HAVE_AVX512_SPR) - avx512_qsort(reinterpret_cast<_Float16*>(arr), size, true); + 
x86simdsortStatic::qsort(reinterpret_cast<_Float16*>(arr), size, true); #else - avx512_qsort_fp16(reinterpret_cast(arr), size, true); + avx512_qsort_fp16(reinterpret_cast(arr), size, true, false); #endif } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(uint16_t *arr, npy_intp size) { - avx512_qsort(arr, size); + x86simdsortStatic::qsort(arr, size); } template<> void NPY_CPU_DISPATCH_CURFX(QSort)(int16_t *arr, npy_intp size) { - avx512_qsort(arr, size); + x86simdsortStatic::qsort(arr, size); } -#endif // NPY_HAVE_AVX512_ICL || SPR }} // namespace np::qsort_simd diff --git a/numpy/_core/src/umath/_operand_flag_tests.c b/numpy/_core/src/umath/_operand_flag_tests.c index a674dfa560b7..9747b7946512 100644 --- a/numpy/_core/src/umath/_operand_flag_tests.c +++ b/numpy/_core/src/umath/_operand_flag_tests.c @@ -36,9 +36,9 @@ inplace_add(char **args, npy_intp const *dimensions, npy_intp const *steps, void PyUFuncGenericFunction funcs[1] = {&inplace_add}; /* These are the input and return dtypes of logit.*/ -static char types[2] = {NPY_INTP, NPY_INTP}; +static const char types[2] = {NPY_INTP, NPY_INTP}; -static void *data[1] = {NULL}; +static void *const data[1] = {NULL}; static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, @@ -77,6 +77,11 @@ PyMODINIT_FUNC PyInit__operand_flag_tests(void) ((PyUFuncObject*)ufunc)->iter_flags = NPY_ITER_REDUCE_OK; PyModule_AddObject(m, "inplace_add", (PyObject*)ufunc); +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; fail: diff --git a/numpy/_core/src/umath/_rational_tests.c b/numpy/_core/src/umath/_rational_tests.c index 80acf38354ae..a95c89b373df 100644 --- a/numpy/_core/src/umath/_rational_tests.c +++ b/numpy/_core/src/umath/_rational_tests.c @@ -1273,8 +1273,8 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { { int types2[3] = {npy_rational,npy_rational,npy_rational}; PyObject* gufunc = 
PyUFunc_FromFuncAndDataAndSignature(0,0,0,0,2,1, - PyUFunc_None,(char*)"matrix_multiply", - (char*)"return result of multiplying two matrices of rationals", + PyUFunc_None,"matrix_multiply", + "return result of multiplying two matrices of rationals", 0,"(m,n),(n,p)->(m,p)"); if (!gufunc) { goto fail; @@ -1291,8 +1291,8 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { int types3[3] = {NPY_INT64,NPY_INT64,npy_rational}; PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,2,1, - PyUFunc_None,(char*)"test_add", - (char*)"add two matrices of int64 and return rational matrix",0); + PyUFunc_None,"test_add", + "add two matrices of int64 and return rational matrix",0); if (!ufunc) { goto fail; } @@ -1306,8 +1306,8 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { /* Create test ufunc with rational types using RegisterLoopForDescr */ { PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,2,1, - PyUFunc_None,(char*)"test_add_rationals", - (char*)"add two matrices of rationals and return rational matrix",0); + PyUFunc_None,"test_add_rationals", + "add two matrices of rationals and return rational matrix",0); PyArray_Descr* types[3] = {npyrational_descr, npyrational_descr, npyrational_descr}; @@ -1326,7 +1326,7 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { #define NEW_UNARY_UFUNC(name,type,doc) { \ int types[2] = {npy_rational,type}; \ PyObject* ufunc = PyUFunc_FromFuncAndData(0,0,0,0,1,1, \ - PyUFunc_None,(char*)#name,(char*)doc,0); \ + PyUFunc_None,#name,doc,0); \ if (!ufunc) { \ goto fail; \ } \ @@ -1345,8 +1345,8 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { static const char types[3] = {type,type,type}; \ static void* data[1] = {0}; \ PyObject* ufunc = PyUFunc_FromFuncAndData( \ - (PyUFuncGenericFunction*)func, data,(char*)types, \ - 1,2,1,PyUFunc_One,(char*)#name,(char*)doc,0); \ + (PyUFuncGenericFunction*)func, data,types, \ + 1,2,1,PyUFunc_One,#name,doc,0); \ if (!ufunc) { \ goto fail; \ } \ @@ -1355,6 +1355,11 @@ PyMODINIT_FUNC PyInit__rational_tests(void) { 
GCD_LCM_UFUNC(gcd,NPY_INT64,"greatest common denominator of two integers"); GCD_LCM_UFUNC(lcm,NPY_INT64,"least common multiple of two integers"); +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; fail: diff --git a/numpy/_core/src/umath/_scaled_float_dtype.c b/numpy/_core/src/umath/_scaled_float_dtype.c index 2c7d231b3695..fbdbbb8d2375 100644 --- a/numpy/_core/src/umath/_scaled_float_dtype.c +++ b/numpy/_core/src/umath/_scaled_float_dtype.c @@ -25,6 +25,7 @@ #include "dtypemeta.h" #include "dispatching.h" #include "gil_utils.h" +#include "multiarraymodule.h" typedef struct { PyArray_Descr base; @@ -653,8 +654,8 @@ add_sfloats_resolve_descriptors( */ static int translate_given_descrs_to_double( - int nin, int nout, PyArray_DTypeMeta *wrapped_dtypes[], - PyArray_Descr *given_descrs[], PyArray_Descr *new_descrs[]) + int nin, int nout, PyArray_DTypeMeta *const wrapped_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]) { assert(nin == 2 && nout == 1); for (int i = 0; i < 3; i++) { @@ -671,8 +672,8 @@ translate_given_descrs_to_double( static int translate_loop_descrs( - int nin, int nout, PyArray_DTypeMeta *new_dtypes[], - PyArray_Descr *given_descrs[], + int nin, int nout, PyArray_DTypeMeta *const new_dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *NPY_UNUSED(original_descrs[]), PyArray_Descr *loop_descrs[]) { @@ -867,10 +868,7 @@ sfloat_init_ufuncs(void) { NPY_NO_EXPORT PyObject * get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) { - /* Allow calling the function multiple times. 
*/ - static npy_bool initialized = NPY_FALSE; - - if (initialized) { + if (npy_thread_unsafe_state.get_sfloat_dtype_initialized) { Py_INCREF(&PyArray_SFloatDType); return (PyObject *)&PyArray_SFloatDType; } @@ -899,6 +897,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args)) return NULL; } - initialized = NPY_TRUE; + npy_thread_unsafe_state.get_sfloat_dtype_initialized = NPY_TRUE; return (PyObject *)&PyArray_SFloatDType; } diff --git a/numpy/_core/src/umath/_struct_ufunc_tests.c b/numpy/_core/src/umath/_struct_ufunc_tests.c index ee71c4698f79..90b7e147d50a 100644 --- a/numpy/_core/src/umath/_struct_ufunc_tests.c +++ b/numpy/_core/src/umath/_struct_ufunc_tests.c @@ -156,5 +156,11 @@ PyMODINIT_FUNC PyInit__struct_ufunc_tests(void) PyDict_SetItemString(d, "add_triplet", add_triplet); Py_DECREF(add_triplet); + +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/_core/src/umath/_umath_tests.c.src b/numpy/_core/src/umath/_umath_tests.c.src index 2479c9c9279c..c1bcc3c8957e 100644 --- a/numpy/_core/src/umath/_umath_tests.c.src +++ b/numpy/_core/src/umath/_umath_tests.c.src @@ -13,13 +13,13 @@ #undef NPY_INTERNAL_BUILD #endif // for add_INT32_negative_indexed -#define NPY_TARGET_VERSION NPY_2_0_API_VERSION +#define NPY_TARGET_VERSION NPY_2_1_API_VERSION #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/ndarrayobject.h" #include "numpy/npy_math.h" -#include "npy_pycompat.h" + #include "npy_config.h" #include "npy_cpu_features.h" @@ -422,29 +422,29 @@ defdict = { */ static PyUFuncGenericFunction always_error_functions[] = { always_error_loop }; -static void *always_error_data[] = { (void *)NULL }; -static char always_error_signatures[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const always_error_data[] = { (void *)NULL }; +static const char always_error_signatures[] = { NPY_DOUBLE, 
NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction inner1d_functions[] = { INTP_inner1d, DOUBLE_inner1d }; -static void *inner1d_data[] = { (void *)NULL, (void *)NULL }; -static char inner1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const inner1d_data[] = { (void *)NULL, (void *)NULL }; +static const char inner1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction innerwt_functions[] = { INTP_innerwt, DOUBLE_innerwt }; -static void *innerwt_data[] = { (void *)NULL, (void *)NULL }; -static char innerwt_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const innerwt_data[] = { (void *)NULL, (void *)NULL }; +static const char innerwt_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction matrix_multiply_functions[] = { INTP_matrix_multiply, FLOAT_matrix_multiply, DOUBLE_matrix_multiply }; -static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL }; -static char matrix_multiply_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL }; +static const char matrix_multiply_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction cross1d_functions[] = { INTP_cross1d, DOUBLE_cross1d }; -static void *cross1d_data[] = { (void *)NULL, (void *)NULL }; -static char cross1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; +static void *const cross1d_data[] = { (void *)NULL, (void *)NULL }; +static const char cross1d_signatures[] = { NPY_INTP, NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE }; static 
PyUFuncGenericFunction euclidean_pdist_functions[] = { FLOAT_euclidean_pdist, DOUBLE_euclidean_pdist }; -static void *eucldiean_pdist_data[] = { (void *)NULL, (void *)NULL }; -static char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT, +static void *const eucldiean_pdist_data[] = { (void *)NULL, (void *)NULL }; +static const char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE }; static PyUFuncGenericFunction cumsum_functions[] = { INTP_cumsum, DOUBLE_cumsum }; -static void *cumsum_data[] = { (void *)NULL, (void *)NULL }; -static char cumsum_signatures[] = { NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE }; +static void *const cumsum_data[] = { (void *)NULL, (void *)NULL }; +static const char cumsum_signatures[] = { NPY_INTP, NPY_INTP, NPY_DOUBLE, NPY_DOUBLE }; static int @@ -761,6 +761,95 @@ add_INT32_negative_indexed(PyObject *module, PyObject *dict) { return 0; } +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +// Define the gufunc 'conv1d_full' +// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#define MIN(a, b) (((a) < (b)) ? (a) : (b)) +#define MAX(a, b) (((a) < (b)) ? (b) : (a)) + +int conv1d_full_process_core_dims(PyUFuncObject *ufunc, + npy_intp *core_dim_sizes) +{ + // + // core_dim_sizes will hold the core dimensions [m, n, p]. + // p will be -1 if the caller did not provide the out argument. 
+ // + npy_intp m = core_dim_sizes[0]; + npy_intp n = core_dim_sizes[1]; + npy_intp p = core_dim_sizes[2]; + npy_intp required_p = m + n - 1; + + if (m == 0 && n == 0) { + PyErr_SetString(PyExc_ValueError, + "conv1d_full: both inputs have core dimension 0; the function " + "requires that at least one input has positive size."); + return -1; + } + if (p == -1) { + core_dim_sizes[2] = required_p; + return 0; + } + if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "conv1d_full: the core dimension p of the out parameter " + "does not equal m + n - 1, where m and n are the core " + "dimensions of the inputs x and y; got m=%zd and n=%zd so " + "p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; +} + +static void +conv1d_full_double_loop(char **args, + npy_intp const *dimensions, + npy_intp const *steps, + void *NPY_UNUSED(func)) +{ + // Input and output arrays + char *p_x = args[0]; + char *p_y = args[1]; + char *p_out = args[2]; + // Number of loops of pdist calculations to execute. + npy_intp nloops = dimensions[0]; + // Core dimensions + npy_intp m = dimensions[1]; + npy_intp n = dimensions[2]; + npy_intp p = dimensions[3]; // Must be m + n - 1. 
+ // Core strides + npy_intp x_stride = steps[0]; + npy_intp y_stride = steps[1]; + npy_intp out_stride = steps[2]; + // Inner strides + npy_intp x_inner_stride = steps[3]; + npy_intp y_inner_stride = steps[4]; + npy_intp out_inner_stride = steps[5]; + + for (npy_intp loop = 0; loop < nloops; ++loop, p_x += x_stride, + p_y += y_stride, + p_out += out_stride) { + // Basic implementation of 1d convolution + for (npy_intp k = 0; k < p; ++k) { + double sum = 0.0; + for (npy_intp i = MAX(0, k - n + 1); i < MIN(m, k + 1); ++i) { + double x_i = *(double *)(p_x + i*x_inner_stride); + double y_k_minus_i = *(double *)(p_y + (k - i)*y_inner_stride); + sum += x_i * y_k_minus_i; + } + *(double *)(p_out + k*out_inner_stride) = sum; + } + } +} + +static PyUFuncGenericFunction conv1d_full_functions[] = { + (PyUFuncGenericFunction) &conv1d_full_double_loop +}; +static void *const conv1d_full_data[] = {NULL}; +static const char conv1d_full_typecodes[] = {NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE}; + + static PyMethodDef UMath_TestsMethods[] = { {"test_signature", UMath_Tests_test_signature, METH_VARARGS, "Test signature parsing of ufunc. \n" @@ -829,5 +918,38 @@ PyMODINIT_FUNC PyInit__umath_tests(void) { "cannot load _umath_tests module."); return NULL; } + + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + // Define the gufunc 'conv1d_full' + // Shape signature is (m),(n)->(p) where p must be m + n - 1. 
+ // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + PyUFuncObject *gufunc = (PyUFuncObject *) PyUFunc_FromFuncAndDataAndSignature( + conv1d_full_functions, + conv1d_full_data, + conv1d_full_typecodes, + 1, 2, 1, PyUFunc_None, "conv1d_full", + "convolution of x and y ('full' mode)", + 0, "(m),(n)->(p)"); + if (gufunc == NULL) { + Py_DECREF(m); + return NULL; + } + gufunc->process_core_dims_func = &conv1d_full_process_core_dims; + + int status = PyModule_AddObject(m, "conv1d_full", (PyObject *) gufunc); + if (status == -1) { + Py_DECREF(gufunc); + Py_DECREF(m); + return NULL; + } + + // - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/_core/src/umath/clip.cpp b/numpy/_core/src/umath/clip.cpp index c30ab89b1595..e051692c6d48 100644 --- a/numpy/_core/src/umath/clip.cpp +++ b/numpy/_core/src/umath/clip.cpp @@ -1,6 +1,8 @@ /** * This module provides the inner loops for the clip ufunc */ +#include + #define _UMATHMODULE #define _MULTIARRAYMODULE #define NPY_NO_DEPRECATED_API NPY_API_VERSION @@ -150,50 +152,100 @@ _NPY_CLIP(T x, T min, T max) return _NPY_MIN(_NPY_MAX((x), (min)), (max)); } -template -static void -_npy_clip_(T **args, npy_intp const *dimensions, npy_intp const *steps) -{ - npy_intp n = dimensions[0]; - if (steps[1] == 0 && steps[2] == 0) { - /* min and max are constant throughout the loop, the most common case - */ - /* NOTE: it may be possible to optimize these checks for nan */ - T min_val = *args[1]; - T max_val = *args[2]; +template +static inline void +_npy_clip_const_minmax_( + char *ip, npy_intp is, char *op, npy_intp os, npy_intp n, T min_val, T max_val, + std::false_type /* non-floating point */ +) +{ + /* contiguous, branch to let the compiler optimize */ + if (is == sizeof(T) && os == sizeof(T)) { + for (npy_intp i = 0; i < n; 
i++, ip += sizeof(T), op += sizeof(T)) { + *(T *)op = _NPY_CLIP(*(T *)ip, min_val, max_val); + } + } + else { + for (npy_intp i = 0; i < n; i++, ip += is, op += os) { + *(T *)op = _NPY_CLIP(*(T *)ip, min_val, max_val); + } + } +} - T *ip1 = args[0], *op1 = args[3]; - npy_intp is1 = steps[0] / sizeof(T), os1 = steps[3] / sizeof(T); +template +static inline void +_npy_clip_const_minmax_( + char *ip, npy_intp is, char *op, npy_intp os, npy_intp n, T min_val, T max_val, + std::true_type /* floating point */ +) +{ + if (!npy_isnan(min_val) && !npy_isnan(max_val)) { + /* + * The min/max_val are not NaN so the comparison below will + * propagate NaNs in the input without further NaN checks. + */ /* contiguous, branch to let the compiler optimize */ - if (is1 == 1 && os1 == 1) { - for (npy_intp i = 0; i < n; i++, ip1++, op1++) { - *op1 = _NPY_CLIP(*ip1, min_val, max_val); + if (is == sizeof(T) && os == sizeof(T)) { + for (npy_intp i = 0; i < n; i++, ip += sizeof(T), op += sizeof(T)) { + T x = *(T *)ip; + if (x < min_val) { + x = min_val; + } + if (x > max_val) { + x = max_val; + } + *(T *)op = x; } } else { - for (npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) { - *op1 = _NPY_CLIP(*ip1, min_val, max_val); + for (npy_intp i = 0; i < n; i++, ip += is, op += os) { + T x = *(T *)ip; + if (x < min_val) { + x = min_val; + } + if (x > max_val) { + x = max_val; + } + *(T *)op = x; } } } else { - T *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3]; - npy_intp is1 = steps[0] / sizeof(T), is2 = steps[1] / sizeof(T), - is3 = steps[2] / sizeof(T), os1 = steps[3] / sizeof(T); - for (npy_intp i = 0; i < n; - i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1) - *op1 = _NPY_CLIP(*ip1, *ip2, *ip3); + /* min_val and/or max_val are nans */ + T x = npy_isnan(min_val) ? 
min_val : max_val; + for (npy_intp i = 0; i < n; i++, op += os) { + *(T *)op = x; + } } - npy_clear_floatstatus_barrier((char *)dimensions); } -template +template static void _npy_clip(char **args, npy_intp const *dimensions, npy_intp const *steps) { - using T = typename Tag::type; - return _npy_clip_((T **)args, dimensions, steps); + npy_intp n = dimensions[0]; + if (steps[1] == 0 && steps[2] == 0) { + /* min and max are constant throughout the loop, the most common case */ + T min_val = *(T *)args[1]; + T max_val = *(T *)args[2]; + + _npy_clip_const_minmax_( + args[0], steps[0], args[3], steps[3], n, min_val, max_val, + std::is_base_of{} + ); + } + else { + char *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3]; + npy_intp is1 = steps[0], is2 = steps[1], + is3 = steps[2], os1 = steps[3]; + for (npy_intp i = 0; i < n; + i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1) + { + *(T *)op1 = _NPY_CLIP(*(T *)ip1, *(T *)ip2, *(T *)ip3); + } + } + npy_clear_floatstatus_barrier((char *)dimensions); } extern "C" { diff --git a/numpy/_core/src/umath/dispatching.c b/numpy/_core/src/umath/dispatching.c index 26cc66c3a898..e76509ad7db2 100644 --- a/numpy/_core/src/umath/dispatching.c +++ b/numpy/_core/src/umath/dispatching.c @@ -45,7 +45,9 @@ #include "numpy/ndarraytypes.h" #include "numpy/npy_3kcompat.h" #include "common.h" +#include "npy_pycompat.h" +#include "arrayobject.h" #include "dispatching.h" #include "dtypemeta.h" #include "npy_hashtable.h" @@ -63,7 +65,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion); + npy_bool legacy_promotion_is_possible); /** @@ -121,8 +123,9 @@ PyUFunc_AddLoop(PyUFuncObject *ufunc, PyObject *info, int ignore_duplicate) PyObject *loops = ufunc->_loops; Py_ssize_t length = PyList_Size(loops); for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItem(loops, i); + PyObject *item 
= PyList_GetItemRef(loops, i); PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); + Py_DECREF(item); int cmp = PyObject_RichCompareBool(cur_DType_tuple, DType_tuple, Py_EQ); if (cmp < 0) { return -1; @@ -274,21 +277,20 @@ resolve_implementation_info(PyUFuncObject *ufunc, /* Unspecified out always matches (see below for inputs) */ continue; } + assert(i == 0); /* - * This is a reduce-like operation, which always have the form - * `(res_DType, op_DType, res_DType)`. If the first and last - * dtype of the loops match, this should be reduce-compatible. + * This is a reduce-like operation, we enforce that these + * register with None as the first DType. If a reduction + * uses the same DType, we will do that promotion. + * A `(res_DType, op_DType, res_DType)` pattern can make sense + * in other context as well and could be confusing. */ - if (PyTuple_GET_ITEM(curr_dtypes, 0) - == PyTuple_GET_ITEM(curr_dtypes, 2)) { + if (PyTuple_GET_ITEM(curr_dtypes, 0) == Py_None) { continue; } - /* - * This should be a reduce, but doesn't follow the reduce - * pattern. So (for now?) consider this not a match. - */ + /* Otherwise, this is not considered a match */ matches = NPY_FALSE; - continue; + break; } if (resolver_dtype == (PyArray_DTypeMeta *)Py_None) { @@ -488,7 +490,7 @@ resolve_implementation_info(PyUFuncObject *ufunc, * those defined by the `signature` unmodified). */ static PyObject * -call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter, +call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *info, PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], PyArrayObject *const operands[]) { @@ -498,37 +500,51 @@ call_promoter_and_recurse(PyUFuncObject *ufunc, PyObject *promoter, int promoter_result; PyArray_DTypeMeta *new_op_dtypes[NPY_MAXARGS]; - if (PyCapsule_CheckExact(promoter)) { - /* We could also go the other way and wrap up the python function... 
*/ - PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( - promoter, "numpy._ufunc_promoter"); - if (promoter_function == NULL) { + if (info != NULL) { + PyObject *promoter = PyTuple_GET_ITEM(info, 1); + if (PyCapsule_CheckExact(promoter)) { + /* We could also go the other way and wrap up the python function... */ + PyArrayMethod_PromoterFunction *promoter_function = PyCapsule_GetPointer( + promoter, "numpy._ufunc_promoter"); + if (promoter_function == NULL) { + return NULL; + } + promoter_result = promoter_function((PyObject *)ufunc, + op_dtypes, signature, new_op_dtypes); + } + else { + PyErr_SetString(PyExc_NotImplementedError, + "Calling python functions for promotion is not implemented."); return NULL; } - promoter_result = promoter_function((PyObject *)ufunc, - op_dtypes, signature, new_op_dtypes); - } - else { - PyErr_SetString(PyExc_NotImplementedError, - "Calling python functions for promotion is not implemented."); - return NULL; - } - if (promoter_result < 0) { - return NULL; - } - /* - * If none of the dtypes changes, we would recurse infinitely, abort. - * (Of course it is nevertheless possible to recurse infinitely.) - */ - int dtypes_changed = 0; - for (int i = 0; i < nargs; i++) { - if (new_op_dtypes[i] != op_dtypes[i]) { - dtypes_changed = 1; - break; + if (promoter_result < 0) { + return NULL; + } + /* + * If none of the dtypes changes, we would recurse infinitely, abort. + * (Of course it is nevertheless possible to recurse infinitely.) + * + * TODO: We could allow users to signal this directly and also move + * the call to be (almost immediate). That would call it + * unnecessarily sometimes, but may allow additional flexibility. 
+ */ + int dtypes_changed = 0; + for (int i = 0; i < nargs; i++) { + if (new_op_dtypes[i] != op_dtypes[i]) { + dtypes_changed = 1; + break; + } + } + if (!dtypes_changed) { + goto finish; } } - if (!dtypes_changed) { - goto finish; + else { + /* Reduction special path */ + new_op_dtypes[0] = NPY_DT_NewRef(op_dtypes[1]); + new_op_dtypes[1] = NPY_DT_NewRef(op_dtypes[1]); + Py_XINCREF(op_dtypes[2]); + new_op_dtypes[2] = op_dtypes[2]; } /* @@ -576,8 +592,7 @@ _make_new_typetup( none_count++; } else { - if (!NPY_DT_is_legacy(signature[i]) - || NPY_DT_is_abstract(signature[i])) { + if (!NPY_DT_is_legacy(signature[i])) { /* * The legacy type resolution can't deal with these. * This path will return `None` or so in the future to @@ -745,7 +760,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, PyArrayObject *const ops[], PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], - npy_bool allow_legacy_promotion) + npy_bool legacy_promotion_is_possible) { /* * Fetch the dispatching info which consists of the implementation and @@ -788,13 +803,13 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, /* * At this point `info` is NULL if there is no matching loop, or it is - * a promoter that needs to be used/called: + * a promoter that needs to be used/called. + * TODO: It may be nice to find a better reduce-solution, but this way + * it is a True fallback (not registered so lowest priority) */ - if (info != NULL) { - PyObject *promoter = PyTuple_GET_ITEM(info, 1); - + if (info != NULL || op_dtypes[0] == NULL) { info = call_promoter_and_recurse(ufunc, - promoter, op_dtypes, signature, ops); + info, op_dtypes, signature, ops); if (info == NULL && PyErr_Occurred()) { return NULL; } @@ -814,7 +829,7 @@ promote_and_get_info_and_ufuncimpl(PyUFuncObject *ufunc, * However, we need to give the legacy implementation a chance here. * (it will modify `op_dtypes`). 
*/ - if (!allow_legacy_promotion || ufunc->type_resolver == NULL || + if (!legacy_promotion_is_possible || ufunc->type_resolver == NULL || (ufunc->ntypes == 0 && ufunc->userloops == NULL)) { /* Already tried or not a "legacy" ufunc (no loop found, return) */ return NULL; @@ -921,11 +936,11 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promoting_pyscalars, npy_bool ensure_reduce_compatible) { int nin = ufunc->nin, nargs = ufunc->nargs; + npy_bool legacy_promotion_is_possible = NPY_TRUE; /* * Get the actual DTypes we operate with by setting op_dtypes[i] from @@ -950,10 +965,25 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, */ Py_CLEAR(op_dtypes[i]); } + /* + * If the op_dtype ends up being a non-legacy one, then we cannot use + * legacy promotion (unless this is a python scalar). + */ + if (op_dtypes[i] != NULL && !NPY_DT_is_legacy(op_dtypes[i]) && ( + signature[i] != NULL || // signature cannot be a pyscalar + !(PyArray_FLAGS(ops[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL))) { + legacy_promotion_is_possible = NPY_FALSE; + } } - if (force_legacy_promotion - && npy_promotion_state == NPY_USE_LEGACY_PROMOTION + int error_res = 0; + PyObject *all_dtypes; + PyArrayMethodObject *method; + Py_BEGIN_CRITICAL_SECTION((PyObject *)ufunc); + int current_promotion_state = get_npy_promotion_state(); + + if (force_legacy_promotion && legacy_promotion_is_possible + && current_promotion_state == NPY_USE_LEGACY_PROMOTION && (ufunc->ntypes != 0 || ufunc->userloops != NULL)) { /* * We must use legacy promotion for value-based logic. 
Call the old @@ -963,43 +993,51 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, int cacheable = 1; /* unused, as we modify the original `op_dtypes` */ if (legacy_promote_using_legacy_type_resolver(ufunc, ops, signature, op_dtypes, &cacheable, NPY_FALSE) < 0) { - goto handle_error; + error_res = -1; } } - /* Pause warnings and always use "new" path */ - int old_promotion_state = npy_promotion_state; - npy_promotion_state = NPY_USE_WEAK_PROMOTION; - PyObject *info = promote_and_get_info_and_ufuncimpl(ufunc, - ops, signature, op_dtypes, allow_legacy_promotion); - npy_promotion_state = old_promotion_state; + PyObject *info = NULL; + if (error_res == 0) { + /* Pause warnings and always use "new" path */ + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); + info = promote_and_get_info_and_ufuncimpl(ufunc, + ops, signature, op_dtypes, legacy_promotion_is_possible); + set_npy_promotion_state(current_promotion_state); - if (info == NULL) { - goto handle_error; + if (info == NULL) { + error_res = -1; + } } - PyArrayMethodObject *method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); - PyObject *all_dtypes = PyTuple_GET_ITEM(info, 0); + if (error_res == 0) { + method = (PyArrayMethodObject *)PyTuple_GET_ITEM(info, 1); + all_dtypes = PyTuple_GET_ITEM(info, 0); - /* If necessary, check if the old result would have been different */ - if (NPY_UNLIKELY(npy_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) - && (force_legacy_promotion || promoting_pyscalars) - && npy_give_promotion_warnings()) { - PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; - for (int i = 0; i < nargs; i++) { - check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( - all_dtypes, i); - } - /* Before calling to the legacy promotion, pretend that is the state: */ - npy_promotion_state = NPY_USE_LEGACY_PROMOTION; - int res = legacy_promote_using_legacy_type_resolver(ufunc, - ops, signature, check_dtypes, NULL, NPY_TRUE); - /* Reset the promotion state: */ - npy_promotion_state = 
NPY_USE_WEAK_PROMOTION_AND_WARN; - if (res < 0) { - goto handle_error; + /* If necessary, check if the old result would have been different */ + if (NPY_UNLIKELY(current_promotion_state == NPY_USE_WEAK_PROMOTION_AND_WARN) + && (force_legacy_promotion || promoting_pyscalars) + && npy_give_promotion_warnings()) { + PyArray_DTypeMeta *check_dtypes[NPY_MAXARGS]; + for (int i = 0; i < nargs; i++) { + check_dtypes[i] = (PyArray_DTypeMeta *)PyTuple_GET_ITEM( + all_dtypes, i); + } + /* Before calling to the legacy promotion, pretend that is the state: */ + set_npy_promotion_state(NPY_USE_LEGACY_PROMOTION); + int res = legacy_promote_using_legacy_type_resolver(ufunc, + ops, signature, check_dtypes, NULL, NPY_TRUE); + /* Reset the promotion state: */ + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION_AND_WARN); + if (res < 0) { + error_res = 0; + } } } + Py_END_CRITICAL_SECTION(); + if (error_res < 0) { + goto handle_error; + } /* * In certain cases (only the logical ufuncs really), the loop we found may @@ -1017,7 +1055,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, Py_INCREF(signature[0]); return promote_and_get_ufuncimpl(ufunc, ops, signature, op_dtypes, - force_legacy_promotion, allow_legacy_promotion, + force_legacy_promotion, promoting_pyscalars, NPY_FALSE); } @@ -1047,7 +1085,7 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, * then we chain it, because DTypePromotionError effectively means that there * is no loop available. (We failed finding a loop by using promotion.) 
*/ - else if (PyErr_ExceptionMatches(npy_DTypePromotionError)) { + else if (PyErr_ExceptionMatches(npy_static_pydata.DTypePromotionError)) { PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL; PyErr_Fetch(&err_type, &err_value, &err_traceback); raise_no_loop_found_error(ufunc, (PyObject **)op_dtypes); @@ -1265,8 +1303,9 @@ get_info_no_cast(PyUFuncObject *ufunc, PyArray_DTypeMeta *op_dtype, PyObject *loops = ufunc->_loops; Py_ssize_t length = PyList_Size(loops); for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItem(loops, i); + PyObject *item = PyList_GetItemRef(loops, i); PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); + Py_DECREF(item); int cmp = PyObject_RichCompareBool(cur_DType_tuple, t_dtypes, Py_EQ); if (cmp < 0) { diff --git a/numpy/_core/src/umath/dispatching.h b/numpy/_core/src/umath/dispatching.h index c711a66688c6..9bb5fbd9b013 100644 --- a/numpy/_core/src/umath/dispatching.h +++ b/numpy/_core/src/umath/dispatching.h @@ -22,7 +22,6 @@ promote_and_get_ufuncimpl(PyUFuncObject *ufunc, PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *op_dtypes[], npy_bool force_legacy_promotion, - npy_bool allow_legacy_promotion, npy_bool promote_pyscalars, npy_bool ensure_reduce_compatible); diff --git a/numpy/_core/src/umath/extobj.c b/numpy/_core/src/umath/extobj.c index aea145de81f9..755d8665b11d 100644 --- a/numpy/_core/src/umath/extobj.c +++ b/numpy/_core/src/umath/extobj.c @@ -6,7 +6,7 @@ #include #include "npy_config.h" -#include "npy_pycompat.h" + #include "npy_argparse.h" #include "conversion_utils.h" @@ -14,18 +14,9 @@ #include "extobj.h" #include "numpy/ufuncobject.h" -#include "ufunc_object.h" /* for npy_um_str_pyvals_name */ #include "common.h" -/* - * The global ContextVar to store the extobject. It is exposed to Python - * as `_extobj_contextvar`. 
- */ -static PyObject *default_extobj_capsule = NULL; -NPY_NO_EXPORT PyObject *npy_extobj_contextvar = NULL; - - #define UFUNC_ERR_IGNORE 0 #define UFUNC_ERR_WARN 1 #define UFUNC_ERR_RAISE 2 @@ -44,11 +35,6 @@ NPY_NO_EXPORT PyObject *npy_extobj_contextvar = NULL; #define UFUNC_SHIFT_UNDERFLOW 6 #define UFUNC_SHIFT_INVALID 9 -/* The python strings for the above error modes defined in extobj.h */ -const char *errmode_cstrings[] = { - "ignore", "warn", "raise", "call", "print", "log"}; -static PyObject *errmode_strings[6] = {NULL}; - /* Default user error mode (underflows are ignored, others warn) */ #define UFUNC_ERR_DEFAULT \ (UFUNC_ERR_WARN << UFUNC_SHIFT_DIVIDEBYZERO) + \ @@ -131,7 +117,8 @@ fetch_curr_extobj_state(npy_extobj *extobj) { PyObject *capsule; if (PyContextVar_Get( - npy_extobj_contextvar, default_extobj_capsule, &capsule) < 0) { + npy_static_pydata.npy_extobj_contextvar, + npy_static_pydata.default_extobj_capsule, &capsule) < 0) { return -1; } npy_extobj *obj = PyCapsule_GetPointer(capsule, "numpy.ufunc.extobj"); @@ -153,26 +140,15 @@ fetch_curr_extobj_state(npy_extobj *extobj) NPY_NO_EXPORT int init_extobj(void) { - /* - * First initialize the string constants we need to parse `errstate()` - * inputs. 
- */ - for (int i = 0; i <= UFUNC_ERR_LOG; i++) { - errmode_strings[i] = PyUnicode_InternFromString(errmode_cstrings[i]); - if (errmode_strings[i] == NULL) { - return -1; - } - } - - default_extobj_capsule = make_extobj_capsule( + npy_static_pydata.default_extobj_capsule = make_extobj_capsule( NPY_BUFSIZE, UFUNC_ERR_DEFAULT, Py_None); - if (default_extobj_capsule == NULL) { + if (npy_static_pydata.default_extobj_capsule == NULL) { return -1; } - npy_extobj_contextvar = PyContextVar_New( - "numpy.ufunc.extobj", default_extobj_capsule); - if (npy_extobj_contextvar == NULL) { - Py_CLEAR(default_extobj_capsule); + npy_static_pydata.npy_extobj_contextvar = PyContextVar_New( + "numpy.ufunc.extobj", npy_static_pydata.default_extobj_capsule); + if (npy_static_pydata.npy_extobj_contextvar == NULL) { + Py_CLEAR(npy_static_pydata.default_extobj_capsule); return -1; } return 0; @@ -191,7 +167,8 @@ errmodeconverter(PyObject *obj, int *mode) } int i = 0; for (; i <= UFUNC_ERR_LOG; i++) { - int eq = PyObject_RichCompareBool(obj, errmode_strings[i], Py_EQ); + int eq = PyObject_RichCompareBool( + obj, npy_interned_str.errmode_strings[i], Py_EQ); if (eq == -1) { return 0; } @@ -212,7 +189,7 @@ errmodeconverter(PyObject *obj, int *mode) /* * This function is currently exposed as `umath._seterrobj()`, it is private * and returns a capsule representing the errstate. This capsule is then - * assigned to the `npy_extobj_contextvar` in Python. + * assigned to the `_extobj_contextvar` in Python. 
*/ NPY_NO_EXPORT PyObject * extobj_make_extobj(PyObject *NPY_UNUSED(mod), @@ -338,19 +315,23 @@ extobj_get_extobj_dict(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(noarg)) } /* Set all error modes: */ mode = (extobj.errmask & UFUNC_MASK_DIVIDEBYZERO) >> UFUNC_SHIFT_DIVIDEBYZERO; - if (PyDict_SetItemString(result, "divide", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "divide", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_OVERFLOW) >> UFUNC_SHIFT_OVERFLOW; - if (PyDict_SetItemString(result, "over", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "over", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_UNDERFLOW) >> UFUNC_SHIFT_UNDERFLOW; - if (PyDict_SetItemString(result, "under", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "under", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } mode = (extobj.errmask & UFUNC_MASK_INVALID) >> UFUNC_SHIFT_INVALID; - if (PyDict_SetItemString(result, "invalid", errmode_strings[mode]) < 0) { + if (PyDict_SetItemString(result, "invalid", + npy_interned_str.errmode_strings[mode]) < 0) { goto fail; } diff --git a/numpy/_core/src/umath/extobj.h b/numpy/_core/src/umath/extobj.h index 0cd5afd76218..9176af6a3539 100644 --- a/numpy/_core/src/umath/extobj.h +++ b/numpy/_core/src/umath/extobj.h @@ -4,9 +4,6 @@ #include /* for NPY_NO_EXPORT */ -/* For the private exposure of the extobject contextvar to Python */ -extern NPY_NO_EXPORT PyObject *npy_extobj_contextvar; - /* * Represent the current ufunc error (and buffer) state. 
we are using a * capsule for now to store this, but it could make sense to refactor it into diff --git a/numpy/_core/src/umath/fast_loop_macros.h b/numpy/_core/src/umath/fast_loop_macros.h index b8c1926b2f7e..ab830d52e9ab 100644 --- a/numpy/_core/src/umath/fast_loop_macros.h +++ b/numpy/_core/src/umath/fast_loop_macros.h @@ -323,34 +323,6 @@ abs_ptrdiff(char *a, char *b) ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \ ((abs_ptrdiff(args[1], args[0]) == 0)))) -/* - * Avoid using SIMD for very large step sizes for several reasons: - * 1) Supporting large step sizes requires use of i64gather/scatter_ps instructions, - * in which case we need two i64gather instructions and an additional vinsertf32x8 - * instruction to load a single zmm register (since one i64gather instruction - * loads into a ymm register). This is not ideal for performance. - * 2) Gather and scatter instructions can be slow when the loads/stores - * cross page boundaries. - * - * We instead rely on i32gather/scatter_ps instructions which use a 32-bit index - * element. The index needs to be < INT_MAX to avoid overflow. MAX_STEP_SIZE - * ensures this. The condition also requires that the input and output arrays - * should have no overlap in memory. 
- */ -#define IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[2], steps[2] * dimensions[0]))) - -#define IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP \ - ((labs(steps[0]) < MAX_STEP_SIZE) && \ - (labs(steps[1]) < MAX_STEP_SIZE) && \ - (labs(steps[2]) < MAX_STEP_SIZE) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[2], steps[2] * dimensions[0])) && \ - (nomemoverlap(args[0], steps[0] * dimensions[0], args[1], steps[1] * dimensions[0]))) - /* * 1) Output should be contiguous, can handle strided input data * 2) Input step should be smaller than MAX_STEP_SIZE for performance @@ -359,7 +331,7 @@ abs_ptrdiff(char *a, char *b) #define IS_OUTPUT_BLOCKABLE_UNARY(esizein, esizeout, vsize) \ ((steps[0] & (esizein-1)) == 0 && \ steps[1] == (esizeout) && llabs(steps[0]) < MAX_STEP_SIZE && \ - (nomemoverlap(args[1], steps[1] * dimensions[0], args[0], steps[0] * dimensions[0]))) + (nomemoverlap(args[1], steps[1], args[0], steps[0], dimensions[0]))) #define IS_BLOCKABLE_REDUCE(esize, vsize) \ (steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \ diff --git a/numpy/_core/src/umath/funcs.inc.src b/numpy/_core/src/umath/funcs.inc.src index 6cd9448d025b..1075af97c9df 100644 --- a/numpy/_core/src/umath/funcs.inc.src +++ b/numpy/_core/src/umath/funcs.inc.src @@ -7,9 +7,10 @@ */ #define NPY_NO_DEPRECATED_API NPY_API_VERSION -#include "npy_pycompat.h" -#include "npy_import.h" +#include "npy_import.h" +#include "npy_static_data.h" +#include "multiarraymodule.h" /* ***************************************************************************** @@ -157,35 +158,20 @@ npy_ObjectLogicalNot(PyObject *i1) static PyObject * npy_ObjectFloor(PyObject *obj) { - static PyObject *math_floor_func = NULL; - - 
npy_cache_import("math", "floor", &math_floor_func); - if (math_floor_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_floor_func, "O", obj); + return PyObject_CallFunction(npy_static_pydata.math_floor_func, + "O", obj); } static PyObject * npy_ObjectCeil(PyObject *obj) { - static PyObject *math_ceil_func = NULL; - - npy_cache_import("math", "ceil", &math_ceil_func); - if (math_ceil_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_ceil_func, "O", obj); + return PyObject_CallFunction(npy_static_pydata.math_ceil_func, + "O", obj); } static PyObject * npy_ObjectTrunc(PyObject *obj) { - static PyObject *math_trunc_func = NULL; - - npy_cache_import("math", "trunc", &math_trunc_func); - if (math_trunc_func == NULL) { - return NULL; - } - return PyObject_CallFunction(math_trunc_func, "O", obj); + return PyObject_CallFunction(npy_static_pydata.math_trunc_func, + "O", obj); } static PyObject * @@ -195,13 +181,8 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* use math.gcd if valid on the provided types */ { - static PyObject *math_gcd_func = NULL; - - npy_cache_import("math", "gcd", &math_gcd_func); - if (math_gcd_func == NULL) { - return NULL; - } - gcd = PyObject_CallFunction(math_gcd_func, "OO", i1, i2); + gcd = PyObject_CallFunction(npy_static_pydata.math_gcd_func, + "OO", i1, i2); if (gcd != NULL) { return gcd; } @@ -211,13 +192,12 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2) /* otherwise, use our internal one, written in python */ { - static PyObject *internal_gcd_func = NULL; - - npy_cache_import("numpy._core._internal", "_gcd", &internal_gcd_func); - if (internal_gcd_func == NULL) { + if (npy_cache_import_runtime("numpy._core._internal", "_gcd", + &npy_runtime_imports.internal_gcd_func) == -1) { return NULL; } - gcd = PyObject_CallFunction(internal_gcd_func, "OO", i1, i2); + gcd = PyObject_CallFunction(npy_runtime_imports.internal_gcd_func, + "OO", i1, i2); if (gcd == NULL) { return NULL; } diff --git 
a/numpy/_core/src/umath/legacy_array_method.c b/numpy/_core/src/umath/legacy_array_method.c index 6e90d55225b5..9592df0e1366 100644 --- a/numpy/_core/src/umath/legacy_array_method.c +++ b/numpy/_core/src/umath/legacy_array_method.c @@ -33,37 +33,43 @@ typedef struct { /* Use a free list, since we should normally only need one at a time */ +#ifndef Py_GIL_DISABLED #define NPY_LOOP_DATA_CACHE_SIZE 5 static int loop_data_num_cached = 0; static legacy_array_method_auxdata *loop_data_cache[NPY_LOOP_DATA_CACHE_SIZE]; - +#else +#define NPY_LOOP_DATA_CACHE_SIZE 0 +#endif static void legacy_array_method_auxdata_free(NpyAuxData *data) { +#if NPY_LOOP_DATA_CACHE_SIZE > 0 if (loop_data_num_cached < NPY_LOOP_DATA_CACHE_SIZE) { loop_data_cache[loop_data_num_cached] = ( (legacy_array_method_auxdata *)data); loop_data_num_cached++; } - else { + else +#endif + { PyMem_Free(data); } } -#undef NPY_LOOP_DATA_CACHE_SIZE - - NpyAuxData * get_new_loop_data( PyUFuncGenericFunction loop, void *user_data, int pyerr_check) { legacy_array_method_auxdata *data; +#if NPY_LOOP_DATA_CACHE_SIZE > 0 if (NPY_LIKELY(loop_data_num_cached > 0)) { loop_data_num_cached--; data = loop_data_cache[loop_data_num_cached]; } - else { + else +#endif + { data = PyMem_Malloc(sizeof(legacy_array_method_auxdata)); if (data == NULL) { return NULL; @@ -77,6 +83,7 @@ get_new_loop_data( return (NpyAuxData *)data; } +#undef NPY_LOOP_DATA_CACHE_SIZE /* * This is a thin wrapper around the legacy loop signature. 
@@ -104,8 +111,8 @@ generic_wrapped_legacy_loop(PyArrayMethod_Context *NPY_UNUSED(context), */ NPY_NO_EXPORT NPY_CASTING wrapped_legacy_resolve_descriptors(PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *NPY_UNUSED(given_descrs[]), + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const NPY_UNUSED(given_descrs[]), PyArray_Descr *NPY_UNUSED(loop_descrs[]), npy_intp *NPY_UNUSED(view_offset)) { @@ -123,8 +130,8 @@ wrapped_legacy_resolve_descriptors(PyArrayMethodObject *NPY_UNUSED(self), static NPY_CASTING simple_legacy_resolve_descriptors( PyArrayMethodObject *method, - PyArray_DTypeMeta **dtypes, - PyArray_Descr **given_descrs, + PyArray_DTypeMeta *const *dtypes, + PyArray_Descr *const *given_descrs, PyArray_Descr **output_descrs, npy_intp *NPY_UNUSED(view_offset)) { @@ -246,7 +253,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, static int copy_cached_initial( PyArrayMethod_Context *context, npy_bool NPY_UNUSED(reduction_is_empty), - char *initial) + void *initial) { memcpy(initial, context->method->legacy_initial, context->descriptors[0]->elsize); @@ -266,7 +273,7 @@ copy_cached_initial( static int get_initial_from_ufunc( PyArrayMethod_Context *context, npy_bool reduction_is_empty, - char *initial) + void *initial) { if (context->caller == NULL || !PyObject_TypeCheck(context->caller, &PyUFunc_Type)) { diff --git a/numpy/_core/src/umath/legacy_array_method.h b/numpy/_core/src/umath/legacy_array_method.h index 750de06c7992..82eeb04a0a15 100644 --- a/numpy/_core/src/umath/legacy_array_method.h +++ b/numpy/_core/src/umath/legacy_array_method.h @@ -28,7 +28,7 @@ get_wrapped_legacy_ufunc_loop(PyArrayMethod_Context *context, NPY_NO_EXPORT NPY_CASTING wrapped_legacy_resolve_descriptors(PyArrayMethodObject *, - PyArray_DTypeMeta **, PyArray_Descr **, PyArray_Descr **, npy_intp *); + PyArray_DTypeMeta *const *, PyArray_Descr *const *, PyArray_Descr **, npy_intp *); #ifdef __cplusplus } diff 
--git a/numpy/_core/src/umath/loops.c.src b/numpy/_core/src/umath/loops.c.src index 811680b9c47c..5ac67fa3024b 100644 --- a/numpy/_core/src/umath/loops.c.src +++ b/numpy/_core/src/umath/loops.c.src @@ -471,13 +471,49 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 int } /**end repeat1**/ +static inline @type@ +_@TYPE@_squared_exponentiation_helper(@type@ base, @type@ exponent_two, int first_bit) { + // Helper method to calculate power using squared exponentiation + // The algorithm is partly unrolled. The second and third argument are the exponent//2 and the first bit of the exponent + @type@ out = first_bit ? base : 1; + while (exponent_two > 0) { + base *= base; + if (exponent_two & 1) { + out *= base; + } + exponent_two >>= 1; + } + return out; +} + NPY_NO_EXPORT void @TYPE@_power(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { + if (steps[1]==0) { + // stride for second argument is 0 + BINARY_DEFS + const @type@ in2 = *(@type@ *)ip2; + #if @SIGNED@ + if (in2 < 0) { + npy_gil_error(PyExc_ValueError, + "Integers to negative integer powers are not allowed."); + return; + } + #endif + + int first_bit = in2 & 1; + @type@ in2start = in2 >> 1; + + BINARY_LOOP_SLIDING { + @type@ in1 = *(@type@ *)ip1; + + *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2start, first_bit); + } + return; + } BINARY_LOOP { @type@ in1 = *(@type@ *)ip1; @type@ in2 = *(@type@ *)ip2; - @type@ out; #if @SIGNED@ if (in2 < 0) { @@ -495,16 +531,9 @@ NPY_NO_EXPORT void continue; } - out = in2 & 1 ? 
in1 : 1; + int first_bit = in2 & 1; in2 >>= 1; - while (in2 > 0) { - in1 *= in1; - if (in2 & 1) { - out *= in1; - } - in2 >>= 1; - } - *((@type@ *) op1) = out; + *((@type@ *) op1) = _@TYPE@_squared_exponentiation_helper(in1, in2, first_bit); } } /**end repeat**/ diff --git a/numpy/_core/src/umath/loops.h.src b/numpy/_core/src/umath/loops.h.src index 55db18de4474..f775bc22b8a8 100644 --- a/numpy/_core/src/umath/loops.h.src +++ b/numpy/_core/src/umath/loops.h.src @@ -10,6 +10,10 @@ #define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN #endif +#ifdef __cplusplus +extern "C" { +#endif + /* ***************************************************************************** ** BOOLEAN LOOPS ** @@ -73,7 +77,7 @@ BOOL_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void #include "loops_autovec.dispatch.h" #endif /**begin repeat - * #kind = isnan, isinf, isfinite# + * #kind = isnan, isinf, isfinite, floor, ceil, trunc# */ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void BOOL_@kind@, (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))) @@ -175,6 +179,12 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @S@@TYPE@_positive(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)); +/**begin repeat2 + * #kind = floor, ceil, trunc# + */ +#define @S@@TYPE@_@kind@ @S@@TYPE@_positive +/**end repeat2**/ + /**begin repeat2 * Arithmetic * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor, @@ -875,5 +885,7 @@ PyUFunc_OOO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, vo ** END LOOPS ** ***************************************************************************** */ - +#ifdef __cplusplus +} +#endif #endif diff --git a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src index a5453501836e..21e01c115a7d 100644 --- a/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_arithm_fp.dispatch.c.src @@ -346,14 
+346,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) && __apple_build_version__ < 14030000 goto loop_scalar; #endif // end affected Apple clang. + if (is_mem_overlap(b_src0, b_ssrc0, b_dst, b_sdst, len) || is_mem_overlap(b_src1, b_ssrc1, b_dst, b_sdst, len) || - b_sdst % sizeof(@ftype@) != 0 || b_sdst == 0 || - b_ssrc0 % sizeof(@ftype@) != 0 || - b_ssrc1 % sizeof(@ftype@) != 0 + !npyv_loadable_stride_@sfx@(b_ssrc0) || + !npyv_loadable_stride_@sfx@(b_ssrc1) || + !npyv_storable_stride_@sfx@(b_sdst) || + b_sdst == 0 ) { goto loop_scalar; } + const @ftype@ *src0 = (@ftype@*)b_src0; const @ftype@ *src1 = (@ftype@*)b_src1; @ftype@ *dst = (@ftype@*)b_dst; @@ -366,10 +369,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) const int wstep = vstep * 2; const int hstep = vstep / 2; - const int loadable0 = npyv_loadable_stride_s64(ssrc0); - const int loadable1 = npyv_loadable_stride_s64(ssrc1); - const int storable = npyv_storable_stride_s64(sdst); - // lots**lots of specializations, to squeeze out max performance // contig if (ssrc0 == 2 && ssrc0 == ssrc1 && ssrc0 == sdst) { @@ -414,7 +413,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } } // non-contig - else if (loadable1 && storable) { + else { for (; len >= vstep; len -= vstep, src1 += ssrc1*vstep, dst += sdst*vstep) { npyv_@sfx@ b0 = npyv_loadn2_@sfx@(src1, ssrc1); npyv_@sfx@ b1 = npyv_loadn2_@sfx@(src1 + ssrc1*hstep, ssrc1); @@ -433,9 +432,6 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else { - goto loop_scalar; - } } // scalar 1 else if (ssrc1 == 0) { @@ -460,7 +456,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } } // non-contig - else if (loadable0 && storable) { + else { for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep, dst += sdst*vstep) { npyv_@sfx@ a0 = npyv_loadn2_@sfx@(src0, ssrc0); npyv_@sfx@ a1 = npyv_loadn2_@sfx@(src0 + ssrc0*hstep, ssrc0); @@ -479,13 +475,10 @@ NPY_NO_EXPORT void 
NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else { - goto loop_scalar; - } } #if @is_mul@ // non-contig - else if (loadable0 && loadable1 && storable) { + else { for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep, src1 += ssrc1*vstep, dst += sdst*vstep ) { @@ -512,12 +505,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - #endif + #else /* @is_mul@ */ else { + // Only multiply is vectorized for the generic non-contig case. goto loop_scalar; } + #endif /* @is_mul@ */ + npyv_cleanup(); return; + loop_scalar: #endif for (; len > 0; --len, b_src0 += b_ssrc0, b_src1 += b_ssrc1, b_dst += b_sdst) { @@ -580,8 +577,8 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npy_intp b_ssrc = steps[0], b_sdst = steps[1]; #if @VECTOR@ if (is_mem_overlap(b_src, b_ssrc, b_dst, b_sdst, len) || - b_sdst % sizeof(@ftype@) != 0 || - b_ssrc % sizeof(@ftype@) != 0 + !npyv_loadable_stride_@sfx@(b_ssrc) || + !npyv_storable_stride_@sfx@(b_sdst) ) { goto loop_scalar; } @@ -609,7 +606,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_store2_till_@sfx@(dst, len, r); } } - else if (ssrc == 2 && npyv_storable_stride_s64(sdst)) { + else if (ssrc == 2) { for (; len >= vstep; len -= vstep, src += wstep, dst += sdst*vstep) { npyv_@sfx@ a0 = npyv_load_@sfx@(src); npyv_@sfx@ a1 = npyv_load_@sfx@(src + vstep); @@ -624,7 +621,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npyv_storen2_till_@sfx@(dst, sdst, len, r); } } - else if (sdst == 2 && npyv_loadable_stride_s64(ssrc)) { + else if (sdst == 2) { for (; len >= vstep; len -= vstep, src += ssrc*vstep, dst += wstep) { npyv_@sfx@ a0 = npyv_loadn2_@sfx@(src, ssrc); npyv_@sfx@ a1 = npyv_loadn2_@sfx@(src + ssrc*hstep, ssrc); diff --git a/numpy/_core/src/umath/loops_autovec.dispatch.c.src b/numpy/_core/src/umath/loops_autovec.dispatch.c.src index 6ccafe577c72..e93e851d6b7a 100644 --- 
a/numpy/_core/src/umath/loops_autovec.dispatch.c.src +++ b/numpy/_core/src/umath/loops_autovec.dispatch.c.src @@ -264,6 +264,17 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) } /**end repeat**/ +/**begin repeat + * Identity + * #kind = floor, ceil, trunc# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(BOOL_@kind@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) +{ + UNARY_LOOP_FAST(npy_bool, npy_bool, *out = in); +} +/**end repeat**/ + /* ***************************************************************************** ** HALF-FLOAT LOOPS ** diff --git a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src index 159e275bd45e..a4acc4437b1b 100644 --- a/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src +++ b/numpy/_core/src/umath/loops_exponent_log.dispatch.c.src @@ -1315,16 +1315,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) - const npy_double *src = (npy_double*)args[0]; - npy_double *dst = (npy_double*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; const npy_intp len = dimensions[0]; - assert(steps[0] % lsize == 0 && steps[1] % lsize == 0); - if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - npyv_loadable_stride_f64(ssrc) && - npyv_storable_stride_f64(sdst)) { + + if (!is_mem_overlap(args[0], steps[0], args[1], steps[1], len) && + npyv_loadable_stride_f64(steps[0]) && + npyv_storable_stride_f64(steps[1])) { + const npy_double *src = (npy_double*)args[0]; + npy_double *dst = (npy_double*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(src[0]); + const npy_intp sdst = steps[1] / sizeof(src[0]); + simd_@func@_f64(src, ssrc, dst, sdst, len); return; } @@ -1350,12 +1350,17 @@ 
NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) * #TYPE = FLOAT, DOUBLE# * #c = f, # * #C = F, # + * #suffix = f32, f64# */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_frexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_UNARY_TWO_OUT_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[0], steps[0], args[1], steps[1], dimensions[0]))) { AVX512_SKX_frexp_@TYPE@(args, dimensions, steps); return; } @@ -1370,7 +1375,11 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_ldexp) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func)) { #ifdef SIMD_AVX512_SKX - if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) { + if ((npyv_loadable_stride_@suffix@(steps[0])) && + (npyv_storable_stride_@suffix@(steps[1])) && + (npyv_storable_stride_@suffix@(steps[2])) && + (!is_mem_overlap(args[0], steps[0], args[2], steps[2], dimensions[0])) && + (!is_mem_overlap(args[1], steps[1], args[2], steps[2], dimensions[0]))) { AVX512_SKX_ldexp_@TYPE@(args, dimensions, steps); return; } diff --git a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src b/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src index 8e09de941168..d72ace50ff19 100644 --- a/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src +++ b/numpy/_core/src/umath/loops_hyperbolic.dispatch.c.src @@ -9,6 +9,8 @@ #include "simd/simd.h" #include "loops_utils.h" #include "loops.h" +// Provides the various *_LOOP macros +#include "fast_loop_macros.h" #if NPY_SIMD_FMA3 // native support /* @@ -608,32 +610,29 @@ simd_tanh_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, npy_in NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp 
const *steps, void *NPY_UNUSED(data)) { - const @type@ *src = (@type@*)args[0]; - @type@ *dst = (@type@*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); #if @simd@ - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_@sfx@(ssrc) || !npyv_storable_stride_@sfx@(sdst) + npy_intp len = dimensions[0]; + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_@sfx@(steps[0]) || + !npyv_storable_stride_@sfx@(steps[1]) ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_@func@_@sfx@(src, 1, dst, 1, 1); + UNARY_LOOP { + simd_@func@_@sfx@((@type@ *)ip1, 1, (@type@ *)op1, 1, 1); } } else { - simd_@func@_@sfx@(src, ssrc, dst, sdst, len); + npy_intp ssrc = steps[0] / sizeof(@type@); + npy_intp sdst = steps[1] / sizeof(@type@); + simd_@func@_@sfx@((@type@ *)args[0], ssrc, (@type@ *)args[1], sdst, len); } npyv_cleanup(); #if @simd_req_clear@ npy_clear_floatstatus_barrier((char*)dimensions); #endif #else - for (; len > 0; --len, src += ssrc, dst += sdst) { - const @type@ src0 = *src; - *dst = npy_@func@@ssfx@(src0); + UNARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = npy_@func@@ssfx@(in1); } #endif } diff --git a/numpy/_core/src/umath/loops_minmax.dispatch.c.src b/numpy/_core/src/umath/loops_minmax.dispatch.c.src index 319072c01fbe..a67e7d490f5b 100644 --- a/numpy/_core/src/umath/loops_minmax.dispatch.c.src +++ b/numpy/_core/src/umath/loops_minmax.dispatch.c.src @@ -352,9 +352,9 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) } // unroll scalars faster than non-contiguous vector load/store on Arm #if !defined(NPY_HAVE_NEON) && @is_fp@ - if (TO_SIMD_SFX(npyv_loadable_stride)(is1/sizeof(STYPE)) && - TO_SIMD_SFX(npyv_loadable_stride)(is2/sizeof(STYPE)) && - 
TO_SIMD_SFX(npyv_storable_stride)(os1/sizeof(STYPE)) + if (TO_SIMD_SFX(npyv_loadable_stride)(is1) && + TO_SIMD_SFX(npyv_loadable_stride)(is2) && + TO_SIMD_SFX(npyv_storable_stride)(os1) ) { TO_SIMD_SFX(simd_binary_@intrin@)( (STYPE*)ip1, is1/sizeof(STYPE), diff --git a/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src b/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src deleted file mode 100644 index 31de906098e3..000000000000 --- a/numpy/_core/src/umath/loops_trigonometric.dispatch.c.src +++ /dev/null @@ -1,457 +0,0 @@ -/*@targets - ** $maxopt baseline - ** (avx2 fma3) avx512f - ** vsx2 vsx3 vsx4 - ** neon_vfpv4 - ** vxe vxe2 - **/ -#include "numpy/npy_math.h" -#include "simd/simd.h" -#include "loops_utils.h" -#include "loops.h" -#include "fast_loop_macros.h" -/* - * TODO: - * - use vectorized version of Payne-Hanek style reduction for large elements or - * when there's no native FUSED support instead of fallback to libc - */ -#if NPY_SIMD_FMA3 // native support -/**begin repeat - * #check = F64, F32# - * #sfx = f64, f32# - * #enable = 0, 1# - */ -#if NPY_SIMD_@check@ && @enable@ -/* - * Vectorized Cody-Waite range reduction technique - * Performs the reduction step x* = x - y*C in three steps: - * 1) x* = x - y*c1 - * 2) x* = x - y*c2 - * 3) x* = x - y*c3 - * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision - */ -NPY_FINLINE npyv_@sfx@ -simd_range_reduction_@sfx@(npyv_@sfx@ x, npyv_@sfx@ y, npyv_@sfx@ c1, npyv_@sfx@ c2, npyv_@sfx@ c3) -{ - npyv_@sfx@ reduced_x = npyv_muladd_@sfx@(y, c1, x); - reduced_x = npyv_muladd_@sfx@(y, c2, reduced_x); - reduced_x = npyv_muladd_@sfx@(y, c3, reduced_x); - return reduced_x; -} -#endif -/**end repeat**/ -/* Disable SIMD code and revert to libm: see - * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ - * for detailed discussion on this*/ -#if 0 // NPY_SIMD_F64 -/**begin repeat - * #op = cos, sin# - */ -#if defined(NPY_OS_WIN32) 
|| defined(NPY_OS_CYGWIN) -NPY_FINLINE npyv_f64 -#else -NPY_NOINLINE npyv_f64 -#endif -simd_@op@_scalar_f64(npyv_f64 out, npy_uint64 cmp_bits) -{ - // MSVC doesn't compile with direct vector access, so we copy it here - // as we have no npyv_get_lane/npyv_set_lane intrinsics - npy_double NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) out_copy[npyv_nlanes_f64]; - npyv_storea_f64(out_copy, out); - - for (unsigned i = 0; i < npyv_nlanes_f64; ++i) { - if (cmp_bits & (1 << i)) { - out_copy[i] = npy_@op@(out_copy[i]); - } - } - - return npyv_loada_f64(out_copy); -} -/**end repeat**/ - -/* - * Approximate sine algorithm for x \in [-pi/2, pi/2] - * worst-case error is 3.5 ulp. - * abs error: 0x1.be222a58p-53 in [-pi/2, pi/2]. - */ -NPY_FINLINE npyv_f64 -simd_approx_sine_poly_f64(npyv_f64 r) -{ - const npyv_f64 poly1 = npyv_setall_f64(-0x1.9f4a9c8b21dc9p-41); - const npyv_f64 poly2 = npyv_setall_f64(0x1.60e88a10163f2p-33); - const npyv_f64 poly3 = npyv_setall_f64(-0x1.ae6361b7254e7p-26); - const npyv_f64 poly4 = npyv_setall_f64(0x1.71de382e8d62bp-19); - const npyv_f64 poly5 = npyv_setall_f64(-0x1.a01a019aeb4ffp-13); - const npyv_f64 poly6 = npyv_setall_f64(0x1.111111110b25ep-7); - const npyv_f64 poly7 = npyv_setall_f64(-0x1.55555555554c3p-3); - - npyv_f64 r2 = npyv_mul_f64(r, r); - npyv_f64 y = npyv_muladd_f64(poly1, r2, poly2); - y = npyv_muladd_f64(y, r2, poly3); - y = npyv_muladd_f64(y, r2, poly4); - y = npyv_muladd_f64(y, r2, poly5); - y = npyv_muladd_f64(y, r2, poly6); - y = npyv_muladd_f64(y, r2, poly7); - y = npyv_muladd_f64(npyv_mul_f64(y, r2), r, r); - - return y; -} - -/* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). 
*/ -NPY_FINLINE npyv_f64 -simd_range_reduction_pi2(npyv_f64 r, npyv_f64 n) { - const npyv_f64 pi1 = npyv_setall_f64(-0x1.921fb54442d18p+1); - const npyv_f64 pi2 = npyv_setall_f64(-0x1.1a62633145c06p-53); - const npyv_f64 pi3 = npyv_setall_f64(-0x1.c1cd129024e09p-106); - - return simd_range_reduction_f64(r, n, pi1, pi2, pi3); -} - -NPY_FINLINE npyv_b64 simd_sin_range_check_f64(npyv_u64 ir) { - const npyv_u64 tiny_bound = npyv_setall_u64(0x202); /* top12 (asuint64 (0x1p-509)). */ - const npyv_u64 simd_thresh = npyv_setall_u64(0x214); /* top12 (asuint64 (RangeVal)) - SIMD_TINY_BOUND. */ - - return npyv_cmpge_u64(npyv_sub_u64(npyv_shri_u64(ir, 52), tiny_bound), simd_thresh); -} - -NPY_FINLINE npyv_b64 simd_cos_range_check_f64(npyv_u64 ir) { - const npyv_f64 range_val = npyv_setall_f64(0x1p23); - - return npyv_cmpge_u64(ir, npyv_reinterpret_u64_f64(range_val)); -} - -NPY_FINLINE npyv_f64 -simd_cos_poly_f64(npyv_f64 r, npyv_u64 ir, npyv_u64 sign) -{ - const npyv_f64 inv_pi = npyv_setall_f64(0x1.45f306dc9c883p-2); - const npyv_f64 half_pi = npyv_setall_f64(0x1.921fb54442d18p+0); - const npyv_f64 shift = npyv_setall_f64(0x1.8p52); - - /* n = rint((|x|+pi/2)/pi) - 0.5. */ - npyv_f64 n = npyv_muladd_f64(inv_pi, npyv_add_f64(r, half_pi), shift); - npyv_u64 odd = npyv_shli_u64(npyv_reinterpret_u64_f64(n), 63); - n = npyv_sub_f64(n, shift); - n = npyv_sub_f64(n, npyv_setall_f64(0.5)); - - /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */ - r = simd_range_reduction_pi2(r, n); - - /* sin(r) poly approx. */ - npyv_f64 y = simd_approx_sine_poly_f64(r); - - /* sign. */ - return npyv_reinterpret_f64_u64(npyv_xor_u64(npyv_reinterpret_u64_f64(y), odd)); -} - -NPY_FINLINE npyv_f64 -simd_sin_poly_f64(npyv_f64 r, npyv_u64 ir, npyv_u64 sign) -{ - const npyv_f64 inv_pi = npyv_setall_f64(0x1.45f306dc9c883p-2); - const npyv_f64 shift = npyv_setall_f64(0x1.8p52); - - /* n = rint(|x|/pi). 
*/ - npyv_f64 n = npyv_muladd_f64(inv_pi, r, shift); - npyv_u64 odd = npyv_shli_u64(npyv_reinterpret_u64_f64(n), 63); - n = npyv_sub_f64(n, shift); - - /* r = |x| - n*pi (range reduction into -pi/2 .. pi/2). */ - r = simd_range_reduction_pi2(r, n); - - /* sin(r) poly approx. */ - npyv_f64 y = simd_approx_sine_poly_f64(r); - - /* sign. */ - return npyv_reinterpret_f64_u64(npyv_xor_u64(npyv_xor_u64(npyv_reinterpret_u64_f64(y), sign), odd)); -} - -/**begin repeat - * #op = cos, sin# - */ -NPY_FINLINE void -simd_@op@_f64(const double *src, npy_intp ssrc, double *dst, npy_intp sdst, npy_intp len) -{ - const npyv_u64 abs_mask = npyv_setall_u64(0x7fffffffffffffff); - const int vstep = npyv_nlanes_f64; - - npyv_f64 out = npyv_zero_f64(); - npyv_f64 x_in; - - for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { - if (ssrc == 1) { - x_in = npyv_load_tillz_f64(src, len); - } else { - x_in = npyv_loadn_tillz_f64(src, ssrc, len); - } - - npyv_u64 ir = npyv_and_u64(npyv_reinterpret_u64_f64(x_in), abs_mask); - npyv_f64 r = npyv_reinterpret_f64_u64(ir); - npyv_u64 sign = npyv_and_u64(npyv_reinterpret_u64_f64(x_in), npyv_not_u64(abs_mask)); - - npyv_b64 cmp = simd_@op@_range_check_f64(ir); - /* If fenv exceptions are to be triggered correctly, set any special lanes - to 1 (which is neutral w.r.t. fenv). These lanes will be fixed by - scalar loop later. 
*/ - r = npyv_select_f64(cmp, npyv_setall_f64(1.0), r); - - // Some in range, at least one calculation is useful - if (!npyv_all_b64(cmp)) { - out = simd_@op@_poly_f64(r, ir, sign); - } - - if (npyv_any_b64(cmp)) { - out = npyv_select_f64(cmp, x_in, out); - out = simd_@op@_scalar_f64(out, npyv_tobits_b64(cmp)); - } - - if (sdst == 1) { - npyv_store_till_f64(dst, len, out); - } else { - npyv_storen_till_f64(dst, sdst, len, out); - } - } - npyv_cleanup(); -} -/**end repeat**/ -#endif // NPY_SIMD_F64 - -#if NPY_SIMD_F32 -/* - * Approximate cosine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.875 - */ -NPY_FINLINE npyv_f32 -simd_cosine_poly_f32(npyv_f32 x2) -{ - const npyv_f32 invf8 = npyv_setall_f32(0x1.98e616p-16f); - const npyv_f32 invf6 = npyv_setall_f32(-0x1.6c06dcp-10f); - const npyv_f32 invf4 = npyv_setall_f32(0x1.55553cp-05f); - const npyv_f32 invf2 = npyv_setall_f32(-0x1.000000p-01f); - const npyv_f32 invf0 = npyv_setall_f32(0x1.000000p+00f); - - npyv_f32 r = npyv_muladd_f32(invf8, x2, invf6); - r = npyv_muladd_f32(r, x2, invf4); - r = npyv_muladd_f32(r, x2, invf2); - r = npyv_muladd_f32(r, x2, invf0); - return r; -} -/* - * Approximate sine algorithm for x \in [-PI/4, PI/4] - * Maximum ULP across all 32-bit floats = 0.647 - * Polynomial approximation based on unpublished work by T. 
Myklebust - */ -NPY_FINLINE npyv_f32 -simd_sine_poly_f32(npyv_f32 x, npyv_f32 x2) -{ - const npyv_f32 invf9 = npyv_setall_f32(0x1.7d3bbcp-19f); - const npyv_f32 invf7 = npyv_setall_f32(-0x1.a06bbap-13f); - const npyv_f32 invf5 = npyv_setall_f32(0x1.11119ap-07f); - const npyv_f32 invf3 = npyv_setall_f32(-0x1.555556p-03f); - - npyv_f32 r = npyv_muladd_f32(invf9, x2, invf7); - r = npyv_muladd_f32(r, x2, invf5); - r = npyv_muladd_f32(r, x2, invf3); - r = npyv_muladd_f32(r, x2, npyv_zero_f32()); - r = npyv_muladd_f32(r, x, x); - return r; -} -/* - * Vectorized approximate sine/cosine algorithms: The following code is a - * vectorized version of the algorithm presented here: - * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 - * (1) Load data in registers and generate mask for elements that are - * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, - * 117435.992f] for sine. - * (2) For elements within range, perform range reduction using Cody-Waite's - * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4]. - * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k = - * int(y). - * (4) For elements outside that range, Cody-Waite reduction performs poorly - * leading to catastrophic cancellation. We compute cosine by calling glibc in - * a scalar fashion. - * (5) Vectorized implementation has a max ULP of 1.49 and performs at least - * 5-7x(x86) - 2.5-3x(Power) - 1-2x(Arm) faster than scalar implementations - * when magnitude of all elements in the array < 71476.0625f (117435.992f for sine). - * Worst case performance is when all the elements are large leading to about 1-2% reduction in - * performance. 
- */ -typedef enum -{ - SIMD_COMPUTE_SIN, - SIMD_COMPUTE_COS -} SIMD_TRIG_OP; - -static void SIMD_MSVC_NOINLINE -simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, - npy_intp len, SIMD_TRIG_OP trig_op) -{ - // Load up frequently used constants - const npyv_f32 zerosf = npyv_zero_f32(); - const npyv_s32 ones = npyv_setall_s32(1); - const npyv_s32 twos = npyv_setall_s32(2); - const npyv_f32 two_over_pi = npyv_setall_f32(0x1.45f306p-1f); - const npyv_f32 codyw_pio2_highf = npyv_setall_f32(-0x1.921fb0p+00f); - const npyv_f32 codyw_pio2_medf = npyv_setall_f32(-0x1.5110b4p-22f); - const npyv_f32 codyw_pio2_lowf = npyv_setall_f32(-0x1.846988p-48f); - const npyv_f32 rint_cvt_magic = npyv_setall_f32(0x1.800000p+23f); - // Cody-Waite's range - float max_codi = 117435.992f; - if (trig_op == SIMD_COMPUTE_COS) { - max_codi = 71476.0625f; - } - const npyv_f32 max_cody = npyv_setall_f32(max_codi); - const int vstep = npyv_nlanes_f32; - - for (; len > 0; len -= vstep, src += ssrc*vstep, dst += sdst*vstep) { - npyv_f32 x_in; - if (ssrc == 1) { - x_in = npyv_load_tillz_f32(src, len); - } else { - x_in = npyv_loadn_tillz_f32(src, ssrc, len); - } - npyv_b32 nnan_mask = npyv_notnan_f32(x_in); - #if NPY_SIMD_CMPSIGNAL - // Eliminate NaN to avoid FP invalid exception - x_in = npyv_and_f32(x_in, npyv_reinterpret_f32_u32(npyv_cvt_u32_b32(nnan_mask))); - #endif - npyv_b32 simd_mask = npyv_cmple_f32(npyv_abs_f32(x_in), max_cody); - npy_uint64 simd_maski = npyv_tobits_b32(simd_mask); - /* - * For elements outside of this range, Cody-Waite's range reduction - * becomes inaccurate and we will call libc to compute cosine for - * these numbers - */ - if (simd_maski != 0) { - npyv_f32 x = npyv_select_f32(npyv_and_b32(nnan_mask, simd_mask), x_in, zerosf); - - npyv_f32 quadrant = npyv_mul_f32(x, two_over_pi); - // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 - quadrant = npyv_add_f32(quadrant, rint_cvt_magic); - quadrant = npyv_sub_f32(quadrant, 
rint_cvt_magic); - - // Cody-Waite's range reduction algorithm - npyv_f32 reduced_x = simd_range_reduction_f32( - x, quadrant, codyw_pio2_highf, codyw_pio2_medf, codyw_pio2_lowf - ); - npyv_f32 reduced_x2 = npyv_square_f32(reduced_x); - - // compute cosine and sine - npyv_f32 cos = simd_cosine_poly_f32(reduced_x2); - npyv_f32 sin = simd_sine_poly_f32(reduced_x, reduced_x2); - - npyv_s32 iquadrant = npyv_round_s32_f32(quadrant); - if (trig_op == SIMD_COMPUTE_COS) { - iquadrant = npyv_add_s32(iquadrant, ones); - } - // blend sin and cos based on the quadrant - npyv_b32 sine_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, ones), npyv_zero_s32()); - cos = npyv_select_f32(sine_mask, sin, cos); - - // multiply by -1 for appropriate elements - npyv_b32 negate_mask = npyv_cmpeq_s32(npyv_and_s32(iquadrant, twos), twos); - cos = npyv_ifsub_f32(negate_mask, zerosf, cos, cos); - cos = npyv_select_f32(nnan_mask, cos, npyv_setall_f32(NPY_NANF)); - - if (sdst == 1) { - npyv_store_till_f32(dst, len, cos); - } else { - npyv_storen_till_f32(dst, sdst, len, cos); - } - } - if (simd_maski != (npy_uint64)((1 << vstep) - 1)) { - float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[npyv_nlanes_f32]; - npyv_storea_f32(ip_fback, x_in); - - // process elements using libc for large elements - if (trig_op == SIMD_COMPUTE_COS) { - for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { - if ((simd_maski >> i) & 1) { - continue; - } - dst[sdst*i] = npy_cosf(ip_fback[i]); - } - } - else { - for (unsigned i = 0; i < npyv_nlanes_f32; ++i) { - if ((simd_maski >> i) & 1) { - continue; - } - dst[sdst*i] = npy_sinf(ip_fback[i]); - } - } - } - } - npyv_cleanup(); -} -#endif // NPY_SIMD_FP32 -#endif // NYP_SIMD_FMA3 - -/**begin repeat - * #func = cos, sin# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_@func@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ - /* Disable SIMD code and revert to libm: see - * 
https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ - * for detailed discussion on this*/ -//#if NPY_SIMD_F64 && NPY_SIMD_FMA3 -#if 0 - const double *src = (double*)args[0]; - double *dst = (double*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f64(ssrc) || !npyv_storable_stride_f64(sdst) - ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_@func@_f64(src, 1, dst, 1, 1); - } - } else { - simd_@func@_f64(src, ssrc, dst, sdst, len); - } -#else - UNARY_LOOP { - const npy_double in1 = *(npy_double *)ip1; - *(npy_double *)op1 = npy_@func@(in1); - } -#endif -} -/**end repeat**/ - -/**begin repeat - * #func = sin, cos# - * #enum = SIMD_COMPUTE_SIN, SIMD_COMPUTE_COS# - */ -NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_@func@) -(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) -{ -#if NPY_SIMD_F32 && NPY_SIMD_FMA3 - const npy_float *src = (npy_float*)args[0]; - npy_float *dst = (npy_float*)args[1]; - - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; - npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (is_mem_overlap(src, steps[0], dst, steps[1], len) || - !npyv_loadable_stride_f32(ssrc) || !npyv_storable_stride_f32(sdst) - ) { - for (; len > 0; --len, src += ssrc, dst += sdst) { - simd_sincos_f32(src, 1, dst, 1, 1, @enum@); - } - } else { - simd_sincos_f32(src, ssrc, dst, sdst, len, @enum@); - } -#else - UNARY_LOOP { - const npy_float in1 = *(npy_float *)ip1; - *(npy_float *)op1 = npy_@func@f(in1); - } -#endif -} -/**end repeat**/ diff --git 
a/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp new file mode 100644 index 000000000000..1bc6ecfb14d6 --- /dev/null +++ b/numpy/_core/src/umath/loops_trigonometric.dispatch.cpp @@ -0,0 +1,271 @@ +#include "simd/simd.h" +#include "loops_utils.h" +#include "loops.h" +#include "fast_loop_macros.h" +#include +namespace hn = hwy::HWY_NAMESPACE; + +/* + * Vectorized approximate sine/cosine algorithms: The following code is a + * vectorized version of the algorithm presented here: + * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751 + * (1) Load data in registers and generate mask for elements that are within + * range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f, 117435.992f] + * for sine. + * (2) For elements within range, perform range reduction using + * Cody-Waite's method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, + * PI/4]. + * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the + * quadrant k = int(y). + * (4) For elements outside that range, Cody-Waite + * reduction performs poorly leading to catastrophic cancellation. We compute + * cosine by calling glibc in a scalar fashion. + * (5) Vectorized implementation + * has a max ULP of 1.49 and performs at least 5-7x(x86) - 2.5-3x(Power) - + * 1-2x(Arm) faster than scalar implementations when magnitude of all elements + * in the array < 71476.0625f (117435.992f for sine). Worst case performance + * is when all the elements are large leading to about 1-2% reduction in + * performance. 
+ * TODO: use vectorized version of Payne-Hanek style reduction for large + * elements or when there's no native FUSED support instead of fallback to libc + */ + +#if NPY_SIMD_FMA3 // native support +typedef enum +{ + SIMD_COMPUTE_SIN, + SIMD_COMPUTE_COS +} SIMD_TRIG_OP; + +const hn::ScalableTag f32; +const hn::ScalableTag s32; +using vec_f32 = hn::Vec; +using vec_s32 = hn::Vec; +using opmask_t = hn::Mask; + +HWY_INLINE HWY_ATTR vec_f32 +simd_range_reduction_f32(vec_f32& x, vec_f32& y, const vec_f32& c1, const vec_f32& c2, const vec_f32& c3) +{ + vec_f32 reduced_x = hn::MulAdd(y, c1, x); + reduced_x = hn::MulAdd(y, c2, reduced_x); + reduced_x = hn::MulAdd(y, c3, reduced_x); + return reduced_x; +} + +HWY_INLINE HWY_ATTR vec_f32 +simd_cosine_poly_f32(vec_f32& x2) +{ + const vec_f32 invf8 = hn::Set(f32, 0x1.98e616p-16f); + const vec_f32 invf6 = hn::Set(f32, -0x1.6c06dcp-10f); + const vec_f32 invf4 = hn::Set(f32, 0x1.55553cp-05f); + const vec_f32 invf2 = hn::Set(f32, -0x1.000000p-01f); + const vec_f32 invf0 = hn::Set(f32, 0x1.000000p+00f); + + vec_f32 r = hn::MulAdd(invf8, x2, invf6); + r = hn::MulAdd(r, x2, invf4); + r = hn::MulAdd(r, x2, invf2); + r = hn::MulAdd(r, x2, invf0); + return r; +} +/* + * Approximate sine algorithm for x \in [-PI/4, PI/4] + * Maximum ULP across all 32-bit floats = 0.647 + * Polynomial approximation based on unpublished work by T. 
Myklebust + */ +HWY_INLINE HWY_ATTR vec_f32 +simd_sine_poly_f32(vec_f32& x, vec_f32& x2) +{ + const vec_f32 invf9 = hn::Set(f32, 0x1.7d3bbcp-19f); + const vec_f32 invf7 = hn::Set(f32, -0x1.a06bbap-13f); + const vec_f32 invf5 = hn::Set(f32, 0x1.11119ap-07f); + const vec_f32 invf3 = hn::Set(f32, -0x1.555556p-03f); + + vec_f32 r = hn::MulAdd(invf9, x2, invf7); + r = hn::MulAdd(r, x2, invf5); + r = hn::MulAdd(r, x2, invf3); + r = hn::MulAdd(r, x2, hn::Zero(f32)); + r = hn::MulAdd(r, x, x); + return r; +} + +static void HWY_ATTR SIMD_MSVC_NOINLINE +simd_sincos_f32(const float *src, npy_intp ssrc, float *dst, npy_intp sdst, + npy_intp len, SIMD_TRIG_OP trig_op) +{ + // Load up frequently used constants + const vec_f32 zerosf = hn::Zero(f32); + const vec_s32 ones = hn::Set(s32, 1); + const vec_s32 twos = hn::Set(s32, 2); + const vec_f32 two_over_pi = hn::Set(f32, 0x1.45f306p-1f); + const vec_f32 codyw_pio2_highf = hn::Set(f32, -0x1.921fb0p+00f); + const vec_f32 codyw_pio2_medf = hn::Set(f32, -0x1.5110b4p-22f); + const vec_f32 codyw_pio2_lowf = hn::Set(f32, -0x1.846988p-48f); + const vec_f32 rint_cvt_magic = hn::Set(f32, 0x1.800000p+23f); + // Cody-Waite's range + float max_codi = 117435.992f; + if (trig_op == SIMD_COMPUTE_COS) { + max_codi = 71476.0625f; + } + const vec_f32 max_cody = hn::Set(f32, max_codi); + + const int lanes = hn::Lanes(f32); + const vec_s32 src_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, ssrc)); + const vec_s32 dst_index = hn::Mul(hn::Iota(s32, 0), hn::Set(s32, sdst)); + + for (; len > 0; len -= lanes, src += ssrc*lanes, dst += sdst*lanes) { + vec_f32 x_in; + if (ssrc == 1) { + x_in = hn::LoadN(f32, src, len); + } else { + x_in = hn::GatherIndexN(f32, src, src_index, len); + } + opmask_t nnan_mask = hn::Not(hn::IsNaN(x_in)); + // Eliminate NaN to avoid FP invalid exception + x_in = hn::IfThenElse(nnan_mask, x_in, zerosf); + opmask_t simd_mask = hn::Le(hn::Abs(x_in), max_cody); + /* + * For elements outside of this range, Cody-Waite's range 
reduction + * becomes inaccurate and we will call libc to compute cosine for + * these numbers + */ + if (!hn::AllFalse(f32, simd_mask)) { + vec_f32 x = hn::IfThenElse(hn::And(nnan_mask, simd_mask), x_in, zerosf); + + vec_f32 quadrant = hn::Mul(x, two_over_pi); + // round to nearest, -0.0f -> +0.0f, and |a| must be <= 0x1.0p+22 + quadrant = hn::Add(quadrant, rint_cvt_magic); + quadrant = hn::Sub(quadrant, rint_cvt_magic); + + // Cody-Waite's range reduction algorithm + vec_f32 reduced_x = simd_range_reduction_f32( + x, quadrant, codyw_pio2_highf, codyw_pio2_medf, codyw_pio2_lowf + ); + vec_f32 reduced_x2 = hn::Mul(reduced_x, reduced_x); + + // compute cosine and sine + vec_f32 cos = simd_cosine_poly_f32(reduced_x2); + vec_f32 sin = simd_sine_poly_f32(reduced_x, reduced_x2); + + vec_s32 iquadrant = hn::NearestInt(quadrant); + if (trig_op == SIMD_COMPUTE_COS) { + iquadrant = hn::Add(iquadrant, ones); + } + // blend sin and cos based on the quadrant + opmask_t sine_mask = hn::RebindMask(f32, hn::Eq(hn::And(iquadrant, ones), hn::Zero(s32))); + cos = hn::IfThenElse(sine_mask, sin, cos); + + // multiply by -1 for appropriate elements + opmask_t negate_mask = hn::RebindMask(f32, hn::Eq(hn::And(iquadrant, twos), twos)); + cos = hn::MaskedSubOr(cos, negate_mask, zerosf, cos); + cos = hn::IfThenElse(nnan_mask, cos, hn::Set(f32, NPY_NANF)); + + if (sdst == 1) { + hn::StoreN(cos, f32, dst, len); + } else { + hn::ScatterIndexN(cos, f32, dst, dst_index, len); + } + } + if (!hn::AllTrue(f32, simd_mask)) { + npy_uint64 simd_maski; + hn::StoreMaskBits(f32, simd_mask, (uint8_t*)&simd_maski); + float NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) ip_fback[hn::Lanes(f32)]; + hn::Store(x_in, f32, ip_fback); + + // process elements using libc for large elements + if (trig_op == SIMD_COMPUTE_COS) { + for (unsigned i = 0; i < hn::Lanes(f32); ++i) { + if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst*i] = npy_cosf(ip_fback[i]); + } + } + else { + for (unsigned i = 0; i < hn::Lanes(f32); ++i) { + 
if ((simd_maski >> i) & 1) { + continue; + } + dst[sdst*i] = npy_sinf(ip_fback[i]); + } + } + } + npyv_cleanup(); + } +} +#endif // NPY_SIMD_FMA3 + +/* Disable SIMD code sin/cos f64 and revert to libm: see + * https://mail.python.org/archives/list/numpy-discussion@python.org/thread/C6EYZZSR4EWGVKHAZXLE7IBILRMNVK7L/ + * for detailed discussion on this*/ +#define DISPATCH_DOUBLE_FUNC(func) \ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(DOUBLE_##func) \ +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) \ +{ \ + UNARY_LOOP { \ + const npy_double in1 = *(npy_double *)ip1; \ + *(npy_double *)op1 = npy_##func(in1); \ + } \ +} \ + +DISPATCH_DOUBLE_FUNC(sin) +DISPATCH_DOUBLE_FUNC(cos) + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_sin) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD_FMA3 + npy_intp len = dimensions[0]; + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_f32(steps[0]) || + !npyv_storable_stride_f32(steps[1]) + ) { + UNARY_LOOP { + simd_sincos_f32( + (npy_float *)ip1, 1, (npy_float *)op1, 1, 1, SIMD_COMPUTE_SIN); + } + } else { + const npy_float *src = (npy_float*)args[0]; + npy_float *dst = (npy_float*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(npy_float); + const npy_intp sdst = steps[1] / sizeof(npy_float); + + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_SIN); + } +#else + UNARY_LOOP { + const npy_float in1 = *(npy_float *)ip1; + *(npy_float *)op1 = npy_sinf(in1); + } +#endif +} + +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(FLOAT_cos) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD_FMA3 + npy_intp len = dimensions[0]; + + if (is_mem_overlap(args[0], steps[0], args[1], steps[1], len) || + !npyv_loadable_stride_f32(steps[0]) || + !npyv_storable_stride_f32(steps[1]) + ) { + UNARY_LOOP { + simd_sincos_f32( + (npy_float *)ip1, 1, (npy_float 
*)op1, 1, 1, SIMD_COMPUTE_COS); + } + } else { + const npy_float *src = (npy_float*)args[0]; + npy_float *dst = (npy_float*)args[1]; + const npy_intp ssrc = steps[0] / sizeof(npy_float); + const npy_intp sdst = steps[1] / sizeof(npy_float); + + simd_sincos_f32(src, ssrc, dst, sdst, len, SIMD_COMPUTE_COS); + } +#else + UNARY_LOOP { + const npy_float in1 = *(npy_float *)ip1; + *(npy_float *)op1 = npy_cosf(in1); + } +#endif +} diff --git a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src index 2390fb989190..bf358e8ee7c1 100644 --- a/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_umath_fp.dispatch.c.src @@ -160,13 +160,12 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(HALF_@func@) #if NPY_SIMD && defined(NPY_CAN_LINK_SVML) const npy_half *src = (npy_half*)args[0]; npy_half *dst = (npy_half*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; + const npy_intp len = dimensions[0]; + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - (ssrc == 1) && - (sdst == 1)) { + (steps[0] == sizeof(npy_half)) && + (steps[1] == sizeof(npy_half))) { #if defined(NPY_HAVE_AVX512_SPR) __svml_@intrin@s32(src, dst, len); return; @@ -201,14 +200,15 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) const @type@ *src = (@type@*)args[0]; @type@ *dst = (@type@*)args[1]; - const int lsize = sizeof(src[0]); - const npy_intp ssrc = steps[0] / lsize; - const npy_intp sdst = steps[1] / lsize; + const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); + if (!is_mem_overlap(src, steps[0], dst, steps[1], len) && - npyv_loadable_stride_@sfx@(ssrc) && - npyv_storable_stride_@sfx@(sdst)) { + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_storable_stride_@sfx@(steps[1])) + { + const npy_intp ssrc 
= steps[0] / sizeof(@type@); + const npy_intp sdst = steps[1] / sizeof(@type@); simd_@intrin@_@sfx@(src, ssrc, dst, sdst, len); return; } @@ -226,27 +226,44 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) * #type = npy_double, npy_float# * #vsub = , f# * #sfx = f64, f32# + * #sqrt = sqrt, sqrtf# */ /**begin repeat1 - * #func = power, arctan2# - * #intrin = pow, atan2# + * #func = power# + * #intrin = pow# */ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) { + int stride_zero = steps[1]==0; + if (stride_zero) { + BINARY_DEFS + const @type@ in2 = *(@type@ *)ip2; + if (in2 == 2.0) { + BINARY_LOOP_SLIDING { + const @type@ in1 = *(@type@ *)ip1; + *(@type@ *)op1 = in1 * in1; + } + return; + } + } #if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) const @type@ *src1 = (@type@*)args[0]; const @type@ *src2 = (@type@*)args[1]; @type@ *dst = (@type@*)args[2]; - const int lsize = sizeof(src1[0]); - const npy_intp ssrc1 = steps[0] / lsize; - const npy_intp ssrc2 = steps[1] / lsize; - const npy_intp sdst = steps[2] / lsize; + const npy_intp len = dimensions[0]; - assert(len <= 1 || (steps[0] % lsize == 0 && steps[1] % lsize == 0)); - if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && !is_mem_overlap(src2, steps[1], dst, steps[2], len) && - npyv_loadable_stride_@sfx@(ssrc1) && npyv_loadable_stride_@sfx@(ssrc2) && - npyv_storable_stride_@sfx@(sdst)) { + + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && + !is_mem_overlap(src2, steps[1], dst, steps[2], len) && + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_loadable_stride_@sfx@(steps[1]) && + npyv_storable_stride_@sfx@(steps[2]) + ) { + const npy_intp ssrc1 = steps[0] / sizeof(@type@); + const npy_intp ssrc2 = steps[1] / sizeof(@type@); + const npy_intp sdst = steps[2] / sizeof(@type@); + simd_@intrin@_@sfx@(src1, ssrc1, src2, ssrc2, dst, sdst, len); return; } @@ -258,4 +275,41 @@ 
NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) } } /**end repeat1**/ + +/**begin repeat1 + * #func = arctan2# + * #intrin = atan2# + */ +NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@func@) +(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)) +{ +#if NPY_SIMD && defined(NPY_HAVE_AVX512_SKX) && defined(NPY_CAN_LINK_SVML) + const @type@ *src1 = (@type@*)args[0]; + const @type@ *src2 = (@type@*)args[1]; + @type@ *dst = (@type@*)args[2]; + + const npy_intp len = dimensions[0]; + + if (!is_mem_overlap(src1, steps[0], dst, steps[2], len) && + !is_mem_overlap(src2, steps[1], dst, steps[2], len) && + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_loadable_stride_@sfx@(steps[1]) && + npyv_storable_stride_@sfx@(steps[2]) + ) { + const npy_intp ssrc1 = steps[0] / sizeof(@type@); + const npy_intp ssrc2 = steps[1] / sizeof(@type@); + const npy_intp sdst = steps[2] / sizeof(@type@); + + simd_@intrin@_@sfx@(src1, ssrc1, src2, ssrc2, dst, sdst, len); + return; + } +#endif + BINARY_LOOP { + const @type@ in1 = *(@type@ *)ip1; + const @type@ in2 = *(@type@ *)ip2; + *(@type@ *)op1 = npy_@intrin@@vsub@(in1, in2); + } +} +/**end repeat1**/ + /**end repeat**/ diff --git a/numpy/_core/src/umath/loops_unary.dispatch.c.src b/numpy/_core/src/umath/loops_unary.dispatch.c.src index bfe4d892d0c9..4c87c2279c3b 100644 --- a/numpy/_core/src/umath/loops_unary.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary.dispatch.c.src @@ -298,12 +298,12 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) goto clear; } #if @supports_ncontig@ - const npy_intp istride = istep / sizeof(STYPE); - const npy_intp ostride = ostep / sizeof(STYPE); - if (TO_SIMD_SFX(npyv_loadable_stride)(istride) && - TO_SIMD_SFX(npyv_storable_stride)(ostride)) + if (TO_SIMD_SFX(npyv_loadable_stride)(istep) && + TO_SIMD_SFX(npyv_storable_stride)(ostep)) { - if (istride == 1 && ostride != 1) { + const npy_intp istride = istep / sizeof(STYPE); + const npy_intp ostride = ostep 
/ sizeof(STYPE); + if (istride == sizeof(STYPE) && ostride != 1) { // contiguous input, non-contiguous output TO_SIMD_SFX(simd_unary_cn_@intrin@)( (STYPE*)ip, (STYPE*)op, ostride, len diff --git a/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src b/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src index 052ad464c7a8..ede46485313b 100644 --- a/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_complex.dispatch.c.src @@ -88,14 +88,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_absolute) { #if @VECTOR@ npy_intp len = dimensions[0]; - npy_intp ssrc = steps[0] / sizeof(@ftype@); - npy_intp sdst = steps[1] / sizeof(@ftype@); if (!is_mem_overlap(args[0], steps[0], args[1], steps[1], len) && - npyv_loadable_stride_@sfx@(ssrc) && npyv_storable_stride_@sfx@(sdst) - && steps[0] % sizeof(@ftype@) == 0 - && steps[1] % sizeof(@ftype@) == 0 + npyv_loadable_stride_@sfx@(steps[0]) && + npyv_storable_stride_@sfx@(steps[1]) ) { + npy_intp ssrc = steps[0] / sizeof(@ftype@); + npy_intp sdst = steps[1] / sizeof(@ftype@); + const @ftype@ *src = (@ftype@*)args[0]; @ftype@ *dst = (@ftype@*)args[1]; diff --git a/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src b/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src index f6404f6f7d68..6cce02cd37bc 100644 --- a/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_fp.dispatch.c.src @@ -212,15 +212,16 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) npy_intp len = dimensions[0]; #if @VCHK@ const int lsize = sizeof(npyv_lanetype_@sfx@); - assert(len <= 1 || (src_step % lsize == 0 && dst_step % lsize == 0)); + if (is_mem_overlap(src, src_step, dst, dst_step, len)) { goto no_unroll; } - const npy_intp ssrc = src_step / lsize; - const npy_intp sdst = dst_step / lsize; - if (!npyv_loadable_stride_@sfx@(ssrc) || !npyv_storable_stride_@sfx@(sdst)) { + if (!npyv_loadable_stride_@sfx@(src_step) || !npyv_storable_stride_@sfx@(dst_step)) 
{ goto no_unroll; } + + const npy_intp ssrc = src_step / lsize; + const npy_intp sdst = dst_step / lsize; if (ssrc == 1 && sdst == 1) { simd_@TYPE@_@kind@_CONTIG_CONTIG(src, 1, dst, 1, len); } diff --git a/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src b/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src index ba133dc1e60f..9f7ed6c1dfc4 100644 --- a/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src +++ b/numpy/_core/src/umath/loops_unary_fp_le.dispatch.c.src @@ -528,17 +528,14 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@) const npy_intp istep = steps[0]; const npy_intp ostep = steps[1]; npy_intp len = dimensions[0]; - const int ilsize = sizeof(npyv_lanetype_@sfx@); - const int olsize = sizeof(npy_bool); - const npy_intp istride = istep / ilsize; - const npy_intp ostride = ostep / olsize; - assert(len <= 1 || ostep % olsize == 0); - - if ((istep % ilsize == 0) && - !is_mem_overlap(ip, istep, op, ostep, len) && - npyv_loadable_stride_@sfx@(istride) && - npyv_storable_stride_@sfx@(ostride)) + + if (!is_mem_overlap(ip, istep, op, ostep, len) && + npyv_loadable_stride_@sfx@(istep) && + npyv_storable_stride_@sfx@(ostep)) { + const npy_intp istride = istep / sizeof(npyv_lanetype_@sfx@); + const npy_intp ostride = ostep / sizeof(npy_bool); + if (istride == 1 && ostride == 1) { simd_unary_@kind@_@TYPE@_CONTIG_CONTIG(ip, 1, op, 1, len); } diff --git a/numpy/_core/src/umath/loops_utils.h.src b/numpy/_core/src/umath/loops_utils.h.src index 5640a1f0b646..828d16ee635c 100644 --- a/numpy/_core/src/umath/loops_utils.h.src +++ b/numpy/_core/src/umath/loops_utils.h.src @@ -16,28 +16,31 @@ #endif /* * nomemoverlap - returns false if two strided arrays have an overlapping - * region in memory. ip_size/op_size = size of the arrays which can be negative - * indicating negative steps. + * region in memory. 
*/ NPY_FINLINE npy_bool -nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) +nomemoverlap(char *ip, npy_intp ip_step, char *op, npy_intp op_step, npy_intp len) { + // Calculate inclusive ranges for offsets of items in arrays. + // The end pointer points to address of the last item. + const npy_intp ip_offset = ip_step * (len - 1); + const npy_intp op_offset = op_step * (len - 1); char *ip_start, *ip_end, *op_start, *op_end; - if (ip_size < 0) { - ip_start = ip + ip_size; + if (ip_step < 0) { + ip_start = ip + ip_offset; ip_end = ip; } else { ip_start = ip; - ip_end = ip + ip_size; + ip_end = ip + ip_offset; } - if (op_size < 0) { - op_start = op + op_size; + if (op_step < 0) { + op_start = op + op_offset; op_end = op; } else { op_start = op; - op_end = op + op_size; + op_end = op + op_offset; } return (ip_start == op_start && op_end == ip_end) || (ip_start > op_end) || (op_start > ip_end); @@ -48,7 +51,7 @@ nomemoverlap(char *ip, npy_intp ip_size, char *op, npy_intp op_size) NPY_FINLINE npy_bool is_mem_overlap(const void *src, npy_intp src_step, const void *dst, npy_intp dst_step, npy_intp len) { - return !(nomemoverlap((char*)src, src_step*len, (char*)dst, dst_step*len)); + return !(nomemoverlap((char*)src, src_step, (char*)dst, dst_step, len)); } /* diff --git a/numpy/_core/src/umath/matmul.c.src b/numpy/_core/src/umath/matmul.c.src index cdae9d1d22a5..37f990f970ed 100644 --- a/numpy/_core/src/umath/matmul.c.src +++ b/numpy/_core/src/umath/matmul.c.src @@ -14,7 +14,7 @@ #include "numpy/halffloat.h" #include "lowlevel_strided_loops.h" -#include "npy_pycompat.h" + #include "npy_cblas.h" #include "arraytypes.h" /* For TYPE_dot functions */ diff --git a/numpy/_core/src/umath/override.c b/numpy/_core/src/umath/override.c index bb29fcdf7c52..139d9c7bdbbd 100644 --- a/numpy/_core/src/umath/override.c +++ b/numpy/_core/src/umath/override.c @@ -1,10 +1,12 @@ #define NPY_NO_DEPRECATED_API NPY_API_VERSION #define NO_IMPORT_ARRAY -#include "npy_pycompat.h" 
+#include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" #include "npy_import.h" - +#include "npy_static_data.h" +#include "multiarraymodule.h" +#include "npy_pycompat.h" #include "override.h" #include "ufunc_override.h" @@ -110,29 +112,22 @@ initialize_normal_kwds(PyObject *out_args, } } } - static PyObject *out_str = NULL; - if (out_str == NULL) { - out_str = PyUnicode_InternFromString("out"); - if (out_str == NULL) { - return -1; - } - } if (out_args != NULL) { /* Replace `out` argument with the normalized version */ - int res = PyDict_SetItem(normal_kwds, out_str, out_args); + int res = PyDict_SetItem(normal_kwds, npy_interned_str.out, out_args); if (res < 0) { return -1; } } else { /* Ensure that `out` is not present. */ - int res = PyDict_Contains(normal_kwds, out_str); + int res = PyDict_Contains(normal_kwds, npy_interned_str.out); if (res < 0) { return -1; } if (res) { - return PyDict_DelItem(normal_kwds, out_str); + return PyDict_DelItem(normal_kwds, npy_interned_str.out); } } return 0; @@ -148,18 +143,17 @@ static int normalize_signature_keyword(PyObject *normal_kwds) { /* If the keywords include `sig` rename to `signature`. */ - PyObject* obj = _PyDict_GetItemStringWithError(normal_kwds, "sig"); - if (obj == NULL && PyErr_Occurred()) { + PyObject* obj = NULL; + int result = PyDict_GetItemStringRef(normal_kwds, "sig", &obj); + if (result == -1) { return -1; } - if (obj != NULL) { - /* - * No INCREF or DECREF needed: got a borrowed reference above, - * and, unlike e.g. PyList_SetItem, PyDict_SetItem INCREF's it. - */ + if (result == 1) { if (PyDict_SetItemString(normal_kwds, "signature", obj) < 0) { + Py_DECREF(obj); return -1; } + Py_DECREF(obj); if (PyDict_DelItemString(normal_kwds, "sig") < 0) { return -1; } @@ -183,10 +177,8 @@ copy_positional_args_to_kwargs(const char **keywords, * This is only relevant for reduce, which is the only one with * 5 keyword arguments. 
*/ - static PyObject *NoValue = NULL; assert(strcmp(keywords[i], "initial") == 0); - npy_cache_import("numpy", "_NoValue", &NoValue); - if (args[i] == NoValue) { + if (args[i] == npy_static_pydata._NoValue) { continue; } } @@ -372,23 +364,23 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method, /* Check if there is a method left to call */ if (!override_obj) { /* No acceptable override found. */ - static PyObject *errmsg_formatter = NULL; PyObject *errmsg; - npy_cache_import("numpy._core._internal", - "array_ufunc_errmsg_formatter", - &errmsg_formatter); - - if (errmsg_formatter != NULL) { - /* All tuple items must be set before use */ - Py_INCREF(Py_None); - PyTuple_SET_ITEM(override_args, 0, Py_None); - errmsg = PyObject_Call(errmsg_formatter, override_args, - normal_kwds); - if (errmsg != NULL) { - PyErr_SetObject(PyExc_TypeError, errmsg); - Py_DECREF(errmsg); - } + /* All tuple items must be set before use */ + Py_INCREF(Py_None); + PyTuple_SET_ITEM(override_args, 0, Py_None); + if (npy_cache_import_runtime( + "numpy._core._internal", + "array_ufunc_errmsg_formatter", + &npy_runtime_imports.array_ufunc_errmsg_formatter) == -1) { + goto fail; + } + errmsg = PyObject_Call( + npy_runtime_imports.array_ufunc_errmsg_formatter, + override_args, normal_kwds); + if (errmsg != NULL) { + PyErr_SetObject(PyExc_TypeError, errmsg); + Py_DECREF(errmsg); } Py_DECREF(override_args); goto fail; diff --git a/numpy/_core/src/umath/reduction.c b/numpy/_core/src/umath/reduction.c index 5a938eaedb85..548530e1ca3b 100644 --- a/numpy/_core/src/umath/reduction.c +++ b/numpy/_core/src/umath/reduction.c @@ -16,11 +16,12 @@ #include "npy_config.h" #include "numpy/arrayobject.h" -#include "npy_pycompat.h" + #include "array_assign.h" #include "array_coercion.h" #include "array_method.h" #include "ctors.h" +#include "refcount.h" #include "numpy/ufuncobject.h" #include "lowlevel_strided_loops.h" @@ -438,7 +439,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, 
Py_INCREF(result); if (initial_buf != NULL && PyDataType_REFCHK(PyArray_DESCR(result))) { - PyArray_Item_XDECREF(initial_buf, PyArray_DESCR(result)); + PyArray_ClearBuffer(PyArray_DESCR(result), initial_buf, 0, 1, 1); } PyMem_FREE(initial_buf); NPY_AUXDATA_FREE(auxdata); @@ -450,7 +451,7 @@ PyUFunc_ReduceWrapper(PyArrayMethod_Context *context, fail: if (initial_buf != NULL && PyDataType_REFCHK(op_dtypes[0])) { - PyArray_Item_XDECREF(initial_buf, op_dtypes[0]); + PyArray_ClearBuffer(op_dtypes[0], initial_buf, 0, 1, 1); } PyMem_FREE(initial_buf); NPY_AUXDATA_FREE(auxdata); diff --git a/numpy/_core/src/umath/scalarmath.c.src b/numpy/_core/src/umath/scalarmath.c.src index 40ba7d65f05c..3b7b65e97fab 100644 --- a/numpy/_core/src/umath/scalarmath.c.src +++ b/numpy/_core/src/umath/scalarmath.c.src @@ -19,7 +19,7 @@ #include "numpy/npy_math.h" #include "npy_import.h" -#include "npy_pycompat.h" + #include "numpy/halffloat.h" #include "templ_common.h" @@ -798,8 +798,9 @@ typedef enum { */ CONVERT_PYSCALAR, /* - * Other object is an unknown scalar or array-like, we (typically) use + * Other object is an unknown scalar or array-like, we also use * the generic path, which normally ends up in the ufunc machinery. + * (So it ends up identical to PROMOTION_REQUIRED.) */ OTHER_IS_UNKNOWN_OBJECT, /* @@ -953,17 +954,9 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyFloat_Check(value)) { - if (!PyFloat_CheckExact(value)) { - /* A NumPy double is a float subclass, but special. 
*/ - if (PyArray_IsScalar(value, Double)) { - descr = PyArray_DescrFromType(NPY_DOUBLE); - goto numpy_scalar; - } - *may_need_deferring = NPY_TRUE; - } + if (PyFloat_CheckExact(value)) { if (!IS_SAFE(NPY_DOUBLE, NPY_@TYPE@)) { - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ return PROMOTION_REQUIRED; } @@ -977,16 +970,13 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyLong_Check(value)) { - if (!PyLong_CheckExact(value)) { - *may_need_deferring = NPY_TRUE; - } + if (PyLong_CheckExact(value)) { if (!IS_SAFE(NPY_LONG, NPY_@TYPE@)) { /* * long -> (c)longdouble is safe, so `OTHER_IS_UNKNOWN_OBJECT` will * be returned below for huge integers. */ - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ return PROMOTION_REQUIRED; } @@ -996,7 +986,7 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) long val = PyLong_AsLongAndOverflow(value, &overflow); if (overflow) { /* handle as if "unsafe" */ - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { return OTHER_IS_UNKNOWN_OBJECT; } return CONVERT_PYSCALAR; @@ -1008,17 +998,9 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return CONVERSION_SUCCESS; } - if (PyComplex_Check(value)) { - if (!PyComplex_CheckExact(value)) { - /* A NumPy complex double is a float subclass, but special. 
*/ - if (PyArray_IsScalar(value, CDouble)) { - descr = PyArray_DescrFromType(NPY_CDOUBLE); - goto numpy_scalar; - } - *may_need_deferring = NPY_TRUE; - } + if (PyComplex_CheckExact(value)) { if (!IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@)) { - if (npy_promotion_state != NPY_USE_WEAK_PROMOTION) { + if (get_npy_promotion_state() != NPY_USE_WEAK_PROMOTION) { /* Legacy promotion and weak-and-warn not handled here */ return PROMOTION_REQUIRED; } @@ -1078,7 +1060,6 @@ convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring) return OTHER_IS_UNKNOWN_OBJECT; } - numpy_scalar: if (descr->typeobj != Py_TYPE(value)) { /* * This is a subclass of a builtin type, we may continue normally, @@ -1262,12 +1243,9 @@ static PyObject * * also integers that are too large to convert to `long`), or * even a subclass of a NumPy scalar (currently). * - * Generally, we try dropping through to the array path here, - * but this can lead to infinite recursions for (c)longdouble. + * We drop through to the generic path here which checks for the + * infinite recursion problem (gh-18548, gh-26767). */ -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: /* * Python scalar that is larger than the current one, or two @@ -1391,7 +1369,7 @@ static PyObject * */ PyObject *ret; npy_float64 arg1, arg2, other_val; - @type@ other_val_conv; + @type@ other_val_conv = 0; int is_forward; if (Py_TYPE(a) == &Py@Name@ArrType_Type) { @@ -1411,7 +1389,8 @@ static PyObject * npy_bool may_need_deferring; conversion_result res = convert_to_@name@( other, &other_val_conv, &may_need_deferring); - other_val = other_val_conv; /* Need a float value */ + /* Actual float cast `other_val` is set below on success. 
*/ + if (res == CONVERSION_ERROR) { return NULL; /* an error occurred (should never happen) */ } @@ -1422,6 +1401,7 @@ static PyObject * case DEFER_TO_OTHER_KNOWN_SCALAR: Py_RETURN_NOTIMPLEMENTED; case CONVERSION_SUCCESS: + other_val = other_val_conv; /* Need a float value */ break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: case PROMOTION_REQUIRED: @@ -1544,9 +1524,6 @@ static PyObject * case CONVERSION_SUCCESS: break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_as_number->nb_power(a, b, modulo); case CONVERT_PYSCALAR: @@ -1788,12 +1765,7 @@ static int static int emit_complexwarning(void) { - static PyObject *cls = NULL; - npy_cache_import("numpy.exceptions", "ComplexWarning", &cls); - if (cls == NULL) { - return -1; - } - return PyErr_WarnEx(cls, + return PyErr_WarnEx(npy_static_pydata.ComplexWarning, "Casting complex values to real discards the imaginary part", 1); } @@ -1960,9 +1932,6 @@ static PyObject* case CONVERSION_SUCCESS: break; /* successfully extracted value we can proceed */ case OTHER_IS_UNKNOWN_OBJECT: -#if defined(IS_longdouble) || defined(IS_clongdouble) - Py_RETURN_NOTIMPLEMENTED; -#endif case PROMOTION_REQUIRED: return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op); case CONVERT_PYSCALAR: diff --git a/numpy/_core/src/umath/special_integer_comparisons.cpp b/numpy/_core/src/umath/special_integer_comparisons.cpp index 9e0c9481960b..06babeeda0a8 100644 --- a/numpy/_core/src/umath/special_integer_comparisons.cpp +++ b/numpy/_core/src/umath/special_integer_comparisons.cpp @@ -177,7 +177,7 @@ resolve_descriptors_with_scalars( { int value_range = 0; - npy_bool first_is_pyint = dtypes[0] == &PyArray_PyIntAbstractDType; + npy_bool first_is_pyint = dtypes[0] == &PyArray_PyLongDType; int arr_idx = first_is_pyint? 
1 : 0; int scalar_idx = first_is_pyint? 0 : 1; PyObject *scalar = input_scalars[scalar_idx]; @@ -293,7 +293,7 @@ get_loop(PyArrayMethod_Context *context, /* - * Machinery to add the python integer to NumPy intger comparsisons as well + * Machinery to add the python integer to NumPy integer comparsisons as well * as a special promotion to special case Python int with Python int * comparisons. */ @@ -327,7 +327,7 @@ template static int add_dtype_loops(PyObject *umath, PyArrayMethod_Spec *spec, PyObject *info) { - PyArray_DTypeMeta *PyInt = &PyArray_PyIntAbstractDType; + PyArray_DTypeMeta *PyInt = &PyArray_PyLongDType; PyObject *name = PyUnicode_FromString(comp_name(comp)); if (name == nullptr) { @@ -441,7 +441,7 @@ init_special_int_comparisons(PyObject *umath) * `np.equal(2, 4)` (with two python integers) use an object loop. */ PyObject *dtype_tuple = PyTuple_Pack(3, - &PyArray_PyIntAbstractDType, &PyArray_PyIntAbstractDType, Bool); + &PyArray_PyLongDType, &PyArray_PyLongDType, Bool); if (dtype_tuple == NULL) { goto finish; } diff --git a/numpy/_core/src/umath/string_buffer.h b/numpy/_core/src/umath/string_buffer.h index 02c2c82c4ac1..665c47bbf067 100644 --- a/numpy/_core/src/umath/string_buffer.h +++ b/numpy/_core/src/umath/string_buffer.h @@ -262,12 +262,12 @@ struct Buffer { char *buf; char *after; - inline Buffer() + inline Buffer() { buf = after = NULL; } - inline Buffer(char *buf_, npy_int64 elsize_) + inline Buffer(char *buf_, npy_int64 elsize_) { buf = buf_; after = buf_ + elsize_; @@ -1149,49 +1149,54 @@ string_lrstrip_whitespace(Buffer buf, Buffer out, STRIPTYPE striptype) return 0; } - size_t i = 0; + size_t new_start = 0; size_t num_bytes = (buf.after - buf.buf); Buffer traverse_buf = Buffer(buf.buf, num_bytes); if (striptype != STRIPTYPE::RIGHTSTRIP) { - while (i < len) { + while (new_start < len) { if (!traverse_buf.first_character_isspace()) { break; } num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf++; - i++; + new_start++; + 
traverse_buf++; // may go one beyond buffer } } - npy_intp j = len - 1; // Could also turn negative if we're stripping the whole string + size_t new_stop = len; // New stop is a range (beyond last char) if (enc == ENCODING::UTF8) { traverse_buf = Buffer(buf.after, 0) - 1; } else { - traverse_buf = buf + j; + traverse_buf = buf + (new_stop - 1); } if (striptype != STRIPTYPE::LEFTSTRIP) { - while (j >= static_cast(i)) { + while (new_stop > new_start) { if (*traverse_buf != 0 && !traverse_buf.first_character_isspace()) { break; } + num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf--; - j--; + new_stop--; + + // Do not step to character -1: can't find it's start for utf-8. + if (new_stop > 0) { + traverse_buf--; + } } } - Buffer offset_buf = buf + i; + Buffer offset_buf = buf + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } @@ -1218,22 +1223,32 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT return len1; } - size_t i = 0; + size_t new_start = 0; size_t num_bytes = (buf1.after - buf1.buf); Buffer traverse_buf = Buffer(buf1.buf, num_bytes); if (striptype != STRIPTYPE::RIGHTSTRIP) { - while (i < len1) { + for (; new_start < len1; traverse_buf++) { Py_ssize_t res; + size_t current_point_bytes = traverse_buf.num_bytes_next_character(); switch (enc) { case ENCODING::ASCII: - case ENCODING::UTF8: { CheckedIndexer ind(buf2.buf, len2); res = findchar(ind, len2, *traverse_buf); break; } + case ENCODING::UTF8: + { + if (current_point_bytes == 1) { + CheckedIndexer ind(buf2.buf, len2); + res = findchar(ind, len2, *traverse_buf); + } else { + res = fastsearch(buf2.buf, buf2.after - buf2.buf,traverse_buf.buf, 
current_point_bytes, -1, FAST_SEARCH); + } + break; + } case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); @@ -1245,30 +1260,39 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT break; } num_bytes -= traverse_buf.num_bytes_next_character(); - traverse_buf++; - i++; + new_start++; } } - npy_intp j = len1 - 1; + size_t new_stop = len1; // New stop is a range (beyond last char) if (enc == ENCODING::UTF8) { traverse_buf = Buffer(buf1.after, 0) - 1; } else { - traverse_buf = buf1 + j; + traverse_buf = buf1 + (new_stop - 1); } if (striptype != STRIPTYPE::LEFTSTRIP) { - while (j >= static_cast(i)) { + while (new_stop > new_start) { + size_t current_point_bytes = traverse_buf.num_bytes_next_character(); Py_ssize_t res; switch (enc) { case ENCODING::ASCII: - case ENCODING::UTF8: { CheckedIndexer ind(buf2.buf, len2); res = findchar(ind, len2, *traverse_buf); break; } + case ENCODING::UTF8: + { + if (current_point_bytes == 1) { + CheckedIndexer ind(buf2.buf, len2); + res = findchar(ind, len2, *traverse_buf); + } else { + res = fastsearch(buf2.buf, buf2.after - buf2.buf, traverse_buf.buf, current_point_bytes, -1, FAST_RSEARCH); + } + break; + } case ENCODING::UTF32: { CheckedIndexer ind((npy_ucs4 *)buf2.buf, len2); @@ -1279,22 +1303,23 @@ string_lrstrip_chars(Buffer buf1, Buffer buf2, Buffer out, STRIPT if (res < 0) { break; } - num_bytes -= traverse_buf.num_bytes_next_character(); - j--; - if (j > 0) { + num_bytes -= current_point_bytes;; + new_stop--; + // Do not step to character -1: can't find it's start for utf-8. 
+ if (new_stop > 0) { traverse_buf--; } } } - Buffer offset_buf = buf1 + i; + Buffer offset_buf = buf1 + new_start; if (enc == ENCODING::UTF8) { offset_buf.buffer_memcpy(out, num_bytes); return num_bytes; } - offset_buf.buffer_memcpy(out, j - i + 1); - out.buffer_fill_with_zeros_after_index(j - i + 1); - return j - i + 1; + offset_buf.buffer_memcpy(out, new_stop - new_start); + out.buffer_fill_with_zeros_after_index(new_stop - new_start); + return new_stop - new_start; } template @@ -1462,7 +1487,7 @@ string_expandtabs_length(Buffer buf, npy_int64 tabsize) line_pos = 0; } } - if (new_len == PY_SSIZE_T_MAX || new_len < 0) { + if (new_len > INT_MAX || new_len < 0) { npy_gil_error(PyExc_OverflowError, "new string is too long"); return -1; } @@ -1505,4 +1530,134 @@ string_expandtabs(Buffer buf, npy_int64 tabsize, Buffer out) } +enum class JUSTPOSITION { + CENTER, LEFT, RIGHT +}; + +template +static inline npy_intp +string_pad(Buffer buf, npy_int64 width, npy_ucs4 fill, JUSTPOSITION pos, Buffer out) +{ + size_t finalwidth = width > 0 ? 
width : 0; + if (finalwidth > PY_SSIZE_T_MAX) { + npy_gil_error(PyExc_OverflowError, "padded string is too long"); + return -1; + } + + size_t len_codepoints = buf.num_codepoints(); + size_t len_bytes = buf.after - buf.buf; + + size_t len; + if (enc == ENCODING::UTF8) { + len = len_bytes; + } + else { + len = len_codepoints; + } + + if (len_codepoints >= finalwidth) { + buf.buffer_memcpy(out, len); + return (npy_intp) len; + } + + size_t left, right; + if (pos == JUSTPOSITION::CENTER) { + size_t pad = finalwidth - len_codepoints; + left = pad / 2 + (pad & finalwidth & 1); + right = pad - left; + } + else if (pos == JUSTPOSITION::LEFT) { + left = 0; + right = finalwidth - len_codepoints; + } + else { + left = finalwidth - len_codepoints; + right = 0; + } + + assert(left >= 0 || right >= 0); + assert(left <= PY_SSIZE_T_MAX - len && right <= PY_SSIZE_T_MAX - (left + len)); + + if (left > 0) { + out.advance_chars_or_bytes(out.buffer_memset(fill, left)); + } + + buf.buffer_memcpy(out, len); + out += len_codepoints; + + if (right > 0) { + out.advance_chars_or_bytes(out.buffer_memset(fill, right)); + } + + return finalwidth; +} + + +template +static inline npy_intp +string_zfill(Buffer buf, npy_int64 width, Buffer out) +{ + size_t finalwidth = width > 0 ? 
width : 0; + + npy_ucs4 fill = '0'; + npy_intp new_len = string_pad(buf, width, fill, JUSTPOSITION::RIGHT, out); + if (new_len == -1) { + return -1; + } + + size_t offset = finalwidth - buf.num_codepoints(); + Buffer tmp = out + offset; + + npy_ucs4 c = *tmp; + if (c == '+' || c == '-') { + tmp.buffer_memset(fill, 1); + out.buffer_memset(c, 1); + } + + return new_len; +} + + +template +static inline void +string_partition(Buffer buf1, Buffer buf2, npy_int64 idx, + Buffer out1, Buffer out2, Buffer out3, + npy_intp *final_len1, npy_intp *final_len2, npy_intp *final_len3, + STARTPOSITION pos) +{ + // StringDType uses a ufunc that implements the find-part as well + assert(enc != ENCODING::UTF8); + + size_t len1 = buf1.num_codepoints(); + size_t len2 = buf2.num_codepoints(); + + if (len2 == 0) { + npy_gil_error(PyExc_ValueError, "empty separator"); + *final_len1 = *final_len2 = *final_len3 = -1; + return; + } + + if (idx < 0) { + if (pos == STARTPOSITION::FRONT) { + buf1.buffer_memcpy(out1, len1); + *final_len1 = len1; + *final_len2 = *final_len3 = 0; + } + else { + buf1.buffer_memcpy(out3, len1); + *final_len1 = *final_len2 = 0; + *final_len3 = len1; + } + return; + } + + buf1.buffer_memcpy(out1, idx); + *final_len1 = idx; + buf2.buffer_memcpy(out2, len2); + *final_len2 = len2; + (buf1 + idx + len2).buffer_memcpy(out3, len1 - idx - len2); + *final_len3 = len1 - idx - len2; +} + + #endif /* _NPY_CORE_SRC_UMATH_STRING_BUFFER_H_ */ diff --git a/numpy/_core/src/umath/string_fastsearch.h b/numpy/_core/src/umath/string_fastsearch.h index 33563b7007c2..96c1e2d30140 100644 --- a/numpy/_core/src/umath/string_fastsearch.h +++ b/numpy/_core/src/umath/string_fastsearch.h @@ -9,6 +9,7 @@ #include #include +#include #include @@ -60,13 +61,13 @@ struct CheckedIndexer { char_type *buffer; size_t length; - CheckedIndexer() + CheckedIndexer() { buffer = NULL; length = 0; } - CheckedIndexer(char_type *buf, size_t len) + CheckedIndexer(char_type *buf, size_t len) { buffer = buf; length = 
len; diff --git a/numpy/_core/src/umath/string_ufuncs.cpp b/numpy/_core/src/umath/string_ufuncs.cpp index a5686c884fc3..2bc4ce20acd6 100644 --- a/numpy/_core/src/umath/string_ufuncs.cpp +++ b/numpy/_core/src/umath/string_ufuncs.cpp @@ -507,13 +507,139 @@ string_expandtabs_loop(PyArrayMethod_Context *context, } +template +static int +string_center_ljust_rjust_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + JUSTPOSITION pos = *(JUSTPOSITION *)(context->method->static_data); + int elsize1 = context->descriptors[0]->elsize; + int elsize3 = context->descriptors[2]->elsize; + int outsize = context->descriptors[3]->elsize; + + char *in1 = data[0]; + char *in2 = data[1]; + char *in3 = data[2]; + char *out = data[3]; + + npy_intp N = dimensions[0]; + + while (N--) { + Buffer buf(in1, elsize1); + Buffer fill(in3, elsize3); + Buffer outbuf(out, outsize); + if (bufferenc == ENCODING::ASCII && fillenc == ENCODING::UTF32 && *fill > 0x7F) { + npy_gil_error(PyExc_ValueError, "non-ascii fill character is not allowed when buffer is ascii"); + return -1; + } + npy_intp len = string_pad(buf, *(npy_int64 *)in2, *fill, pos, outbuf); + if (len < 0) { + return -1; + } + outbuf.buffer_fill_with_zeros_after_index(len); + + in1 += strides[0]; + in2 += strides[1]; + in3 += strides[2]; + out += strides[3]; + } + + return 0; +} + + +template +static int +string_zfill_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + int elsize = context->descriptors[0]->elsize; + int outsize = context->descriptors[2]->elsize; + + char *in1 = data[0]; + char *in2 = data[1]; + char *out = data[2]; + + npy_intp N = dimensions[0]; + + while (N--) { + Buffer buf(in1, elsize); + Buffer outbuf(out, outsize); + npy_intp newlen = string_zfill(buf, *(npy_int64 *)in2, outbuf); + if (newlen < 0) { + return -1; + } + 
outbuf.buffer_fill_with_zeros_after_index(newlen); + + in1 += strides[0]; + in2 += strides[1]; + out += strides[2]; + } + + return 0; +} + + +template +static int +string_partition_index_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + STARTPOSITION startposition = *(STARTPOSITION *)(context->method->static_data); + int elsize1 = context->descriptors[0]->elsize; + int elsize2 = context->descriptors[1]->elsize; + int outsize1 = context->descriptors[3]->elsize; + int outsize2 = context->descriptors[4]->elsize; + int outsize3 = context->descriptors[5]->elsize; + + char *in1 = data[0]; + char *in2 = data[1]; + char *in3 = data[2]; + char *out1 = data[3]; + char *out2 = data[4]; + char *out3 = data[5]; + + npy_intp N = dimensions[0]; + + while (N--) { + Buffer buf1(in1, elsize1); + Buffer buf2(in2, elsize2); + Buffer outbuf1(out1, outsize1); + Buffer outbuf2(out2, outsize2); + Buffer outbuf3(out3, outsize3); + + npy_intp final_len1, final_len2, final_len3; + string_partition(buf1, buf2, *(npy_int64 *)in3, outbuf1, outbuf2, outbuf3, + &final_len1, &final_len2, &final_len3, startposition); + if (final_len1 < 0 || final_len2 < 0 || final_len3 < 0) { + return -1; + } + outbuf1.buffer_fill_with_zeros_after_index(final_len1); + outbuf2.buffer_fill_with_zeros_after_index(final_len2); + outbuf3.buffer_fill_with_zeros_after_index(final_len3); + + in1 += strides[0]; + in2 += strides[1]; + in3 += strides[2]; + out1 += strides[3]; + out2 += strides[4]; + out3 += strides[5]; + } + + return 0; +} + + /* Resolve descriptors & promoter functions */ static NPY_CASTING string_addition_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -540,8 
+666,8 @@ string_addition_resolve_descriptors( static NPY_CASTING string_multiply_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -574,8 +700,8 @@ string_multiply_resolve_descriptors( static NPY_CASTING string_strip_whitespace_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[2]), - PyArray_Descr *given_descrs[2], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[2]), + PyArray_Descr *const given_descrs[2], PyArray_Descr *loop_descrs[2], npy_intp *NPY_UNUSED(view_offset)) { @@ -594,8 +720,8 @@ string_strip_whitespace_resolve_descriptors( static NPY_CASTING string_strip_chars_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -618,7 +744,7 @@ string_strip_chars_resolve_descriptors( static int string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -634,7 +760,7 @@ string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), static int string_replace_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -657,8 +783,8 @@ string_replace_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_replace_resolve_descriptors( PyArrayMethodObject 
*NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[5]), - PyArray_Descr *given_descrs[5], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[5]), + PyArray_Descr *const given_descrs[5], PyArray_Descr *loop_descrs[5], npy_intp *NPY_UNUSED(view_offset)) { @@ -694,7 +820,7 @@ string_replace_resolve_descriptors( static int string_startswith_endswith_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -710,7 +836,7 @@ string_startswith_endswith_promoter(PyObject *NPY_UNUSED(ufunc), static int string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -723,7 +849,7 @@ string_expandtabs_length_promoter(PyObject *NPY_UNUSED(ufunc), static int string_expandtabs_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { Py_INCREF(op_dtypes[0]); @@ -738,8 +864,8 @@ string_expandtabs_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_expandtabs_resolve_descriptors( PyArrayMethodObject *NPY_UNUSED(self), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[3]), - PyArray_Descr *given_descrs[3], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], PyArray_Descr *loop_descrs[3], npy_intp *NPY_UNUSED(view_offset)) { @@ -769,6 +895,158 @@ string_expandtabs_resolve_descriptors( } +static int +string_center_ljust_rjust_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + 
Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + Py_INCREF(op_dtypes[0]); + new_op_dtypes[2] = op_dtypes[0]; + Py_INCREF(op_dtypes[0]); + new_op_dtypes[3] = op_dtypes[0]; + return 0; +} + + +static NPY_CASTING +string_center_ljust_rjust_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[5], + PyArray_Descr *loop_descrs[5], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[3] == NULL) { + PyErr_SetString( + PyExc_TypeError, + "The 'out' kwarg is necessary. Use the version in numpy.strings without it."); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); + if (loop_descrs[0] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); + if (loop_descrs[1] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[2] = NPY_DT_CALL_ensure_canonical(given_descrs[2]); + if (loop_descrs[2] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[3] = NPY_DT_CALL_ensure_canonical(given_descrs[3]); + if (loop_descrs[3] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + return NPY_NO_CASTING; +} + + +static int +string_zfill_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + Py_INCREF(op_dtypes[0]); + new_op_dtypes[2] = op_dtypes[0]; + return 0; +} + + +static NPY_CASTING +string_zfill_resolve_descriptors( + PyArrayMethodObject *NPY_UNUSED(self), + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[2] == 
NULL) { + PyErr_SetString( + PyExc_TypeError, + "The 'out' kwarg is necessary. Use numpy.strings.zfill without it."); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[0] = NPY_DT_CALL_ensure_canonical(given_descrs[0]); + if (loop_descrs[0] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[1] = NPY_DT_CALL_ensure_canonical(given_descrs[1]); + if (loop_descrs[1] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + loop_descrs[2] = NPY_DT_CALL_ensure_canonical(given_descrs[2]); + if (loop_descrs[2] == NULL) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + return NPY_NO_CASTING; +} + + +static int +string_partition_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + Py_INCREF(op_dtypes[0]); + new_op_dtypes[0] = op_dtypes[0]; + Py_INCREF(op_dtypes[1]); + new_op_dtypes[1] = op_dtypes[1]; + + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_Int64DType); + + Py_INCREF(op_dtypes[0]); + new_op_dtypes[3] = op_dtypes[0]; + Py_INCREF(op_dtypes[0]); + new_op_dtypes[4] = op_dtypes[0]; + Py_INCREF(op_dtypes[0]); + new_op_dtypes[5] = op_dtypes[0]; + return 0; +} + + +static NPY_CASTING +string_partition_resolve_descriptors( + PyArrayMethodObject *self, + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (!given_descrs[3] || !given_descrs[4] || !given_descrs[5]) { + PyErr_Format(PyExc_TypeError, + "The '%s' ufunc requires the 'out' keyword to be set. 
The " + "python wrapper in numpy.strings can be used without the " + "out keyword.", self->name); + return _NPY_ERROR_OCCURRED_IN_CAST; + } + + for (int i = 0; i < 6; i++) { + loop_descrs[i] = NPY_DT_CALL_ensure_canonical(given_descrs[i]); + if (!loop_descrs[i]) { + return _NPY_ERROR_OCCURRED_IN_CAST; + } + } + + return NPY_NO_CASTING; +} + + /* * Machinery to add the string loops to the existing ufuncs. */ @@ -986,10 +1264,71 @@ init_ufunc(PyObject *umath, const char *name, int nin, int nout, } +/* + * This is a variant of init_ufunc that allows for mixed string dtypes + * in its parameters. Instead of having NPY_OBJECT be a sentinel for a + * fixed dtype, here the typenums are always the correct ones. + */ +static int +init_mixed_type_ufunc(PyObject *umath, const char *name, int nin, int nout, + NPY_TYPES *typenums, PyArrayMethod_StridedLoop loop, + PyArrayMethod_ResolveDescriptors resolve_descriptors, + void *static_data) +{ + int res = -1; + + PyArray_DTypeMeta **dtypes = (PyArray_DTypeMeta **) PyMem_Malloc( + (nin + nout) * sizeof(PyArray_DTypeMeta *)); + if (dtypes == NULL) { + PyErr_NoMemory(); + return -1; + } + + for (int i = 0; i < nin+nout; i++) { + dtypes[i] = PyArray_DTypeFromTypeNum(typenums[i]); + } + + PyType_Slot slots[4]; + slots[0] = {NPY_METH_strided_loop, nullptr}; + slots[1] = {_NPY_METH_static_data, static_data}; + slots[3] = {0, nullptr}; + if (resolve_descriptors != NULL) { + slots[2] = {NPY_METH_resolve_descriptors, (void *) resolve_descriptors}; + } + else { + slots[2] = {0, nullptr}; + } + + char loop_name[256] = {0}; + snprintf(loop_name, sizeof(loop_name), "templated_string_%s", name); + + PyArrayMethod_Spec spec = {}; + spec.name = loop_name; + spec.nin = nin; + spec.nout = nout; + spec.dtypes = dtypes; + spec.slots = slots; + spec.flags = NPY_METH_NO_FLOATINGPOINT_ERRORS; + + if (add_loop(umath, name, &spec, loop) < 0) { + goto finish; + } + + res = 0; + finish: + for (int i = 0; i < nin+nout; i++) { + Py_DECREF(dtypes[i]); + } + 
PyMem_Free((void *) dtypes); + return res; +} + + + NPY_NO_EXPORT int init_string_ufuncs(PyObject *umath) { - NPY_TYPES dtypes[] = {NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING}; + NPY_TYPES dtypes[] = {NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING, NPY_STRING}; if (init_comparison(umath) < 0) { return -1; @@ -1284,6 +1623,110 @@ init_string_ufuncs(PyObject *umath) return -1; } + dtypes[1] = NPY_INT64; + + const char *center_ljust_rjust_names[] = { + "_center", "_ljust", "_rjust" + }; + + static JUSTPOSITION padpositions[] = { + JUSTPOSITION::CENTER, JUSTPOSITION::LEFT, JUSTPOSITION::RIGHT + }; + + for (int i = 0; i < 3; i++) { + dtypes[0] = NPY_STRING; + dtypes[2] = NPY_STRING; + dtypes[3] = NPY_STRING; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + dtypes[0] = NPY_STRING; + dtypes[2] = NPY_UNICODE; + dtypes[3] = NPY_STRING; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + dtypes[0] = NPY_UNICODE; + dtypes[2] = NPY_UNICODE; + dtypes[3] = NPY_UNICODE; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + dtypes[0] = NPY_UNICODE; + dtypes[2] = NPY_STRING; + dtypes[3] = NPY_UNICODE; + if (init_mixed_type_ufunc( + umath, center_ljust_rjust_names[i], 3, 1, dtypes, + string_center_ljust_rjust_loop, + string_center_ljust_rjust_resolve_descriptors, + &padpositions[i]) < 0) { + return -1; + } + if (init_promoter(umath, center_ljust_rjust_names[i], 3, 1, + string_center_ljust_rjust_promoter) < 0) { + return -1; + } + } + + dtypes[0] = NPY_OBJECT; + dtypes[1] = NPY_INT64; + 
dtypes[2] = NPY_OBJECT; + if (init_ufunc( + umath, "_zfill", 2, 1, dtypes, ENCODING::ASCII, + string_zfill_loop, + string_zfill_resolve_descriptors, NULL) < 0) { + return -1; + } + if (init_ufunc( + umath, "_zfill", 2, 1, dtypes, ENCODING::UTF32, + string_zfill_loop, + string_zfill_resolve_descriptors, NULL) < 0) { + return -1; + } + if (init_promoter(umath, "_zfill", 2, 1, string_zfill_promoter) < 0) { + return -1; + } + + dtypes[0] = dtypes[1] = dtypes[3] = dtypes[4] = dtypes[5] = NPY_OBJECT; + dtypes[2] = NPY_INT64; + + const char *partition_names[] = {"_partition_index", "_rpartition_index"}; + + static STARTPOSITION partition_startpositions[] = { + STARTPOSITION::FRONT, STARTPOSITION::BACK + }; + + for (int i = 0; i < 2; i++) { + if (init_ufunc( + umath, partition_names[i], 3, 3, dtypes, ENCODING::ASCII, + string_partition_index_loop, + string_partition_resolve_descriptors, &partition_startpositions[i]) < 0) { + return -1; + } + if (init_ufunc( + umath, partition_names[i], 3, 3, dtypes, ENCODING::UTF32, + string_partition_index_loop, + string_partition_resolve_descriptors, &partition_startpositions[i]) < 0) { + return -1; + } + if (init_promoter(umath, partition_names[i], 3, 3, + string_partition_promoter) < 0) { + return -1; + } + } + return 0; } diff --git a/numpy/_core/src/umath/stringdtype_ufuncs.cpp b/numpy/_core/src/umath/stringdtype_ufuncs.cpp index 340079a197d8..8e25b3968cfe 100644 --- a/numpy/_core/src/umath/stringdtype_ufuncs.cpp +++ b/numpy/_core/src/umath/stringdtype_ufuncs.cpp @@ -43,7 +43,7 @@ static NPY_CASTING multiply_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *dtypes[], PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const dtypes[], PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_Descr *ldescr = given_descrs[0]; @@ -148,7 +148,7 @@ static int multiply_loop_core( buf = (char *)PyMem_RawMalloc(newsize); if (buf == NULL) { 
npy_gil_error(PyExc_MemoryError, - "Failed to allocate string in multiply"); + "Failed to allocate string in multiply"); goto fail; } } @@ -172,7 +172,7 @@ static int multiply_loop_core( if (descrs[0] == descrs[1]) { if (NpyString_pack(oallocator, ops, buf, newsize) < 0) { npy_gil_error(PyExc_MemoryError, - "Failed to pack string in multiply"); + "Failed to pack string in multiply"); goto fail; } @@ -239,27 +239,18 @@ static int multiply_left_strided_loop( static NPY_CASTING binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + int out_coerce = descr1->coerce && descr1->coerce; + PyObject *out_na_object = NULL; - // _eq_comparison has a short-circuit pointer comparison fast path, - // so no need to check here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } @@ -272,8 +263,7 @@ binary_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[2] == NULL) { out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[1])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[1])->coerce); + out_na_object, out_coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -429,7 +419,7 @@ 
minimum_maximum_strided_loop(PyArrayMethod_Context *context, char *const data[], npy_packed_static_string *sout = (npy_packed_static_string *)out; int cmp = _compare(in1, in2, in1_descr, in2_descr); if (cmp == 0 && (in1 == out || in2 == out)) { - continue; + goto next_step; } if ((cmp < 0) ^ invert) { // if in and out are the same address, do nothing to avoid a @@ -449,6 +439,8 @@ minimum_maximum_strided_loop(PyArrayMethod_Context *context, char *const data[], } } } + + next_step: in1 += in1_stride; in2 += in2_stride; out += out_stride; @@ -556,9 +548,17 @@ string_comparison_strided_loop(PyArrayMethod_Context *context, char *const data[ static NPY_CASTING string_comparison_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { + PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; + PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { + return (NPY_CASTING)-1; + } + Py_INCREF(given_descrs[0]); loop_descrs[0] = given_descrs[0]; Py_INCREF(given_descrs[1]); @@ -602,7 +602,8 @@ string_isnan_strided_loop(PyArrayMethod_Context *context, char *const data[], static NPY_CASTING string_bool_output_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { Py_INCREF(given_descrs[0]); @@ -615,7 +616,8 @@ string_bool_output_resolve_descriptors( static NPY_CASTING string_intp_output_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - 
PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { Py_INCREF(given_descrs[0]); @@ -761,7 +763,8 @@ string_strlen_strided_loop(PyArrayMethod_Context *context, char *const data[], static int string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -775,27 +778,15 @@ string_findlike_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING string_findlike_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - // _eq_comparison has a short-circuit pointer comparison fast path, - // so no need to check here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { return (NPY_CASTING)-1; } @@ -821,7 +812,8 @@ string_findlike_resolve_descriptors( static int string_startswith_endswith_promoter( PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta 
*const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -835,27 +827,15 @@ string_startswith_endswith_promoter( static NPY_CASTING string_startswith_endswith_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; - // _eq_comparison has a short-circuit pointer comparison fast path, so - // no need to do it here - int eq_res = _eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object); - - if (eq_res < 0) { - return (NPY_CASTING)-1; - } - - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "Can only do binary operations with equal StringDType " - "instances."); + if (stringdtype_compatible_na(descr1->na_object, descr2->na_object, NULL) == -1) { return (NPY_CASTING)-1; } @@ -1043,56 +1023,36 @@ string_startswith_endswith_strided_loop(PyArrayMethod_Context *context, } static int -strip_chars_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *new_op_dtypes[]) +all_strings_promoter(PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) { + if ((op_dtypes[0] != &PyArray_StringDType && + op_dtypes[1] != &PyArray_StringDType && + op_dtypes[2] != &PyArray_StringDType)) { + /* + * This promoter was triggered with only unicode arguments, so use + * unicode. This can happen due to `dtype=` support which sets the + * output DType/signature. 
+ */ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_UnicodeDType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_UnicodeDType); + return 0; + } + if ((signature[0] == &PyArray_UnicodeDType && + signature[1] == &PyArray_UnicodeDType && + signature[2] == &PyArray_UnicodeDType)) { + /* Unicode forced, but didn't override a string input: invalid */ + return -1; + } new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_StringDType); new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); return 0; } -static NPY_CASTING -strip_chars_resolve_descriptors( - struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], - PyArray_Descr *loop_descrs[], - npy_intp *NPY_UNUSED(view_offset)) -{ - Py_INCREF(given_descrs[0]); - loop_descrs[0] = given_descrs[0]; - - // we don't actually care about the null behavior of the second argument, - // so no need to check if the first two descrs are equal like in - // binary_resolve_descriptors - - Py_INCREF(given_descrs[1]); - loop_descrs[1] = given_descrs[1]; - - PyArray_Descr *out_descr = NULL; - - if (given_descrs[2] == NULL) { - out_descr = (PyArray_Descr *)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[0])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[0])->coerce); - - if (out_descr == NULL) { - return (NPY_CASTING)-1; - } - } - else { - Py_INCREF(given_descrs[2]); - out_descr = given_descrs[2]; - } - - loop_descrs[2] = out_descr; - - return NPY_NO_CASTING; -} - - NPY_NO_EXPORT int string_lrstrip_chars_strided_loop( PyArrayMethod_Context *context, char *const data[], @@ -1105,6 +1065,7 @@ string_lrstrip_chars_strided_loop( PyArray_StringDTypeObject *s1descr = (PyArray_StringDTypeObject *)context->descriptors[0]; int has_null = s1descr->na_object != NULL; int has_string_na = s1descr->has_string_na; + int has_nan_na = s1descr->has_nan_na; 
const npy_static_string *default_string = &s1descr->default_string; npy_intp N = dimensions[0]; @@ -1131,28 +1092,47 @@ string_lrstrip_chars_strided_loop( s2 = *default_string; } } + else if (has_nan_na) { + if (s2_isnull) { + npy_gil_error(PyExc_ValueError, + "Cannot use a null string that is not a " + "string as the %s delimiter", ufunc_name); + } + if (s1_isnull) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } + } else { npy_gil_error(PyExc_ValueError, - "Cannot strip null values that are not strings"); + "Can only strip null values that are strings " + "or NaN-like values"); goto fail; } } + { + char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); + Buffer buf1((char *)s1.buf, s1.size); + Buffer buf2((char *)s2.buf, s2.size); + Buffer outbuf(new_buf, s1.size); + size_t new_buf_size = string_lrstrip_chars + (buf1, buf2, outbuf, striptype); + + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", + ufunc_name); + PyMem_RawFree(new_buf); + goto fail; + } - - char *new_buf = (char *)PyMem_RawCalloc(s1.size, 1); - Buffer buf1((char *)s1.buf, s1.size); - Buffer buf2((char *)s2.buf, s2.size); - Buffer outbuf(new_buf, s1.size); - size_t new_buf_size = string_lrstrip_chars - (buf1, buf2, outbuf, striptype); - - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", - ufunc_name); - goto fail; + PyMem_RawFree(new_buf); } - - PyMem_RawFree(new_buf); + next_step: in1 += strides[0]; in2 += strides[1]; @@ -1171,8 +1151,8 @@ string_lrstrip_chars_strided_loop( static NPY_CASTING strip_whitespace_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + 
PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1209,8 +1189,9 @@ string_lrstrip_whitespace_strided_loop( const char *ufunc_name = ((PyUFuncObject *)context->caller)->name; STRIPTYPE striptype = *(STRIPTYPE *)context->method->static_data; PyArray_StringDTypeObject *descr = (PyArray_StringDTypeObject *)context->descriptors[0]; - int has_string_na = descr->has_string_na; int has_null = descr->na_object != NULL; + int has_string_na = descr->has_string_na; + int has_nan_na = descr->has_nan_na; const npy_static_string *default_string = &descr->default_string; npy_string_allocator *allocators[2] = {}; @@ -1240,26 +1221,39 @@ string_lrstrip_whitespace_strided_loop( if (has_string_na || !has_null) { s = *default_string; } + else if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } else { npy_gil_error(PyExc_ValueError, - "Cannot strip null values that are not strings"); + "Can only strip null values that are strings or " + "NaN-like values"); goto fail; } } + { + char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); + Buffer buf((char *)s.buf, s.size); + Buffer outbuf(new_buf, s.size); + size_t new_buf_size = string_lrstrip_whitespace( + buf, outbuf, striptype); + + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", + ufunc_name); + goto fail; + } - char *new_buf = (char *)PyMem_RawCalloc(s.size, 1); - Buffer buf((char *)s.buf, s.size); - Buffer outbuf(new_buf, s.size); - size_t new_buf_size = string_lrstrip_whitespace( - buf, outbuf, striptype); - - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in %s", - ufunc_name); - goto fail; + PyMem_RawFree(new_buf); } - PyMem_RawFree(new_buf); + next_step: in += strides[0]; out 
+= strides[1]; @@ -1278,7 +1272,8 @@ string_lrstrip_whitespace_strided_loop( static int string_replace_promoter(PyObject *NPY_UNUSED(ufunc), - PyArray_DTypeMeta *op_dtypes[], PyArray_DTypeMeta *signature[], + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); @@ -1291,30 +1286,24 @@ string_replace_promoter(PyObject *NPY_UNUSED(ufunc), static NPY_CASTING replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; PyArray_StringDTypeObject *descr3 = (PyArray_StringDTypeObject *)given_descrs[2]; + int out_coerce = descr1->coerce && descr2->coerce && descr3->coerce; + PyObject *out_na_object = NULL; - // _eq_comparison has a short-circuit pointer comparison fast path, so - // no need to do it here - int eq_res = (_eq_comparison(descr1->coerce, descr2->coerce, - descr1->na_object, descr2->na_object) && - _eq_comparison(descr1->coerce, descr3->coerce, - descr1->na_object, descr3->na_object)); - - if (eq_res < 0) { + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } - if (eq_res != 1) { - PyErr_SetString(PyExc_TypeError, - "String replace is only supported with equal StringDType " - "instances."); + if (stringdtype_compatible_na( + out_na_object, descr3->na_object, &out_na_object) == -1) { return (NPY_CASTING)-1; } @@ -1331,8 +1320,7 @@ replace_resolve_descriptors(struct PyArrayMethodObject_tag *NPY_UNUSED(method), if (given_descrs[4] == NULL) { out_descr = (PyArray_Descr 
*)new_stringdtype_instance( - ((PyArray_StringDTypeObject *)given_descrs[0])->na_object, - ((PyArray_StringDTypeObject *)given_descrs[0])->coerce); + out_na_object, out_coerce); if (out_descr == NULL) { return (NPY_CASTING)-1; @@ -1365,7 +1353,9 @@ string_replace_strided_loop( PyArray_StringDTypeObject *descr0 = (PyArray_StringDTypeObject *)context->descriptors[0]; + int has_null = descr0->na_object != NULL; int has_string_na = descr0->has_string_na; + int has_nan_na = descr0->has_nan_na; const npy_static_string *default_string = &descr0->default_string; @@ -1395,11 +1385,29 @@ string_replace_strided_loop( goto fail; } else if (i1_isnull || i2_isnull || i3_isnull) { - if (!has_string_na) { - npy_gil_error(PyExc_ValueError, - "Null values are not supported as replacement arguments " - "for replace"); - goto fail; + if (has_null && !has_string_na) { + if (i2_isnull || i3_isnull) { + npy_gil_error(PyExc_ValueError, + "Null values are not supported as search " + "patterns or replacement strings for " + "replace"); + goto fail; + } + else if (i1_isnull) { + if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in replace"); + goto fail; + } + goto next_step; + } + else { + npy_gil_error(PyExc_ValueError, + "Only string or NaN-like null strings can " + "be used as search strings for replace"); + } + } } else { if (i1_isnull) { @@ -1414,32 +1422,51 @@ string_replace_strided_loop( } } - // conservatively overallocate - // TODO check overflow - size_t max_size; - if (i2s.size == 0) { - // interleaving - max_size = i1s.size + (i1s.size + 1)*(i3s.size); - } - else { - // replace i2 with i3 - max_size = i1s.size * (i3s.size/i2s.size + 1); - } - char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); - Buffer buf1((char *)i1s.buf, i1s.size); - Buffer buf2((char *)i2s.buf, i2s.size); - Buffer buf3((char *)i3s.buf, i3s.size); - Buffer outbuf(new_buf, max_size); + { + Buffer buf1((char *)i1s.buf, 
i1s.size); + Buffer buf2((char *)i2s.buf, i2s.size); - size_t new_buf_size = string_replace( - buf1, buf2, buf3, *(npy_int64 *)in4, outbuf); + npy_int64 in_count = *(npy_int64*)in4; + if (in_count == -1) { + in_count = NPY_MAX_INT64; + } - if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { - npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); - goto fail; - } + npy_int64 found_count = string_count( + buf1, buf2, 0, NPY_MAX_INT64); + if (found_count < 0) { + goto fail; + } - PyMem_RawFree(new_buf); + npy_intp count = Py_MIN(in_count, found_count); + + Buffer buf3((char *)i3s.buf, i3s.size); + + // conservatively overallocate + // TODO check overflow + size_t max_size; + if (i2s.size == 0) { + // interleaving + max_size = i1s.size + (i1s.size + 1)*(i3s.size); + } + else { + // replace i2 with i3 + size_t change = i2s.size >= i3s.size ? 0 : i3s.size - i2s.size; + max_size = i1s.size + count * change; + } + char *new_buf = (char *)PyMem_RawCalloc(max_size, 1); + Buffer outbuf(new_buf, max_size); + + size_t new_buf_size = string_replace( + buf1, buf2, buf3, count, outbuf); + + if (NpyString_pack(oallocator, ops, new_buf, new_buf_size) < 0) { + npy_gil_error(PyExc_MemoryError, "Failed to pack string in replace"); + goto fail; + } + + PyMem_RawFree(new_buf); + } + next_step: in1 += strides[0]; in2 += strides[1]; @@ -1459,8 +1486,8 @@ string_replace_strided_loop( static NPY_CASTING expandtabs_resolve_descriptors( struct PyArrayMethodObject_tag *NPY_UNUSED(method), - PyArray_DTypeMeta *NPY_UNUSED(dtypes[]), - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[]), + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { @@ -1571,114 +1598,623 @@ string_expandtabs_strided_loop(PyArrayMethod_Context *context, return -1; } +static int +string_center_ljust_rjust_promoter( + PyObject *NPY_UNUSED(ufunc), + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const 
signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + new_op_dtypes[0] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[1] = NPY_DT_NewRef(&PyArray_Int64DType); + new_op_dtypes[2] = NPY_DT_NewRef(&PyArray_StringDType); + new_op_dtypes[3] = NPY_DT_NewRef(&PyArray_StringDType); + return 0; +} - -NPY_NO_EXPORT int -string_inputs_promoter( - PyObject *ufunc_obj, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *new_op_dtypes[], - PyArray_DTypeMeta *final_dtype, - PyArray_DTypeMeta *result_dtype) +static NPY_CASTING +center_ljust_rjust_resolve_descriptors( + struct PyArrayMethodObject_tag *NPY_UNUSED(method), + PyArray_DTypeMeta *const dtypes[], PyArray_Descr *const given_descrs[], + PyArray_Descr *loop_descrs[], npy_intp *NPY_UNUSED(view_offset)) { - PyUFuncObject *ufunc = (PyUFuncObject *)ufunc_obj; - /* set all input operands to final_dtype */ - for (int i = 0; i < ufunc->nin; i++) { - PyArray_DTypeMeta *tmp = final_dtype; - if (signature[i]) { - tmp = signature[i]; /* never replace a fixed one. 
*/ - } - Py_INCREF(tmp); - new_op_dtypes[i] = tmp; + PyArray_StringDTypeObject *input_descr = (PyArray_StringDTypeObject *)given_descrs[0]; + PyArray_StringDTypeObject *fill_descr = (PyArray_StringDTypeObject *)given_descrs[2]; + int out_coerce = input_descr->coerce && fill_descr->coerce; + PyObject *out_na_object = NULL; + + if (stringdtype_compatible_na( + input_descr->na_object, fill_descr->na_object, &out_na_object) == -1) { + return (NPY_CASTING)-1; } - /* don't touch output dtypes if they are set */ - for (int i = ufunc->nin; i < ufunc->nargs; i++) { - if (op_dtypes[i] != NULL) { - Py_INCREF(op_dtypes[i]); - new_op_dtypes[i] = op_dtypes[i]; - } - else { - Py_INCREF(result_dtype); - new_op_dtypes[i] = result_dtype; + + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + Py_INCREF(given_descrs[1]); + loop_descrs[1] = given_descrs[1]; + Py_INCREF(given_descrs[2]); + loop_descrs[2] = given_descrs[2]; + + PyArray_Descr *out_descr = NULL; + + if (given_descrs[3] == NULL) { + out_descr = (PyArray_Descr *)new_stringdtype_instance( + out_na_object, out_coerce); + + if (out_descr == NULL) { + return (NPY_CASTING)-1; } } + else { + Py_INCREF(given_descrs[3]); + out_descr = given_descrs[3]; + } - return 0; -} + loop_descrs[3] = out_descr; -static int -string_object_bool_output_promoter( - PyObject *ufunc, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *new_op_dtypes[]) -{ - return string_inputs_promoter( - ufunc, op_dtypes, signature, - new_op_dtypes, &PyArray_ObjectDType, &PyArray_BoolDType); + return NPY_NO_CASTING; } -static int -string_unicode_bool_output_promoter( - PyObject *ufunc, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], - PyArray_DTypeMeta *new_op_dtypes[]) -{ - return string_inputs_promoter( - ufunc, op_dtypes, signature, - new_op_dtypes, &PyArray_StringDType, &PyArray_BoolDType); -} static int -is_integer_dtype(PyArray_DTypeMeta *DType) 
+center_ljust_rjust_strided_loop(PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata)) { - if (DType == &PyArray_PyIntAbstractDType) { - return 1; - } - else if (DType == &PyArray_Int8DType) { - return 1; - } - else if (DType == &PyArray_Int16DType) { - return 1; - } - else if (DType == &PyArray_Int32DType) { - return 1; - } - // int64 already has a loop registered for it, - // so don't need to consider it -#if NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT - else if (DType == &PyArray_ByteDType) { - return 1; - } -#endif -#if NPY_SIZEOF_SHORT == NPY_SIZEOF_INT - else if (DType == &PyArray_ShortDType) { - return 1; - } -#endif -#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG - else if (DType == &PyArray_IntDType) { - return 1; - } -#endif -#if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG - else if (DType == &PyArray_LongLongDType) { - return 1; - } -#endif - else if (DType == &PyArray_UInt8DType) { - return 1; - } - else if (DType == &PyArray_UInt16DType) { - return 1; - } - else if (DType == &PyArray_UInt32DType) { - return 1; - } - // uint64 already has a loop registered for it, - // so don't need to consider it -#if NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT - else if (DType == &PyArray_UByteDType) { - return 1; + PyArray_StringDTypeObject *s1descr = (PyArray_StringDTypeObject *)context->descriptors[0]; + int has_null = s1descr->na_object != NULL; + int has_nan_na = s1descr->has_nan_na; + int has_string_na = s1descr->has_string_na; + const npy_static_string *default_string = &s1descr->default_string; + npy_intp N = dimensions[0]; + char *in1 = data[0]; + char *in2 = data[1]; + char *in3 = data[2]; + char *out = data[3]; + npy_intp in1_stride = strides[0]; + npy_intp in2_stride = strides[1]; + npy_intp in3_stride = strides[2]; + npy_intp out_stride = strides[3]; + + npy_string_allocator *allocators[4] = {}; + NpyString_acquire_allocators(4, context->descriptors, allocators); + npy_string_allocator 
*s1allocator = allocators[0]; + // allocators[1] is NULL + npy_string_allocator *s2allocator = allocators[2]; + npy_string_allocator *oallocator = allocators[3]; + + JUSTPOSITION pos = *(JUSTPOSITION *)(context->method->static_data); + const char* ufunc_name = ((PyUFuncObject *)context->caller)->name; + + while (N--) { + const npy_packed_static_string *ps1 = (npy_packed_static_string *)in1; + npy_static_string s1 = {0, NULL}; + int s1_isnull = NpyString_load(s1allocator, ps1, &s1); + const npy_packed_static_string *ps2 = (npy_packed_static_string *)in3; + npy_static_string s2 = {0, NULL}; + int s2_isnull = NpyString_load(s2allocator, ps2, &s2); + npy_static_string os = {0, NULL}; + npy_packed_static_string *ops = (npy_packed_static_string *)out; + if (s1_isnull == -1 || s2_isnull == -1) { + npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", + ufunc_name); + goto fail; + } + if (NPY_UNLIKELY(s1_isnull || s2_isnull)) { + if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in %s", + ufunc_name); + goto fail; + } + goto next_step; + } + else if (has_string_na || !has_null) { + if (s1_isnull) { + s1 = *default_string; + } + if (s2_isnull) { + s2 = *default_string; + } + } + else { + npy_gil_error(PyExc_ValueError, + "Cannot %s null that is not a nan-like value", + ufunc_name); + goto fail; + } + } + { + Buffer inbuf((char *)s1.buf, s1.size); + Buffer fill((char *)s2.buf, s2.size); + + size_t num_codepoints = inbuf.num_codepoints(); + npy_intp width = (npy_intp)*(npy_int64*)in2; + + if (num_codepoints > (size_t)width) { + width = num_codepoints; + } + + char *buf = NULL; + npy_intp newsize; + int overflowed = npy_mul_sizes_with_overflow( + &(newsize), + (npy_intp)num_bytes_for_utf8_character((unsigned char *)s2.buf), + width - num_codepoints); + newsize += s1.size; + + if (overflowed) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in %s", ufunc_name); + goto 
fail; + } + + if (context->descriptors[0] == context->descriptors[3]) { + // in-place + buf = (char *)PyMem_RawMalloc(newsize); + if (buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in %s", ufunc_name); + goto fail; + } + } + else { + if (load_new_string(ops, &os, newsize, oallocator, ufunc_name) < 0) { + goto fail; + } + /* explicitly discard const; initializing new buffer */ + buf = (char *)os.buf; + } + + Buffer outbuf(buf, newsize); + + npy_intp len = string_pad(inbuf, *(npy_int64*)in2, *fill, pos, outbuf); + + if (len < 0) { + return -1; + } + + // in-place operations need to clean up temp buffer + if (context->descriptors[0] == context->descriptors[3]) { + if (NpyString_pack(oallocator, ops, buf, newsize) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to pack string in %s", ufunc_name); + goto fail; + } + + PyMem_RawFree(buf); + } + } + next_step: + + in1 += in1_stride; + in2 += in2_stride; + in3 += in3_stride; + out += out_stride; + } + + NpyString_release_allocators(4, allocators); + return 0; + + fail: + NpyString_release_allocators(4, allocators); + return -1; +} + +static int +zfill_strided_loop(PyArrayMethod_Context *context, + char *const data[], npy_intp const dimensions[], + npy_intp const strides[], NpyAuxData *NPY_UNUSED(auxdata)) +{ + PyArray_StringDTypeObject *idescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + npy_intp N = dimensions[0]; + char *in1 = data[0]; + char *in2 = data[1]; + char *out = data[2]; + npy_intp in1_stride = strides[0]; + npy_intp in2_stride = strides[1]; + npy_intp out_stride = strides[2]; + + npy_string_allocator *allocators[3] = {}; + NpyString_acquire_allocators(3, context->descriptors, allocators); + npy_string_allocator *iallocator = allocators[0]; + // allocators[1] is NULL + npy_string_allocator *oallocator = allocators[2]; + int has_null = idescr->na_object != NULL; + int has_nan_na = idescr->has_nan_na; + int has_string_na = idescr->has_string_na; + const 
npy_static_string *default_string = &idescr->default_string; + + while (N--) { + npy_static_string is = {0, NULL}; + const npy_packed_static_string *ips = + (npy_packed_static_string *)in1; + npy_static_string os = {0, NULL}; + npy_packed_static_string *ops = (npy_packed_static_string *)out; + int is_isnull = NpyString_load(iallocator, ips, &is); + if (is_isnull == -1) { + npy_gil_error(PyExc_MemoryError, + "Failed to load string in zfill"); + goto fail; + } + else if (is_isnull) { + if (has_nan_na) { + if (NpyString_pack_null(oallocator, ops) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to deallocate string in zfill"); + goto fail; + } + + goto next_step; + } + else if (has_string_na || !has_null) { + is = *(npy_static_string *)default_string; + } + else { + npy_gil_error(PyExc_TypeError, + "Cannot zfill null string that is not a nan-like " + "value"); + goto fail; + } + } + { + Buffer inbuf((char *)is.buf, is.size); + size_t in_codepoints = inbuf.num_codepoints(); + size_t width = (size_t)*(npy_int64 *)in2; + if (in_codepoints > width) { + width = in_codepoints; + } + // number of leading one-byte characters plus the size of the + // original string + size_t outsize = (width - in_codepoints) + is.size; + char *buf = NULL; + if (context->descriptors[0] == context->descriptors[2]) { + // in-place + buf = (char *)PyMem_RawMalloc(outsize); + if (buf == NULL) { + npy_gil_error(PyExc_MemoryError, + "Failed to allocate string in zfill"); + goto fail; + } + } + else { + if (load_new_string(ops, &os, outsize, oallocator, "zfill") < 0) { + goto fail; + } + /* explicitly discard const; initializing new buffer */ + buf = (char *)os.buf; + } + + Buffer outbuf(buf, outsize); + if (string_zfill(inbuf, (npy_int64)width, outbuf) < 0) { + goto fail; + } + + // in-place operations need to clean up temp buffer + if (context->descriptors[0] == context->descriptors[2]) { + if (NpyString_pack(oallocator, ops, buf, outsize) < 0) { + npy_gil_error(PyExc_MemoryError, + "Failed to 
pack string in zfill"); + goto fail; + } + + PyMem_RawFree(buf); + } + + } + + next_step: + + in1 += in1_stride; + in2 += in2_stride; + out += out_stride; + } + + NpyString_release_allocators(3, allocators); + return 0; + +fail: + NpyString_release_allocators(3, allocators); + return -1; +} + + +static NPY_CASTING +string_partition_resolve_descriptors( + PyArrayMethodObject *self, + PyArray_DTypeMeta *const NPY_UNUSED(dtypes[3]), + PyArray_Descr *const given_descrs[3], + PyArray_Descr *loop_descrs[3], + npy_intp *NPY_UNUSED(view_offset)) +{ + if (given_descrs[2] || given_descrs[3] || given_descrs[4]) { + PyErr_Format(PyExc_TypeError, "The StringDType '%s' ufunc does not " + "currently support the 'out' keyword", self->name); + return (NPY_CASTING)-1; + } + + PyArray_StringDTypeObject *descr1 = (PyArray_StringDTypeObject *)given_descrs[0]; + PyArray_StringDTypeObject *descr2 = (PyArray_StringDTypeObject *)given_descrs[1]; + int out_coerce = descr1->coerce && descr2->coerce; + PyObject *out_na_object = NULL; + + if (stringdtype_compatible_na( + descr1->na_object, descr2->na_object, &out_na_object) == -1) { + return (NPY_CASTING)-1; + } + + Py_INCREF(given_descrs[0]); + loop_descrs[0] = given_descrs[0]; + Py_INCREF(given_descrs[1]); + loop_descrs[1] = given_descrs[1]; + + for (int i=2; i<5; i++) { + loop_descrs[i] = (PyArray_Descr *)new_stringdtype_instance( + out_na_object, out_coerce); + if (loop_descrs[i] == NULL) { + return (NPY_CASTING)-1; + } + } + + return NPY_NO_CASTING; +} + +NPY_NO_EXPORT int +string_partition_strided_loop( + PyArrayMethod_Context *context, + char *const data[], + npy_intp const dimensions[], + npy_intp const strides[], + NpyAuxData *NPY_UNUSED(auxdata)) +{ + STARTPOSITION startposition = *(STARTPOSITION *)(context->method->static_data); + int fastsearch_direction = + startposition == STARTPOSITION::FRONT ? 
FAST_SEARCH : FAST_RSEARCH; + + npy_intp N = dimensions[0]; + + char *in1 = data[0]; + char *in2 = data[1]; + char *out1 = data[2]; + char *out2 = data[3]; + char *out3 = data[4]; + + npy_intp in1_stride = strides[0]; + npy_intp in2_stride = strides[1]; + npy_intp out1_stride = strides[2]; + npy_intp out2_stride = strides[3]; + npy_intp out3_stride = strides[4]; + + npy_string_allocator *allocators[5] = {}; + NpyString_acquire_allocators(5, context->descriptors, allocators); + npy_string_allocator *in1allocator = allocators[0]; + npy_string_allocator *in2allocator = allocators[1]; + npy_string_allocator *out1allocator = allocators[2]; + npy_string_allocator *out2allocator = allocators[3]; + npy_string_allocator *out3allocator = allocators[4]; + + PyArray_StringDTypeObject *idescr = + (PyArray_StringDTypeObject *)context->descriptors[0]; + int has_string_na = idescr->has_string_na; + const npy_static_string *default_string = &idescr->default_string; + + while (N--) { + const npy_packed_static_string *i1ps = (npy_packed_static_string *)in1; + npy_static_string i1s = {0, NULL}; + const npy_packed_static_string *i2ps = (npy_packed_static_string *)in2; + npy_static_string i2s = {0, NULL}; + + int i1_isnull = NpyString_load(in1allocator, i1ps, &i1s); + int i2_isnull = NpyString_load(in2allocator, i2ps, &i2s); + + if (i1_isnull == -1 || i2_isnull == -1) { + npy_gil_error(PyExc_MemoryError, "Failed to load string in %s", + ((PyUFuncObject *)context->caller)->name); + goto fail; + } + else if (NPY_UNLIKELY(i1_isnull || i2_isnull)) { + if (!has_string_na) { + npy_gil_error(PyExc_ValueError, + "Null values are not supported in %s", + ((PyUFuncObject *)context->caller)->name); + goto fail; + } + else { + if (i1_isnull) { + i1s = *default_string; + } + if (i2_isnull) { + i2s = *default_string; + } + } + } + + if (i2s.size == 0) { + npy_gil_error(PyExc_ValueError, "empty separator"); + goto fail; + } + + npy_intp idx = fastsearch((char *)i1s.buf, i1s.size, (char *)i2s.buf, 
i2s.size, -1, + fastsearch_direction); + + npy_intp out1_size, out2_size, out3_size; + + if (idx == -1) { + if (startposition == STARTPOSITION::FRONT) { + out1_size = i1s.size; + out2_size = out3_size = 0; + } + else { + out1_size = out2_size = 0; + out3_size = i1s.size; + } + } + else { + out1_size = idx; + out2_size = i2s.size; + out3_size = i1s.size - out2_size - out1_size; + } + + npy_packed_static_string *o1ps = (npy_packed_static_string *)out1; + npy_static_string o1s = {0, NULL}; + npy_packed_static_string *o2ps = (npy_packed_static_string *)out2; + npy_static_string o2s = {0, NULL}; + npy_packed_static_string *o3ps = (npy_packed_static_string *)out3; + npy_static_string o3s = {0, NULL}; + + if (load_new_string(o1ps, &o1s, out1_size, out1allocator, + ((PyUFuncObject *)context->caller)->name) == -1) { + goto fail; + } + if (load_new_string(o2ps, &o2s, out2_size, out2allocator, + ((PyUFuncObject *)context->caller)->name) == -1) { + goto fail; + } + if (load_new_string(o3ps, &o3s, out3_size, out3allocator, + ((PyUFuncObject *)context->caller)->name) == -1) { + goto fail; + } + + if (idx == -1) { + if (startposition == STARTPOSITION::FRONT) { + memcpy((char *)o1s.buf, i1s.buf, out1_size); + } + else { + memcpy((char *)o3s.buf, i1s.buf, out3_size); + } + } + else { + memcpy((char *)o1s.buf, i1s.buf, out1_size); + memcpy((char *)o2s.buf, i2s.buf, out2_size); + memcpy((char *)o3s.buf, i1s.buf + out1_size + out2_size, out3_size); + } + + in1 += in1_stride; + in2 += in2_stride; + out1 += out1_stride; + out2 += out2_stride; + out3 += out3_stride; + } + + NpyString_release_allocators(5, allocators); + return 0; + + fail: + + NpyString_release_allocators(5, allocators); + return -1; +} + +NPY_NO_EXPORT int +string_inputs_promoter( + PyObject *ufunc_obj, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[], + PyArray_DTypeMeta *final_dtype, + PyArray_DTypeMeta *result_dtype) +{ + PyUFuncObject *ufunc = 
(PyUFuncObject *)ufunc_obj; + /* set all input operands to final_dtype */ + for (int i = 0; i < ufunc->nin; i++) { + PyArray_DTypeMeta *tmp = final_dtype; + if (signature[i]) { + tmp = signature[i]; /* never replace a fixed one. */ + } + Py_INCREF(tmp); + new_op_dtypes[i] = tmp; + } + /* don't touch output dtypes if they are set */ + for (int i = ufunc->nin; i < ufunc->nargs; i++) { + if (op_dtypes[i] != NULL) { + Py_INCREF(op_dtypes[i]); + new_op_dtypes[i] = op_dtypes[i]; + } + else { + Py_INCREF(result_dtype); + new_op_dtypes[i] = result_dtype; + } + } + + return 0; +} + +static int +string_object_bool_output_promoter( + PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + return string_inputs_promoter( + ufunc, op_dtypes, signature, + new_op_dtypes, &PyArray_ObjectDType, &PyArray_BoolDType); +} + +static int +string_unicode_bool_output_promoter( + PyObject *ufunc, PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], + PyArray_DTypeMeta *new_op_dtypes[]) +{ + return string_inputs_promoter( + ufunc, op_dtypes, signature, + new_op_dtypes, &PyArray_StringDType, &PyArray_BoolDType); +} + +static int +is_integer_dtype(PyArray_DTypeMeta *DType) +{ + if (DType == &PyArray_PyLongDType) { + return 1; + } + else if (DType == &PyArray_Int8DType) { + return 1; + } + else if (DType == &PyArray_Int16DType) { + return 1; + } + else if (DType == &PyArray_Int32DType) { + return 1; + } + // int64 already has a loop registered for it, + // so don't need to consider it +#if NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT + else if (DType == &PyArray_ByteDType) { + return 1; + } +#endif +#if NPY_SIZEOF_SHORT == NPY_SIZEOF_INT + else if (DType == &PyArray_ShortDType) { + return 1; + } +#endif +#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG + else if (DType == &PyArray_IntDType) { + return 1; + } +#endif +#if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG + else if (DType == &PyArray_LongLongDType) { + 
return 1; + } +#endif + else if (DType == &PyArray_UInt8DType) { + return 1; + } + else if (DType == &PyArray_UInt16DType) { + return 1; + } + else if (DType == &PyArray_UInt32DType) { + return 1; + } + // uint64 already has a loop registered for it, + // so don't need to consider it +#if NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT + else if (DType == &PyArray_UByteDType) { + return 1; } #endif #if NPY_SIZEOF_SHORT == NPY_SIZEOF_INT @@ -1701,8 +2237,9 @@ is_integer_dtype(PyArray_DTypeMeta *DType) static int -string_multiply_promoter(PyObject *ufunc_obj, PyArray_DTypeMeta *op_dtypes[], - PyArray_DTypeMeta *signature[], +string_multiply_promoter(PyObject *ufunc_obj, + PyArray_DTypeMeta *const op_dtypes[], + PyArray_DTypeMeta *const signature[], PyArray_DTypeMeta *new_op_dtypes[]) { PyUFuncObject *ufunc = (PyUFuncObject *)ufunc_obj; @@ -1794,16 +2331,16 @@ add_promoter(PyObject *numpy, const char *ufunc_name, PyObject *DType_tuple = PyTuple_New(n_dtypes); - for (size_t i=0; iflags |= ( - NPY_ARRAY_WAS_PYTHON_INT|NPY_ARRAY_WAS_INT_AND_REPLACED); - } - Py_INCREF(zero_arr); - Py_SETREF(out_op[i], zero_arr); + Py_INCREF(npy_static_pydata.zero_pyint_like_arr); + Py_SETREF(out_op[i], + (PyArrayObject *)npy_static_pydata.zero_pyint_like_arr); } *promoting_pyscalars = NPY_TRUE; } } - if (*allow_legacy_promotion && (!all_scalar && any_scalar)) { + if ((!all_scalar && any_scalar)) { *force_legacy_promotion = should_use_min_scalar(nin, out_op, 0, NULL); } @@ -789,9 +779,9 @@ check_for_trivial_loop(PyArrayMethodObject *ufuncimpl, if (dtypes[i] != PyArray_DESCR(op[i])) { npy_intp view_offset; - NPY_CASTING safety = PyArray_GetCastInfo( - PyArray_DESCR(op[i]), dtypes[i], NULL, &view_offset); - if (safety < 0 && PyErr_Occurred()) { + npy_intp is_safe = PyArray_SafeCast( + PyArray_DESCR(op[i]), dtypes[i], &view_offset, casting, 0); + if (is_safe < 0 && PyErr_Occurred()) { /* A proper error during a cast check, should be rare */ return -1; } @@ -806,8 +796,8 @@ 
check_for_trivial_loop(PyArrayMethodObject *ufuncimpl, * can force cast to bool) */ } - else if (PyArray_MinCastSafety(safety, casting) != casting) { - return 0; /* the cast is not safe enough */ + else if (is_safe != 1) { + return 0; /* there was a cast error or cast is not safe enough */ } } if (must_copy) { @@ -1011,9 +1001,11 @@ try_trivial_single_output_loop(PyArrayMethod_Context *context, */ static inline int validate_casting(PyArrayMethodObject *method, PyUFuncObject *ufunc, - PyArrayObject *ops[], PyArray_Descr *descriptors[], + PyArrayObject *ops[], PyArray_Descr *const descriptors_const[], NPY_CASTING casting) { + /* Cast away const to not change old public `PyUFunc_ValidateCasting`. */ + PyArray_Descr **descriptors = (PyArray_Descr **)descriptors_const; if (method->resolve_descriptors == &wrapped_legacy_resolve_descriptors) { /* * In this case the legacy type resolution was definitely called @@ -1091,7 +1083,7 @@ execute_ufunc_loop(PyArrayMethod_Context *context, int masked, NpyIter *iter = NpyIter_AdvancedNew(nop + masked, op, iter_flags, order, NPY_UNSAFE_CASTING, - op_flags, context->descriptors, + op_flags, (PyArray_Descr **)context->descriptors, -1, NULL, NULL, buffersize); if (iter == NULL) { return -1; @@ -1336,8 +1328,6 @@ _check_keepdims_support(PyUFuncObject *ufunc) { static int _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, PyArrayObject **op, int broadcast_ndim, int **remap_axis) { - static PyObject *AxisError_cls = NULL; - int nin = ufunc->nin; int nop = ufunc->nargs; int iop, list_size; @@ -1383,12 +1373,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, if (PyTuple_Check(op_axes_tuple)) { if (PyTuple_Size(op_axes_tuple) != op_ncore) { /* must have been a tuple with too many entries. 
*/ - npy_cache_import( - "numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - PyErr_Format(AxisError_cls, + PyErr_Format(npy_static_pydata.AxisError, "%s: operand %d has %d core dimensions, " "but %zd dimensions are specified by axes tuple.", ufunc_get_name_cstr(ufunc), iop, op_ncore, @@ -1412,11 +1397,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes, return -1; } /* If it is a single integer, inform user that more are needed */ - npy_cache_import("numpy.exceptions", "AxisError", &AxisError_cls); - if (AxisError_cls == NULL) { - return -1; - } - PyErr_Format(AxisError_cls, + PyErr_Format(npy_static_pydata.AxisError, "%s: operand %d has %d core dimensions, " "but the axes item is a single integer.", ufunc_get_name_cstr(ufunc), iop, op_ncore); @@ -1609,6 +1590,13 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op, } } + if (ufunc->process_core_dims_func != NULL) { + int status = ufunc->process_core_dims_func(ufunc, core_dim_sizes); + if (status != 0) { + return -1; + } + } + /* * Make sure no core dimension is unspecified. 
*/ @@ -2364,16 +2352,16 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, } PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, - ops, signature, operation_DTypes, NPY_FALSE, NPY_TRUE, - NPY_FALSE, NPY_TRUE); + ops, signature, operation_DTypes, NPY_FALSE, NPY_FALSE, NPY_TRUE); if (evil_ndim_mutating_hack) { ((PyArrayObject_fields *)out)->nd = 0; } - /* DTypes may currently get filled in fallbacks and XDECREF for error: */ - Py_XDECREF(operation_DTypes[0]); - Py_XDECREF(operation_DTypes[1]); - Py_XDECREF(operation_DTypes[2]); + if (ufuncimpl == NULL) { + /* DTypes may currently get filled in fallbacks and XDECREF for error: */ + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); return NULL; } @@ -2384,8 +2372,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, * casting safety could in principle be set to the default same-kind. * (although this should possibly happen through a deprecation) */ - if (resolve_descriptors(3, ufunc, ufuncimpl, - ops, out_descrs, signature, NULL, casting) < 0) { + int res = resolve_descriptors(3, ufunc, ufuncimpl, + ops, out_descrs, signature, operation_DTypes, NULL, casting); + + Py_XDECREF(operation_DTypes[0]); + Py_XDECREF(operation_DTypes[1]); + Py_XDECREF(operation_DTypes[2]); + if (res < 0) { return NULL; } @@ -2407,6 +2400,13 @@ reducelike_promote_and_resolve(PyUFuncObject *ufunc, out_descrs[0], out_descrs[1], out_descrs[2]); goto fail; } + /* + * After checking that they are equivalent, we enforce the use of the out + * one (which the user should have defined). (Needed by string dtype) + */ + Py_INCREF(out_descrs[2]); + Py_SETREF(out_descrs[0], out_descrs[2]); + /* TODO: This really should _not_ be unsafe casting (same above)! 
*/ if (validate_casting(ufuncimpl, ufunc, ops, out_descrs, casting) < 0) { goto fail; @@ -4029,12 +4029,13 @@ static int resolve_descriptors(int nop, PyUFuncObject *ufunc, PyArrayMethodObject *ufuncimpl, PyArrayObject *operands[], PyArray_Descr *dtypes[], - PyArray_DTypeMeta *signature[], PyObject *inputs_tup, - NPY_CASTING casting) + PyArray_DTypeMeta *signature[], PyArray_DTypeMeta *original_DTypes[], + PyObject *inputs_tup, NPY_CASTING casting) { int retval = -1; NPY_CASTING safety; - PyArray_Descr *original_dtypes[NPY_MAXARGS]; + int n_cleanup = 0; /* number of original_descrs filled (to XDECREF) */ + PyArray_Descr *original_descrs[NPY_MAXARGS]; NPY_UF_DBG_PRINT("Resolving the descriptors\n"); @@ -4049,54 +4050,95 @@ resolve_descriptors(int nop, PyObject *input_scalars[NPY_MAXARGS]; for (int i = 0; i < nop; i++) { if (operands[i] == NULL) { - original_dtypes[i] = NULL; + original_descrs[i] = NULL; } else { /* For abstract DTypes, we might want to change what this is */ - original_dtypes[i] = PyArray_DTYPE(operands[i]); - Py_INCREF(original_dtypes[i]); + original_descrs[i] = PyArray_DTYPE(operands[i]); + Py_INCREF(original_descrs[i]); } - if (i < nin - && NPY_DT_is_abstract(signature[i]) - && inputs_tup != NULL) { - /* - * TODO: We may wish to allow any scalar here. Checking for - * abstract assumes this works out for Python scalars, - * which is the important case (especially for now). - * - * One possible check would be `DType->type == type(obj)`. - */ - input_scalars[i] = PyTuple_GET_ITEM(inputs_tup, i); + /* + * Check whether something is a scalar of the given type. + * We leave it to resolve_descriptors_with_scalars to deal + * with, e.g., only doing something special for python scalars. + */ + if (i < nin && inputs_tup != NULL) { + PyObject *input = PyTuple_GET_ITEM(inputs_tup, i); + input_scalars[i] = signature[i]->scalar_type == Py_TYPE(input) ? 
+ input : NULL; } else { input_scalars[i] = NULL; } } + n_cleanup = nop; npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ safety = ufuncimpl->resolve_descriptors_with_scalars( - ufuncimpl, signature, original_dtypes, input_scalars, + ufuncimpl, signature, original_descrs, input_scalars, dtypes, &view_offset ); + + /* For scalars, replace the operand if needed (scalars can't be out) */ + for (int i = 0; i < nin; i++) { + if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) { + /* `resolve_descriptors_with_scalars` decides the descr */ + if (npy_update_operand_for_scalar( + &operands[i], input_scalars[i], dtypes[i], + /* ignore cast safety for this op (resolvers job) */ + NPY_SAFE_CASTING) < 0) { + goto finish; + } + } + } goto check_safety; } for (int i = 0; i < nop; ++i) { if (operands[i] == NULL) { - original_dtypes[i] = NULL; + original_descrs[i] = NULL; + continue; } - else { - /* - * The dtype may mismatch the signature, in which case we need - * to make it fit before calling the resolution. - */ - PyArray_Descr *descr = PyArray_DTYPE(operands[i]); - original_dtypes[i] = PyArray_CastDescrToDType(descr, signature[i]); - if (original_dtypes[i] == NULL) { - nop = i; /* only this much is initialized */ + PyArray_Descr *descr = PyArray_DTYPE(operands[i]); + + /* + * If we are working with Python literals/scalars, deal with them. + * If needed, we create new array with the right descriptor. 
+ */ + if ((PyArray_FLAGS(operands[i]) & NPY_ARRAY_WAS_PYTHON_LITERAL)) { + PyObject *input; + if (inputs_tup == NULL) { + input = NULL; + } + else { + input = PyTuple_GET_ITEM(inputs_tup, i); + } + + PyArray_Descr *new_descr = npy_find_descr_for_scalar( + input, descr, original_DTypes[i], signature[i]); + if (new_descr == NULL) { goto finish; } + int res = npy_update_operand_for_scalar( + &operands[i], input, new_descr, casting); + Py_DECREF(new_descr); + if (res < 0) { + goto finish; + } + + /* Descriptor may have been modified along the way */ + descr = PyArray_DESCR(operands[i]); + } + + /* + * The dtype may mismatch the signature, in which case we need + * to make it fit before calling the resolution. + */ + original_descrs[i] = PyArray_CastDescrToDType(descr, signature[i]); + if (original_descrs[i] == NULL) { + goto finish; } + n_cleanup += 1; } if (ufuncimpl->resolve_descriptors != &wrapped_legacy_resolve_descriptors) { @@ -4104,7 +4146,7 @@ resolve_descriptors(int nop, npy_intp view_offset = NPY_MIN_INTP; /* currently ignored */ safety = ufuncimpl->resolve_descriptors(ufuncimpl, - signature, original_dtypes, dtypes, &view_offset); + signature, original_descrs, dtypes, &view_offset); goto check_safety; } else { @@ -4131,8 +4173,8 @@ resolve_descriptors(int nop, retval = 0; finish: - for (int i = 0; i < nop; i++) { - Py_XDECREF(original_dtypes[i]); + for (int i = 0; i < n_cleanup; i++) { + Py_XDECREF(original_descrs[i]); } return retval; } @@ -4441,13 +4483,12 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, npy_bool subok = NPY_TRUE; int keepdims = -1; /* We need to know if it was passed */ npy_bool force_legacy_promotion; - npy_bool allow_legacy_promotion; npy_bool promoting_pyscalars; if (convert_ufunc_arguments(ufunc, /* extract operand related information: */ full_args, operands, operand_DTypes, - &force_legacy_promotion, &allow_legacy_promotion, + &force_legacy_promotion, &promoting_pyscalars, /* extract general information: */ order_obj, &order, @@ 
-4468,7 +4509,7 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, */ PyArrayMethodObject *ufuncimpl = promote_and_get_ufuncimpl(ufunc, operands, signature, - operand_DTypes, force_legacy_promotion, allow_legacy_promotion, + operand_DTypes, force_legacy_promotion, promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto fail; @@ -4476,49 +4517,11 @@ ufunc_generic_fastcall(PyUFuncObject *ufunc, /* Find the correct descriptors for the operation */ if (resolve_descriptors(nop, ufunc, ufuncimpl, - operands, operation_descrs, signature, full_args.in, casting) < 0) { + operands, operation_descrs, signature, operand_DTypes, + full_args.in, casting) < 0) { goto fail; } - if (promoting_pyscalars) { - /* - * Python integers need to be cast specially. For other python - * scalars it does not hurt either. It would be nice to never create - * the array in this case, but that is difficult until value-based - * promotion rules are gone. (After that, we may get away with using - * dummy arrays rather than real arrays for the legacy resolvers.) - */ - for (int i = 0; i < nin; i++) { - int orig_flags = PyArray_FLAGS(operands[i]); - if (!(orig_flags & NPY_ARRAY_WAS_PYTHON_LITERAL)) { - continue; - } - /* - * If descriptor matches, no need to convert, but integers may - * have been too large. - */ - if (!(orig_flags & NPY_ARRAY_WAS_INT_AND_REPLACED) - && PyArray_EquivTypes( - PyArray_DESCR(operands[i]), operation_descrs[i])) { - continue; - } - /* Otherwise, replace the operand with a new array */ - PyArray_Descr *descr = operation_descrs[i]; - Py_INCREF(descr); - PyArrayObject *new = (PyArrayObject *)PyArray_NewFromDescr( - &PyArray_Type, descr, 0, NULL, NULL, NULL, 0, NULL); - Py_SETREF(operands[i], new); - if (operands[i] == NULL) { - goto fail; - } - - PyObject *value = PyTuple_GET_ITEM(full_args.in, i); - if (PyArray_SETITEM(new, PyArray_BYTES(operands[i]), value) < 0) { - goto fail; - } - } - } - /* * Do the final preparations and call the inner-loop. 
*/ @@ -4705,6 +4708,8 @@ PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, voi /* Type resolution and inner loop selection functions */ ufunc->type_resolver = &PyUFunc_DefaultTypeResolver; + ufunc->process_core_dims_func = NULL; + ufunc->op_flags = NULL; ufunc->_loops = NULL; if (nin + nout != 0) { @@ -5244,8 +5249,8 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) { PyArrayObject *ap1 = NULL; PyObject *tmp; - static PyObject *_numpy_matrix; - npy_cache_import("numpy", "matrix", &_numpy_matrix); + npy_cache_import_runtime("numpy", "matrix", + &npy_runtime_imports.numpy_matrix); const char *matrix_deprecation_msg = ( "%s.outer() was passed a numpy matrix as %s argument. " @@ -5256,7 +5261,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) tmp = PyTuple_GET_ITEM(args, 0); - if (PyObject_IsInstance(tmp, _numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "first") < 0) { @@ -5273,7 +5278,7 @@ prepare_input_arguments_for_outer(PyObject *args, PyUFuncObject *ufunc) PyArrayObject *ap2 = NULL; tmp = PyTuple_GET_ITEM(args, 1); - if (PyObject_IsInstance(tmp, _numpy_matrix)) { + if (PyObject_IsInstance(tmp, npy_runtime_imports.numpy_matrix)) { /* DEPRECATED 2020-05-13, NumPy 1.20 */ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, matrix_deprecation_msg, ufunc->name, "second") < 0) { @@ -5799,22 +5804,20 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) operand_DTypes[0] = NPY_DTYPE(PyArray_DESCR(op1_array)); Py_INCREF(operand_DTypes[0]); int force_legacy_promotion = 0; - int allow_legacy_promotion = NPY_DT_is_legacy(operand_DTypes[0]); if (op2_array != NULL) { tmp_operands[1] = op2_array; operand_DTypes[1] = NPY_DTYPE(PyArray_DESCR(op2_array)); Py_INCREF(operand_DTypes[1]); - allow_legacy_promotion &= NPY_DT_is_legacy(operand_DTypes[1]); 
tmp_operands[2] = tmp_operands[0]; operand_DTypes[2] = operand_DTypes[0]; Py_INCREF(operand_DTypes[2]); - if (allow_legacy_promotion && ((PyArray_NDIM(op1_array) == 0) - != (PyArray_NDIM(op2_array) == 0))) { - /* both are legacy and only one is 0-D: force legacy */ - force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); - } + if ((PyArray_NDIM(op1_array) == 0) + != (PyArray_NDIM(op2_array) == 0)) { + /* both are legacy and only one is 0-D: force legacy */ + force_legacy_promotion = should_use_min_scalar(2, tmp_operands, 0, NULL); + } } else { tmp_operands[1] = tmp_operands[0]; @@ -5825,7 +5828,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) ufuncimpl = promote_and_get_ufuncimpl(ufunc, tmp_operands, signature, operand_DTypes, force_legacy_promotion, - allow_legacy_promotion, NPY_FALSE, NPY_FALSE); + NPY_FALSE, NPY_FALSE); if (ufuncimpl == NULL) { for (int i = 0; i < 3; i++) { Py_XDECREF(signature[i]); @@ -5836,7 +5839,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args) /* Find the correct operation_descrs for the operation */ int resolve_result = resolve_descriptors(nop, ufunc, ufuncimpl, - tmp_operands, operation_descrs, signature, NULL, NPY_UNSAFE_CASTING); + tmp_operands, operation_descrs, signature, operand_DTypes, NULL, NPY_UNSAFE_CASTING); for (int i = 0; i < 3; i++) { Py_XDECREF(signature[i]); Py_XDECREF(operand_DTypes[i]); @@ -6063,11 +6066,10 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, PyArray_Descr *operation_descrs[NPY_MAXARGS] = {NULL}; /* This entry-point to promotion lives in the NEP 50 future: */ - int original_promotion_state = npy_promotion_state; - npy_promotion_state = NPY_USE_WEAK_PROMOTION; + int original_promotion_state = get_npy_promotion_state(); + set_npy_promotion_state(NPY_USE_WEAK_PROMOTION); npy_bool promoting_pyscalars = NPY_FALSE; - npy_bool allow_legacy_promotion = NPY_TRUE; if (_get_fixed_signature(ufunc, NULL, signature_obj, signature) < 0) { goto finish; @@ -6100,9 +6102,6 @@ 
py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, } DTypes[i] = NPY_DTYPE(descr); Py_INCREF(DTypes[i]); - if (!NPY_DT_is_legacy(DTypes[i])) { - allow_legacy_promotion = NPY_FALSE; - } } /* Explicitly allow int, float, and complex for the "weak" types. */ else if (descr_obj == (PyObject *)&PyLong_Type) { @@ -6112,8 +6111,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_INT); - Py_INCREF(&PyArray_PyIntAbstractDType); - DTypes[i] = &PyArray_PyIntAbstractDType; + Py_INCREF(&PyArray_PyLongDType); + DTypes[i] = &PyArray_PyLongDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == (PyObject *)&PyFloat_Type) { @@ -6123,8 +6122,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_FLOAT); - Py_INCREF(&PyArray_PyFloatAbstractDType); - DTypes[i] = &PyArray_PyFloatAbstractDType; + Py_INCREF(&PyArray_PyFloatDType); + DTypes[i] = &PyArray_PyFloatDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == (PyObject *)&PyComplex_Type) { @@ -6134,8 +6133,8 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, goto finish; } PyArray_ENABLEFLAGS(dummy_arrays[i], NPY_ARRAY_WAS_PYTHON_COMPLEX); - Py_INCREF(&PyArray_PyComplexAbstractDType); - DTypes[i] = &PyArray_PyComplexAbstractDType; + Py_INCREF(&PyArray_PyComplexDType); + DTypes[i] = &PyArray_PyComplexDType; promoting_pyscalars = NPY_TRUE; } else if (descr_obj == Py_None) { @@ -6158,14 +6157,14 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, if (!reduction) { ufuncimpl = promote_and_get_ufuncimpl(ufunc, dummy_arrays, signature, DTypes, NPY_FALSE, - allow_legacy_promotion, promoting_pyscalars, NPY_FALSE); + promoting_pyscalars, NPY_FALSE); if (ufuncimpl == NULL) { goto finish; } /* Find the correct descriptors for the operation */ if 
(resolve_descriptors(ufunc->nargs, ufunc, ufuncimpl, - dummy_arrays, operation_descrs, signature, + dummy_arrays, operation_descrs, signature, DTypes, NULL, casting) < 0) { goto finish; } @@ -6243,7 +6242,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, context->descriptors = call_info->_descrs; for (int i=0; i < ufunc->nargs; i++) { Py_INCREF(operation_descrs[i]); - context->descriptors[i] = operation_descrs[i]; + ((PyArray_Descr **)context->descriptors)[i] = operation_descrs[i]; } result = PyTuple_Pack(2, result_dtype_tuple, capsule); @@ -6251,7 +6250,7 @@ py_resolve_dtypes_generic(PyUFuncObject *ufunc, npy_bool return_context, Py_DECREF(capsule); finish: - npy_promotion_state = original_promotion_state; + set_npy_promotion_state(original_promotion_state); Py_XDECREF(result_dtype_tuple); for (int i = 0; i < ufunc->nargs; i++) { @@ -6422,15 +6421,11 @@ _typecharfromnum(int num) { static PyObject * ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) { - static PyObject *_sig_formatter; PyObject *doc; - npy_cache_import( - "numpy._core._internal", - "_ufunc_doc_signature_formatter", - &_sig_formatter); - - if (_sig_formatter == NULL) { + if (npy_cache_import_runtime( + "numpy._core._internal", "_ufunc_doc_signature_formatter", + &npy_runtime_imports._ufunc_doc_signature_formatter) == -1) { return NULL; } @@ -6439,8 +6434,9 @@ ufunc_get_doc(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) * introspection on name and nin + nout to automate the first part * of it the doc string shouldn't need the calling convention */ - doc = PyObject_CallFunctionObjArgs(_sig_formatter, - (PyObject *)ufunc, NULL); + doc = PyObject_CallFunctionObjArgs( + npy_runtime_imports._ufunc_doc_signature_formatter, + (PyObject *)ufunc, NULL); if (doc == NULL) { return NULL; } @@ -6533,10 +6529,6 @@ ufunc_get_signature(PyUFuncObject *ufunc, void *NPY_UNUSED(ignored)) #undef _typecharfromnum -/* - * Docstring is now set from python - * static char 
*Ufunctype__doc__ = NULL; - */ static PyGetSetDef ufunc_getset[] = { {"__doc__", (getter)ufunc_get_doc, diff --git a/numpy/_core/src/umath/ufunc_object.h b/numpy/_core/src/umath/ufunc_object.h index 645023f66aa5..f8e522374394 100644 --- a/numpy/_core/src/umath/ufunc_object.h +++ b/numpy/_core/src/umath/ufunc_object.h @@ -10,9 +10,4 @@ ufunc_get_name_cstr(PyUFuncObject *ufunc); NPY_NO_EXPORT PyObject * PyUFunc_GetDefaultIdentity(PyUFuncObject *ufunc, npy_bool *reorderable); -/* strings from umathmodule.c that are interned on umath import */ -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_ufunc; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_array_wrap; -NPY_VISIBILITY_HIDDEN extern PyObject *npy_um_str_pyvals_name; - #endif diff --git a/numpy/_core/src/umath/ufunc_type_resolution.c b/numpy/_core/src/umath/ufunc_type_resolution.c index 4975d41147ea..cabcff3b9bef 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.c +++ b/numpy/_core/src/umath/ufunc_type_resolution.c @@ -33,10 +33,11 @@ #endif #include "npy_config.h" -#include "npy_pycompat.h" -#include "npy_import.h" +#include "numpy/npy_common.h" +#include "numpy/ndarraytypes.h" #include "numpy/ufuncobject.h" +#include "npy_import.h" #include "ufunc_type_resolution.h" #include "ufunc_object.h" #include "common.h" @@ -76,16 +77,8 @@ npy_casting_to_py_object(NPY_CASTING casting) */ static int raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { - static PyObject *exc_type = NULL; PyObject *exc_value; - npy_cache_import( - "numpy._core._exceptions", "_UFuncBinaryResolutionError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - /* produce an error object */ exc_value = Py_BuildValue( "O(OO)", ufunc, @@ -95,7 +88,8 @@ raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) { if (exc_value == NULL){ return -1; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject( + npy_static_pydata._UFuncBinaryResolutionError, exc_value); 
Py_DECREF(exc_value); return -1; @@ -108,15 +102,6 @@ NPY_NO_EXPORT int raise_no_loop_found_error( PyUFuncObject *ufunc, PyObject **dtypes) { - static PyObject *exc_type = NULL; - - npy_cache_import( - "numpy._core._exceptions", "_UFuncNoLoopError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - PyObject *dtypes_tup = PyArray_TupleFromItems(ufunc->nargs, dtypes, 1); if (dtypes_tup == NULL) { return -1; @@ -127,7 +112,7 @@ raise_no_loop_found_error( if (exc_value == NULL) { return -1; } - PyErr_SetObject(exc_type, exc_value); + PyErr_SetObject(npy_static_pydata._UFuncNoLoopError, exc_value); Py_DECREF(exc_value); return -1; @@ -179,15 +164,8 @@ raise_input_casting_error( PyArray_Descr *to, npy_intp i) { - static PyObject *exc_type = NULL; - npy_cache_import( - "numpy._core._exceptions", "_UFuncInputCastingError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - - return raise_casting_error(exc_type, ufunc, casting, from, to, i); + return raise_casting_error(npy_static_pydata._UFuncInputCastingError, + ufunc, casting, from, to, i); } @@ -202,15 +180,8 @@ raise_output_casting_error( PyArray_Descr *to, npy_intp i) { - static PyObject *exc_type = NULL; - npy_cache_import( - "numpy._core._exceptions", "_UFuncOutputCastingError", - &exc_type); - if (exc_type == NULL) { - return -1; - } - - return raise_casting_error(exc_type, ufunc, casting, from, to, i); + return raise_casting_error(npy_static_pydata._UFuncOutputCastingError, + ufunc, casting, from, to, i); } @@ -226,7 +197,7 @@ NPY_NO_EXPORT int PyUFunc_ValidateCasting(PyUFuncObject *ufunc, NPY_CASTING casting, PyArrayObject **operands, - PyArray_Descr **dtypes) + PyArray_Descr *const *dtypes) { int i, nin = ufunc->nin, nop = nin + ufunc->nout; @@ -1439,22 +1410,6 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, PyArray_Descr **out_dtypes) { int type_num1, type_num2; - static PyObject *default_type_tup = NULL; - - /* Set default type for integer inputs to NPY_DOUBLE */ - if 
(default_type_tup == NULL) { - PyArray_Descr *tmp = PyArray_DescrFromType(NPY_DOUBLE); - - if (tmp == NULL) { - return -1; - } - default_type_tup = PyTuple_Pack(3, tmp, tmp, tmp); - if (default_type_tup == NULL) { - Py_DECREF(tmp); - return -1; - } - Py_DECREF(tmp); - } type_num1 = PyArray_DESCR(operands[0])->type_num; type_num2 = PyArray_DESCR(operands[1])->type_num; @@ -1462,8 +1417,9 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, if (type_tup == NULL && (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) && (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) { - return PyUFunc_DefaultTypeResolver(ufunc, casting, operands, - default_type_tup, out_dtypes); + return PyUFunc_DefaultTypeResolver( + ufunc, casting, operands, + npy_static_pydata.default_truediv_type_tup, out_dtypes); } return PyUFunc_DivisionTypeResolver(ufunc, casting, operands, type_tup, out_dtypes); @@ -1471,7 +1427,7 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc, static int find_userloop(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata) { @@ -1535,7 +1491,7 @@ find_userloop(PyUFuncObject *ufunc, NPY_NO_EXPORT int PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata, int *out_needs_api) @@ -1963,10 +1919,12 @@ linear_search_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - assert(npy_promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); + int promotion_state = get_npy_promotion_state(); + + assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); /* Always "use" with new promotion in case of Python int/float/complex */ int use_min_scalar; - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + if (promotion_state == NPY_USE_LEGACY_PROMOTION) { use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); 
} else { @@ -2165,10 +2123,12 @@ type_tuple_type_resolver(PyUFuncObject *self, ufunc_name = ufunc_get_name_cstr(self); - assert(npy_promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); + int promotion_state = get_npy_promotion_state(); + + assert(promotion_state != NPY_USE_WEAK_PROMOTION_AND_WARN); /* Always "use" with new promotion in case of Python int/float/complex */ int use_min_scalar; - if (npy_promotion_state == NPY_USE_LEGACY_PROMOTION) { + if (promotion_state == NPY_USE_LEGACY_PROMOTION) { use_min_scalar = should_use_min_scalar(nin, op, 0, NULL); } else { diff --git a/numpy/_core/src/umath/ufunc_type_resolution.h b/numpy/_core/src/umath/ufunc_type_resolution.h index 84a2593f44c4..3f8e7505ea39 100644 --- a/numpy/_core/src/umath/ufunc_type_resolution.h +++ b/numpy/_core/src/umath/ufunc_type_resolution.h @@ -134,7 +134,7 @@ type_tuple_type_resolver(PyUFuncObject *self, NPY_NO_EXPORT int PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc, - PyArray_Descr **dtypes, + PyArray_Descr *const *dtypes, PyUFuncGenericFunction *out_innerloop, void **out_innerloopdata, int *out_needs_api); diff --git a/numpy/_core/src/umath/umathmodule.c b/numpy/_core/src/umath/umathmodule.c index b8b920b50137..0c8fc4857ea7 100644 --- a/numpy/_core/src/umath/umathmodule.c +++ b/numpy/_core/src/umath/umathmodule.c @@ -21,6 +21,8 @@ #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/npy_3kcompat.h" +#include "npy_pycompat.h" +#include "npy_argparse.h" #include "abstract.h" #include "numpy/npy_math.h" @@ -30,6 +32,7 @@ #include "stringdtype_ufuncs.h" #include "special_integer_comparisons.h" #include "extobj.h" /* for _extobject_contextvar exposure */ +#include "ufunc_type_resolution.h" /* Automatically generated code to define all ufuncs: */ #include "funcs.inc" @@ -207,29 +210,6 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) ***************************************************************************** */ -NPY_VISIBILITY_HIDDEN PyObject 
*npy_um_str_array_ufunc = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_array_wrap = NULL; -NPY_VISIBILITY_HIDDEN PyObject *npy_um_str_pyvals_name = NULL; - -/* intern some strings used in ufuncs, returns 0 on success */ -static int -intern_strings(void) -{ - npy_um_str_array_ufunc = PyUnicode_InternFromString("__array_ufunc__"); - if (npy_um_str_array_ufunc == NULL) { - return -1; - } - npy_um_str_array_wrap = PyUnicode_InternFromString("__array_wrap__"); - if (npy_um_str_array_wrap == NULL) { - return -1; - } - npy_um_str_pyvals_name = PyUnicode_InternFromString(UFUNC_PYVALS_NAME); - if (npy_um_str_pyvals_name == NULL) { - return -1; - } - return 0; -} - /* Setup the umath part of the module */ int initumath(PyObject *m) @@ -271,8 +251,8 @@ int initumath(PyObject *m) #undef ADDSCONST PyModule_AddIntConstant(m, "UFUNC_BUFSIZE_DEFAULT", (long)NPY_BUFSIZE); - Py_INCREF(npy_extobj_contextvar); - PyModule_AddObject(m, "_extobj_contextvar", npy_extobj_contextvar); + Py_INCREF(npy_static_pydata.npy_extobj_contextvar); + PyModule_AddObject(m, "_extobj_contextvar", npy_static_pydata.npy_extobj_contextvar); PyModule_AddObject(m, "PINF", PyFloat_FromDouble(NPY_INFINITY)); PyModule_AddObject(m, "NINF", PyFloat_FromDouble(-NPY_INFINITY)); @@ -285,47 +265,50 @@ int initumath(PyObject *m) s = PyDict_GetItemString(d, "conjugate"); s2 = PyDict_GetItemString(d, "remainder"); + /* Setup the array object's numerical structures with appropriate ufuncs in d*/ - _PyArray_SetNumericOps(d); + if (_PyArray_SetNumericOps(d) < 0) { + return -1; + } PyDict_SetItemString(d, "conj", s); PyDict_SetItemString(d, "mod", s2); - if (intern_strings() < 0) { - PyErr_SetString(PyExc_RuntimeError, - "cannot intern umath strings while initializing _multiarray_umath."); - return -1; - } - /* * Set up promoters for logical functions * TODO: This should probably be done at a better place, or even in the * code generator directly. 
*/ - s = _PyDict_GetItemStringWithError(d, "logical_and"); - if (s == NULL) { + int res = PyDict_GetItemStringRef(d, "logical_and", &s); + if (res <= 0) { return -1; } if (install_logical_ufunc_promoter(s) < 0) { + Py_DECREF(s); return -1; } + Py_DECREF(s); - s = _PyDict_GetItemStringWithError(d, "logical_or"); - if (s == NULL) { + res = PyDict_GetItemStringRef(d, "logical_or", &s); + if (res <= 0) { return -1; } if (install_logical_ufunc_promoter(s) < 0) { + Py_DECREF(s); return -1; } + Py_DECREF(s); - s = _PyDict_GetItemStringWithError(d, "logical_xor"); - if (s == NULL) { + res = PyDict_GetItemStringRef(d, "logical_xor", &s); + if (res <= 0) { return -1; } if (install_logical_ufunc_promoter(s) < 0) { + Py_DECREF(s); return -1; } + Py_DECREF(s); if (init_string_ufuncs(d) < 0) { return -1; @@ -339,5 +322,9 @@ int initumath(PyObject *m) return -1; } + if (init_argparse_mutex() < 0) { + return -1; + } + return 0; } diff --git a/numpy/_core/src/umath/wrapping_array_method.c b/numpy/_core/src/umath/wrapping_array_method.c index 3f3228237c21..9b3970561f3f 100644 --- a/numpy/_core/src/umath/wrapping_array_method.c +++ b/numpy/_core/src/umath/wrapping_array_method.c @@ -26,6 +26,7 @@ #include "numpy/ndarraytypes.h" +#include "npy_pycompat.h" #include "common.h" #include "array_method.h" #include "legacy_array_method.h" @@ -36,8 +37,8 @@ static NPY_CASTING wrapping_method_resolve_descriptors( PyArrayMethodObject *self, - PyArray_DTypeMeta *dtypes[], - PyArray_Descr *given_descrs[], + PyArray_DTypeMeta *const dtypes[], + PyArray_Descr *const given_descrs[], PyArray_Descr *loop_descrs[], npy_intp *view_offset) { @@ -54,7 +55,7 @@ wrapping_method_resolve_descriptors( self->wrapped_meth, self->wrapped_dtypes, orig_given_descrs, orig_loop_descrs, view_offset); for (int i = 0; i < nargs; i++) { - Py_XDECREF(orig_given_descrs); + Py_XDECREF(orig_given_descrs[i]); } if (casting < 0) { return -1; @@ -62,7 +63,7 @@ wrapping_method_resolve_descriptors( int res = 
self->translate_loop_descrs( nin, nout, dtypes, given_descrs, orig_loop_descrs, loop_descrs); for (int i = 0; i < nargs; i++) { - Py_DECREF(orig_given_descrs); + Py_DECREF(orig_loop_descrs[i]); } if (res < 0) { return -1; @@ -95,6 +96,7 @@ wrapping_auxdata_free(wrapping_auxdata *wrapping_auxdata) if (wrapping_auxdata_freenum < WRAPPING_AUXDATA_FREELIST_SIZE) { wrapping_auxdata_freelist[wrapping_auxdata_freenum] = wrapping_auxdata; + wrapping_auxdata_freenum++; } else { PyMem_Free(wrapping_auxdata); @@ -158,8 +160,8 @@ wrapping_method_get_loop( auxdata->orig_context.caller = context->caller; if (context->method->translate_given_descrs( - nin, nout, context->method->wrapped_dtypes, - context->descriptors, auxdata->orig_context.descriptors) < 0) { + nin, nout, context->method->wrapped_dtypes, context->descriptors, + (PyArray_Descr **)auxdata->orig_context.descriptors) < 0) { NPY_AUXDATA_FREE((NpyAuxData *)auxdata); return -1; } @@ -250,8 +252,9 @@ PyUFunc_AddWrappingLoop(PyObject *ufunc_obj, PyObject *loops = ufunc->_loops; Py_ssize_t length = PyList_Size(loops); for (Py_ssize_t i = 0; i < length; i++) { - PyObject *item = PyList_GetItem(loops, i); + PyObject *item = PyList_GetItemRef(loops, i); PyObject *cur_DType_tuple = PyTuple_GetItem(item, 0); + Py_DECREF(item); int cmp = PyObject_RichCompareBool(cur_DType_tuple, wrapped_dt_tuple, Py_EQ); if (cmp < 0) { goto finish; diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index 78e39add631a..4f732fdcfdbc 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -37,6 +37,14 @@ _replace, _expandtabs_length, _expandtabs, + _center, + _ljust, + _rjust, + _zfill, + _partition, + _partition_index, + _rpartition, + _rpartition_index, ) @@ -46,12 +54,17 @@ "add", "multiply", "isalpha", "isdigit", "isspace", "isalnum", "islower", "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find", "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip", - "rstrip", "strip", "replace", 
"expandtabs", + "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust", + "zfill", "partition", "rpartition", # _vec_string - Will gradually become ufuncs as well - "mod", "decode", "encode", "center", "ljust", "rjust", "zfill", "upper", - "lower", "swapcase", "capitalize", "title", "join", "split", "rsplit", - "splitlines", "partition", "rpartition", "translate", + "upper", "lower", "swapcase", "capitalize", "title", + + # _vec_string - Will probably not become ufuncs + "mod", "decode", "encode", "translate", + + # Removed from namespace until behavior has been crystallized + # "join", "split", "rsplit", "splitlines", ] @@ -113,17 +126,19 @@ def multiply(a, i): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype i : array_like, with any integer dtype Returns ------- out : ndarray - Output array of str or unicode, depending on input types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types Examples -------- + >>> import numpy as np >>> a = np.array(["a", "b", "c"]) >>> np.strings.multiply(a, 3) array(['aaa', 'bbb', 'ccc'], dtype='>> import numpy as np + >>> a = np.array(["NumPy is a %s library"]) + >>> np.strings.mod(a, values=["Python"]) + array(['NumPy is a Python library'], dtype='>> a = np.array([b'%d bytes', b'%d bits']) + >>> values = np.array([8, 64]) + >>> np.strings.mod(a, values) + array([b'8 bytes', b'64 bits'], dtype='|S7') + """ return _to_bytes_or_str_array( _vec_string(a, np.object_, '__mod__', (values,)), a) @@ -195,7 +223,7 @@ def find(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype sub : array_like, with `np.bytes_` or `np.str_` dtype The substring to search for. 
@@ -214,6 +242,7 @@ def find(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> a = np.array(["NumPy is a Python library"]) >>> np.strings.find(a, "Python") array([11]) @@ -231,9 +260,9 @@ def rfind(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype @@ -248,6 +277,18 @@ def rfind(a, sub, start=0, end=None): -------- str.rfind + Examples + -------- + >>> import numpy as np + >>> a = np.array(["Computer Science"]) + >>> np.strings.rfind(a, "Science", start=0, end=None) + array([9]) + >>> np.strings.rfind(a, "Science", start=0, end=8) + array([-1]) + >>> b = np.array(["Computer Science", "Science"]) + >>> np.strings.rfind(b, "Science", start=0, end=None) + array([9, 0]) + """ end = end if end is not None else MAX return _rfind_ufunc(a, sub, start, end) @@ -259,9 +300,9 @@ def index(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype, optional @@ -276,6 +317,7 @@ def index(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> a = np.array(["Computer Science"]) >>> np.strings.index(a, "Science", start=0, end=None) array([9]) @@ -301,7 +343,7 @@ def rindex(a, sub, start=0, end=None): Returns ------- out : ndarray - Output array of ints. + Output array of ints. 
See Also -------- @@ -312,7 +354,7 @@ def rindex(a, sub, start=0, end=None): >>> a = np.array(["Computer Science"]) >>> np.strings.rindex(a, "Science", start=0, end=None) array([9]) - + """ end = end if end is not None else MAX return _rindex_ufunc(a, sub, start, end) @@ -325,9 +367,9 @@ def count(a, sub, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - sub : array_like, with `np.bytes_` or `np.str_` dtype + sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype The substring to search for. start, end : array_like, with any integer dtype @@ -344,6 +386,7 @@ def count(a, sub, start=0, end=None): Examples -------- + >>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np + >>> s = np.array(['foo', 'bar']) + >>> s + array(['foo', 'bar'], dtype='>> np.strings.startswith(s, 'fo') + array([True, False]) + >>> np.strings.startswith(s, 'o', start=1, end=2) + array([True, False]) + """ end = end if end is not None else MAX return _startswith_ufunc(a, prefix, start, end) @@ -397,9 +451,9 @@ def endswith(a, suffix, start=0, end=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype - suffix : array_like, with `np.bytes_` or `np.str_` dtype + suffix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype start, end : array_like, with any integer dtype With ``start``, test beginning at that position. With ``end``, @@ -416,6 +470,7 @@ def endswith(a, suffix, start=0, end=None): Examples -------- + >>> import numpy as np >>> s = np.array(['foo', 'bar']) >>> s array(['foo', 'bar'], dtype='>> import numpy as np >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', ... 
b'\x81\x82\xc2\xc1\xc2\x82\x81']) >>> c @@ -485,7 +541,7 @@ def encode(a, encoding=None, errors=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array_like, with ``StringDType`` or ``str_`` dtype encoding : str, optional The name of an encoding @@ -507,11 +563,12 @@ def encode(a, encoding=None, errors=None): Examples -------- + >>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.encode(a, encoding='cp037') array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@', b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7') - + """ return _to_bytes_or_str_array( _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)), @@ -533,7 +590,7 @@ def expandtabs(a, tabsize=8): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array tabsize : int, optional Replace tabs with `tabsize` number of spaces. If not given defaults @@ -542,7 +599,8 @@ def expandtabs(a, tabsize=8): Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input type See Also -------- @@ -550,7 +608,8 @@ def expandtabs(a, tabsize=8): Examples -------- - >>> a = np.array(['\t\tHello\tworld']) + >>> import numpy as np + >>> a = np.array(['\t\tHello\tworld']) >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP array([' Hello world'], dtype='>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='>> np.strings.center(c, width=9) @@ -608,16 +666,31 @@ def center(a, width, fillchar=' '): >>> np.strings.center(c, width=9, fillchar='*') array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='>> np.strings.center(c, width=1) - array(['a', '1', 'b', '2'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.ljust(c, 
width=3) - array(['aAa', ' a', 'abB'], dtype='>> np.strings.ljust(c, width=9) + array(['aAaAaA ', ' aA ', 'abBABba '], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rjust(a, width=3) - array(['aAa', ' a', 'abB'], dtype='>> np.strings.rjust(a, width=9) + array([' aAaAaA', ' aA ', ' abBABba'], dtype='>> import numpy as np + >>> np.strings.zfill(['1', '-1', '+1'], 3) + array(['001', '-01', '+01'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', 'abBABba']) >>> c array(['aAaAaA', 'abBABba'], dtype='>> import numpy as np >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c array(['aAaAaA', ' aA ', 'abBABba'], dtype='>> np.strings.zfill('1', 3) - array('001', dtype='>> import numpy as np >>> c = np.array(['a1b c', '1bca', 'bca1']); c array(['a1b c', '1bca', 'bca1'], dtype='>> np.strings.upper(c) @@ -918,13 +1046,14 @@ def lower(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array. Returns ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -932,6 +1061,7 @@ def lower(a): Examples -------- + >>> import numpy as np >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c array(['A1B C', '1BCA', 'BCA1'], dtype='>> np.strings.lower(c) @@ -953,13 +1083,14 @@ def swapcase(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array. 
Returns ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type + out : ndarray + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -967,6 +1098,7 @@ def swapcase(a): Examples -------- + >>> import numpy as np >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'], dtype='|S5') @@ -990,14 +1122,14 @@ def capitalize(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array of strings to capitalize. Returns ------- out : ndarray - Output array of str or unicode, depending on input - types + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1005,6 +1137,7 @@ def capitalize(a): Examples -------- + >>> import numpy as np >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='|S4') @@ -1030,13 +1163,14 @@ def title(a): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array. 
Returns ------- out : ndarray - Output array of str or unicode, depending on input type + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1044,6 +1178,7 @@ def title(a): Examples -------- + >>> import numpy as np >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c array(['a1b c', '1b ca', 'b ca1', 'ca1b'], dtype='|S5') @@ -1074,7 +1209,8 @@ def replace(a, old, new, count=-1): Returns ------- out : ndarray - Output array of ``str_`` or ``bytes_`` dtype + Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype, + depending on input types See Also -------- @@ -1082,6 +1218,7 @@ def replace(a, old, new, count=-1): Examples -------- + >>> import numpy as np >>> a = np.array(["That is a mango", "Monkeys eat mangos"]) >>> np.strings.replace(a, 'mango', 'banana') array(['That is a banana', 'Monkeys eat bananas'], dtype='>> a = np.array(["The dish is fresh", "This is it"]) >>> np.strings.replace(a, 'is', 'was') array(['The dwash was fresh', 'Thwas was it'], dtype='>> np.strings.join('-', 'osd') - array('o-s-d', dtype='>> import numpy as np + >>> np.strings.join('-', 'osd') # doctest: +SKIP + array('o-s-d', dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) - array(['g-h-c', 'o.s.d'], dtype='>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP + array(['g-h-c', 'o.s.d'], dtype='>> import numpy as np >>> x = np.array("Numpy is nice!") - >>> np.strings.split(x, " ") - array(list(['Numpy', 'is', 'nice!']), dtype=object) + >>> np.strings.split(x, " ") # doctest: +SKIP + array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP - >>> np.strings.split(x, " ", 1) - array(list(['Numpy', 'is nice!']), dtype=object) + >>> np.strings.split(x, " ", 1) # doctest: +SKIP + array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP See Also -------- @@ -1188,7 +1334,7 @@ def split(a, sep=None, maxsplit=None): a, np.object_, 'split', [sep] + _clean_args(maxsplit)) -def rsplit(a, 
sep=None, maxsplit=None): +def _rsplit(a, sep=None, maxsplit=None): """ For each element in `a`, return a list of the words in the string, using `sep` as the delimiter string. @@ -1200,7 +1346,7 @@ def rsplit(a, sep=None, maxsplit=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype sep : str or unicode, optional If `sep` is not specified or None, any whitespace string @@ -1212,7 +1358,7 @@ def rsplit(a, sep=None, maxsplit=None): Returns ------- out : ndarray - Array of list objects + Array of list objects See Also -------- @@ -1220,10 +1366,12 @@ def rsplit(a, sep=None, maxsplit=None): Examples -------- + >>> import numpy as np >>> a = np.array(['aAaAaA', 'abBABba']) - >>> np.strings.rsplit(a, 'A') - array([list(['a', 'a', 'a', '']), list(['abB', 'Bba'])], dtype=object) - + >>> np.strings.rsplit(a, 'A') # doctest: +SKIP + array([list(['a', 'a', 'a', '']), # doctest: +SKIP + list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP + """ # This will return an array of lists of different sizes, so we # leave it as an object array @@ -1231,7 +1379,7 @@ def rsplit(a, sep=None, maxsplit=None): a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit)) -def splitlines(a, keepends=None): +def _splitlines(a, keepends=None): """ For each element in `a`, return a list of the lines in the element, breaking at line boundaries. @@ -1240,7 +1388,7 @@ def splitlines(a, keepends=None): Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype keepends : bool, optional Line breaks are not included in the resulting list unless @@ -1262,70 +1410,98 @@ def splitlines(a, keepends=None): def partition(a, sep): """ - Partition each element in `a` around `sep`. - - Calls :meth:`str.partition` element-wise. + Partition each element in ``a`` around ``sep``. 
- For each element in `a`, split the element as the first - occurrence of `sep`, and return 3 strings containing the part + For each element in ``a``, split the element at the first + occurrence of ``sep``, and return a 3-tuple containing the part before the separator, the separator itself, and the part after - the separator. If the separator is not found, return 3 strings - containing the string itself, followed by two empty strings. + the separator. If the separator is not found, the first item of + the tuple will contain the whole string, and the second and third + ones will be the empty string. Parameters ---------- - a : array_like, with `np.bytes_` or `np.str_` dtype + a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype Input array - sep : {str, unicode} - Separator to split each string element in `a`. + sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype + Separator to split each string element in ``a``. Returns ------- - out : ndarray, {str, unicode} - Output array of str or unicode, depending on input type. - The output array will have an extra dimension with 3 - elements per input element. 
+ out : 3-tuple: + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part before the separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + separator + - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the + part after the separator + + See Also + -------- + str.partition Examples -------- + >>> import numpy as np >>> x = np.array(["Numpy is nice!"]) >>> np.strings.partition(x, " ") - array([['Numpy', ' ', 'is nice!']], dtype='>> import numpy as np >>> a = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> np.strings.rpartition(a, 'A') - array([['aAaAa', 'A', ''], - [' a', 'A', ' '], - ['abB', 'A', 'Bba']], dtype='>> import numpy as np >>> a = np.array(['a1b c', '1bca', 'bca1']) >>> table = a[0].maketrans('abc', '123') >>> deletechars = ' ' >>> np.char.translate(a, table, deletechars) array(['112 3', '1231', '2311'], dtype='=3.0.6') error('tests requires Cython >= 3.0.6') endif +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + npy_include_path = run_command(py, [ '-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))' @@ -34,4 +39,5 @@ py.extension_module( '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION', ], include_directories: [npy_include_path], + cython_args: cython_args, ) diff --git a/numpy/_core/tests/examples/cython/setup.py b/numpy/_core/tests/examples/cython/setup.py index 97b7b4317ffa..1bf027700748 100644 --- a/numpy/_core/tests/examples/cython/setup.py +++ b/numpy/_core/tests/examples/cython/setup.py @@ -3,7 +3,9 @@ for testing. 
""" +import Cython import numpy as np +from numpy._utils import _pep440 from distutils.core import setup from Cython.Build import cythonize from setuptools.extension import Extension @@ -24,6 +26,12 @@ extensions = [checks] +compiler_directives = {} +if _pep440.parse(Cython.__version__) >= _pep440.parse("3.1.0a0"): + compiler_directives['freethreading_compatible'] = True + setup( - ext_modules=cythonize(extensions) + ext_modules=cythonize( + extensions, + compiler_directives=compiler_directives) ) diff --git a/numpy/_core/tests/examples/limited_api/limited_api_latest.c b/numpy/_core/tests/examples/limited_api/limited_api_latest.c new file mode 100644 index 000000000000..13668f2f0ebf --- /dev/null +++ b/numpy/_core/tests/examples/limited_api/limited_api_latest.c @@ -0,0 +1,19 @@ +#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000 + # error "Py_LIMITED_API not defined to Python major+minor version" +#endif + +#include +#include +#include + +static PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = "limited_api_latest" +}; + +PyMODINIT_FUNC PyInit_limited_api_latest(void) +{ + import_array(); + import_umath(); + return PyModule_Create(&moduledef); +} diff --git a/numpy/_core/tests/examples/limited_api/meson.build b/numpy/_core/tests/examples/limited_api/meson.build index a6d290304036..65287d8654f5 100644 --- a/numpy/_core/tests/examples/limited_api/meson.build +++ b/numpy/_core/tests/examples/limited_api/meson.build @@ -34,6 +34,16 @@ py.extension_module( limited_api: '3.6', ) +py.extension_module( + 'limited_api_latest', + 'limited_api_latest.c', + c_args: [ + '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION', + ], + include_directories: [npy_include_path], + limited_api: py.language_version(), +) + py.extension_module( 'limited_api2', 'limited_api2.pyx', diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index 5b9bdb60f1b3..1ac7a49b3610 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -406,6 
+406,37 @@ def test_copyto(): # 'dst' must be an array assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4]) + +def test_copyto_cast_safety(): + with pytest.raises(TypeError): + np.copyto(np.arange(3), 3., casting="safe") + + # Can put integer and float scalars safely (and equiv): + np.copyto(np.arange(3), 3, casting="equiv") + np.copyto(np.arange(3.), 3., casting="equiv") + # And also with less precision safely: + np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe") + np.copyto(np.arange(3., dtype="float32"), 3., casting="safe") + + # But not equiv: + with pytest.raises(TypeError): + np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv") + + with pytest.raises(TypeError): + np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv") + + # As a special thing, object is equiv currently: + np.copyto(np.arange(3, dtype=object), 3, casting="equiv") + + # The following raises an overflow error/gives a warning but not + # type error (due to casting), though: + with pytest.raises(OverflowError): + np.copyto(np.arange(3), 2**80, casting="safe") + + with pytest.warns(RuntimeWarning): + np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe") + + def test_copyto_permut(): # test explicit overflow case pad = 500 diff --git a/numpy/_core/tests/test_argparse.py b/numpy/_core/tests/test_argparse.py index cddee72ea04c..ededced3b9fe 100644 --- a/numpy/_core/tests/test_argparse.py +++ b/numpy/_core/tests/test_argparse.py @@ -11,10 +11,29 @@ def func(arg1, /, arg2, *, arg3): return None """ +import threading + import pytest import numpy as np -from numpy._core._multiarray_tests import argparse_example_function as func +from numpy._core._multiarray_tests import ( + argparse_example_function as func, + threaded_argparse_example_function as thread_func, +) +from numpy.testing import IS_WASM + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +def test_thread_safe_argparse_cache(): + b = threading.Barrier(8) + + def 
call_thread_func(): + b.wait() + thread_func(arg1=3, arg2=None) + + tasks = [threading.Thread(target=call_thread_func) for _ in range(8)] + [t.start() for t in tasks] + [t.join() for t in tasks] def test_invalid_integers(): diff --git a/numpy/_core/tests/test_array_api_info.py b/numpy/_core/tests/test_array_api_info.py new file mode 100644 index 000000000000..154b3837325d --- /dev/null +++ b/numpy/_core/tests/test_array_api_info.py @@ -0,0 +1,112 @@ +import numpy as np +import pytest + +info = np.__array_namespace_info__() + + +def test_capabilities(): + caps = info.capabilities() + assert caps["boolean indexing"] == True + assert caps["data-dependent shapes"] == True + + # This will be added in the 2024.12 release of the array API standard. + + # assert caps["max rank"] == 64 + # np.zeros((1,)*64) + # with pytest.raises(ValueError): + # np.zeros((1,)*65) + + +def test_default_device(): + assert info.default_device() == "cpu" == np.asarray(0).device + + +def test_default_dtypes(): + dtypes = info.default_dtypes() + assert dtypes["real floating"] == np.float64 == np.asarray(0.0).dtype + assert dtypes["complex floating"] == np.complex128 == \ + np.asarray(0.0j).dtype + assert dtypes["integral"] == np.intp == np.asarray(0).dtype + assert dtypes["indexing"] == np.intp == np.argmax(np.zeros(10)).dtype + + with pytest.raises(ValueError, match="Device not understood"): + info.default_dtypes(device="gpu") + + +def test_dtypes_all(): + dtypes = info.dtypes() + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + "float32": np.float32, + "float64": np.float64, + "complex64": np.complex64, + "complex128": np.complex128, + } + + +dtype_categories = { + "bool": {"bool": np.bool_}, + "signed integer": { + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + }, + "unsigned integer": { + 
"uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + }, + "integral": ("signed integer", "unsigned integer"), + "real floating": {"float32": np.float32, "float64": np.float64}, + "complex floating": {"complex64": np.complex64, "complex128": + np.complex128}, + "numeric": ("integral", "real floating", "complex floating"), +} + + +@pytest.mark.parametrize("kind", dtype_categories) +def test_dtypes_kind(kind): + expected = dtype_categories[kind] + if isinstance(expected, tuple): + assert info.dtypes(kind=kind) == info.dtypes(kind=expected) + else: + assert info.dtypes(kind=kind) == expected + + +def test_dtypes_tuple(): + dtypes = info.dtypes(kind=("bool", "integral")) + assert dtypes == { + "bool": np.bool_, + "int8": np.int8, + "int16": np.int16, + "int32": np.int32, + "int64": np.int64, + "uint8": np.uint8, + "uint16": np.uint16, + "uint32": np.uint32, + "uint64": np.uint64, + } + + +def test_dtypes_invalid_kind(): + with pytest.raises(ValueError, match="unsupported kind"): + info.dtypes(kind="invalid") + + +def test_dtypes_invalid_device(): + with pytest.raises(ValueError, match="Device not understood"): + info.dtypes(device="gpu") + + +def test_devices(): + assert info.devices() == ["cpu"] diff --git a/numpy/_core/tests/test_array_coercion.py b/numpy/_core/tests/test_array_coercion.py index 726e8d8252a8..c2172d40d81e 100644 --- a/numpy/_core/tests/test_array_coercion.py +++ b/numpy/_core/tests/test_array_coercion.py @@ -38,7 +38,7 @@ def subclass(a): yield subclass - class _SequenceLike(): + class _SequenceLike: # Older NumPy versions, sometimes cared whether a protocol array was # also _SequenceLike. This shouldn't matter, but keep it for now # for __array__ and not the others. 
@@ -54,7 +54,9 @@ def __init__(self, a): self.a = a def __array__(self, dtype=None, copy=None): - return self.a + if dtype is None: + return self.a + return self.a.astype(dtype) yield param(ArrayDunder, id="__array__") @@ -760,6 +762,17 @@ def __getitem__(self): with pytest.raises(error): np.array(BadSequence()) + def test_array_interface_descr_optional(self): + # The descr should be optional regresion test for gh-27249 + arr = np.ones(10, dtype="V10") + iface = arr.__array_interface__ + iface.pop("descr") + + class MyClass: + __array_interface__ = iface + + assert_array_equal(np.asarray(MyClass), arr) + class TestAsArray: """Test expected behaviors of ``asarray``.""" diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py index f8e0dfad64c5..ae719568a4b2 100644 --- a/numpy/_core/tests/test_array_interface.py +++ b/numpy/_core/tests/test_array_interface.py @@ -1,7 +1,7 @@ import sys import pytest import numpy as np -from numpy.testing import extbuild, IS_WASM +from numpy.testing import extbuild, IS_WASM, IS_EDITABLE @pytest.fixture @@ -9,11 +9,12 @@ def get_module(tmp_path): """ Some codes to generate data and manage temporary buffers use when sharing with numpy via the array interface protocol. 
""" - - if not sys.platform.startswith('linux'): + if sys.platform.startswith('cygwin'): pytest.skip('link fails on cygwin') if IS_WASM: pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") prologue = ''' #include diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index d9caced3c1bc..e2305c974147 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -7,8 +7,9 @@ import numpy as np from numpy.testing import ( assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT, - assert_raises_regex, + assert_raises_regex, IS_WASM ) +from numpy.testing._private.utils import run_threaded from numpy._core.arrayprint import _typelessdata import textwrap @@ -626,8 +627,9 @@ def teardown_method(self): def test_basic(self): x = np.array([1.5, 0, 1.234567890]) assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])") - np.set_printoptions(precision=4) + ret = np.set_printoptions(precision=4) assert_equal(repr(x), "array([1.5 , 0. 
, 1.2346])") + assert ret is None def test_precision_zero(self): np.set_printoptions(precision=0) @@ -667,6 +669,17 @@ def test_formatter_reset(self): np.set_printoptions(formatter={'float_kind':None}) assert_equal(repr(x), "array([0., 1., 2.])") + def test_override_repr(self): + x = np.arange(3) + np.set_printoptions(override_repr=lambda x: "FOO") + assert_equal(repr(x), "FOO") + np.set_printoptions(override_repr=None) + assert_equal(repr(x), "array([0, 1, 2])") + + with np.printoptions(override_repr=lambda x: "BAR"): + assert_equal(repr(x), "BAR") + assert_equal(repr(x), "array([0, 1, 2])") + def test_0d_arrays(self): assert_equal(str(np.array('café', 'f4')]) assert str(scalar) == "(1.0, 2.0)" + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio") +@pytest.mark.skipif(sys.version_info < (3, 11), + reason="asyncio.barrier was added in Python 3.11") +def test_printoptions_asyncio_safe(): + asyncio = pytest.importorskip("asyncio") + + b = asyncio.Barrier(2) + + async def legacy_113(): + np.set_printoptions(legacy='1.13', precision=12) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=34, legacy='1.21'): + po = np.get_printoptions() + assert po['legacy'] == '1.21' + assert po['precision'] == 12 + assert po['linewidth'] == 34 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert po['legacy'] == '1.13' + assert po['precision'] == 12 + + async def legacy_125(): + np.set_printoptions(legacy='1.25', precision=7) + await b.wait() + po = np.get_printoptions() + assert po['legacy'] == '1.25' + assert po['precision'] == 7 + orig_linewidth = po['linewidth'] + with np.printoptions(linewidth=6, legacy='1.13'): + po = np.get_printoptions() + assert po['legacy'] == '1.13' + assert po['precision'] == 7 + assert po['linewidth'] == 6 + po = np.get_printoptions() + assert po['linewidth'] == orig_linewidth + assert 
po['legacy'] == '1.25' + assert po['precision'] == 7 + + async def main(): + await asyncio.gather(legacy_125(), legacy_125()) + + loop = asyncio.new_event_loop() + asyncio.run(main()) + loop.close() + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads") +def test_multithreaded_array_printing(): + # the dragon4 implementation uses a static scratch space for performance + # reasons this test makes sure it is set up in a thread-safe manner + + run_threaded(TestPrintOptions().test_floatmode, 500) diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 9649be2fcc67..35d81005cfc1 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -85,10 +85,7 @@ def cpu_have(self, feature_name): map_names = self.features_map.get(feature_name, feature_name) if isinstance(map_names, str): return map_names in self.features_flags - for f in map_names: - if f in self.features_flags: - return True - return False + return any(f in self.features_flags for f in map_names) def load_flags_cpuinfo(self, magic_key): self.features_flags = self.get_cpuinfo_item(magic_key) @@ -311,8 +308,8 @@ def test_impossible_feature_enable(self): err_type = "RuntimeError" self._expect_error(msg, err_type) - # Ensure that only the bad feature gets reported - feats = f"{bad_feature}, {self.BASELINE_FEAT}" + # Ensure that it fails even when providing garbage in addition + feats = f"{bad_feature}, Foobar" self.env['NPY_ENABLE_CPU_FEATURES'] = feats msg = ( f"You cannot enable CPU features \\({bad_feature}\\), since they " @@ -320,6 +317,16 @@ def test_impossible_feature_enable(self): ) self._expect_error(msg, err_type) + if self.BASELINE_FEAT is not None: + # Ensure that only the bad feature gets reported + feats = f"{bad_feature}, {self.BASELINE_FEAT}" + self.env['NPY_ENABLE_CPU_FEATURES'] = feats + msg = ( + f"You cannot enable CPU features \\({bad_feature}\\), since " + "they are not supported by your machine." 
+ ) + self._expect_error(msg, err_type) + is_linux = sys.platform.startswith('linux') is_cygwin = sys.platform.startswith('cygwin') machine = platform.machine() diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 3d9ac2927a33..71c1a457761b 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -7,7 +7,7 @@ import pytest import numpy as np -from numpy.testing import assert_array_equal, IS_WASM +from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE # This import is copied from random.tests.test_extending try: @@ -27,6 +27,13 @@ pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + @pytest.fixture(scope='module') def install_temp(tmpdir_factory): # Based in part on test_cython from random.tests.test_extending @@ -61,9 +68,13 @@ def install_temp(tmpdir_factory): ) try: subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) - except subprocess.CalledProcessError as p: - print(f"{p.stdout=}") - print(f"{p.stderr=}") + except subprocess.CalledProcessError: + print("----------------") + print("meson build failed when doing") + print(f"'meson setup --native-file {native_file} {srcdir}'") + print(f"'meson compile -vv'") + print(f"in {build_dir}") + print("----------------") raise sys.path.append(str(build_dir)) @@ -142,6 +153,13 @@ def test_default_int(install_temp): assert checks.get_default_integer() is np.dtype(int) + +def test_ravel_axis(install_temp): + import checks + + assert checks.get_ravel_axis() == np.iinfo("intc").min + + def test_convert_datetime64_to_datetimestruct(install_temp): # GH#21199 import checks @@ -274,3 +292,11 @@ def test_fillwithbytes(install_temp): arr = checks.compile_fillwithbyte() assert_array_equal(arr, np.ones((1, 2))) + + +def test_complex(install_temp): + from checks import inc2_cfloat_struct + + arr = 
np.array([0, 10+10j], dtype="F") + inc2_cfloat_struct(arr) + assert arr[1] == (12 + 12j) diff --git a/numpy/_core/tests/test_defchararray.py b/numpy/_core/tests/test_defchararray.py index a7716ab7baf0..6b688ab443a4 100644 --- a/numpy/_core/tests/test_defchararray.py +++ b/numpy/_core/tests/test_defchararray.py @@ -738,6 +738,16 @@ def test_slice(self): assert_(arr[0, 0] == b'abc') + @pytest.mark.parametrize('data', [['plate', ' ', 'shrimp'], + [b'retro', b' ', b'encabulator']]) + def test_getitem_length_zero_item(self, data): + # Regression test for gh-26375. + a = np.char.array(data) + # a.dtype.type() will be an empty string or bytes instance. + # The equality test will fail if a[1] has the wrong type + # or does not have length 0. + assert_equal(a[1], a.dtype.type()) + class TestMethodsEmptyArray: def setup_method(self): diff --git a/numpy/_core/tests/test_deprecations.py b/numpy/_core/tests/test_deprecations.py index f0d4d533cd92..33431faef684 100644 --- a/numpy/_core/tests/test_deprecations.py +++ b/numpy/_core/tests/test_deprecations.py @@ -14,7 +14,7 @@ import numpy as np from numpy.testing import ( assert_raises, assert_warns, assert_, assert_array_equal, SkipTest, - KnownFailureException, break_cycles, + KnownFailureException, break_cycles, temppath ) from numpy._core._multiarray_tests import fromstring_null_term_c_api @@ -217,6 +217,11 @@ class TestBincount(_DeprecationTestCase): def test_bincount_minlength(self): self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None)) + # 2024-07-29, 2.1.0 + @pytest.mark.parametrize('badlist', [[0.5, 1.2, 1.5], + ['0', '1', '1']]) + def test_bincount_bad_list(self, badlist): + self.assert_deprecated(lambda: np.bincount(badlist)) class TestGeneratorSum(_DeprecationTestCase): @@ -283,13 +288,6 @@ def test_deprecate_unparsable_string(self, invalid_str): assert_array_equal(res, x) -class TestNonZero(_DeprecationTestCase): - # 2019-05-26, 1.17.0 - def test_zerod(self): - self.assert_deprecated(lambda: 
np.nonzero(np.array(0))) - self.assert_deprecated(lambda: np.nonzero(np.array(1))) - - class TestToString(_DeprecationTestCase): # 2020-03-06 1.19.0 message = re.escape("tostring() is deprecated. Use tobytes() instead.") @@ -677,18 +675,22 @@ def test_lib_functions_deprecation_call(self): class TestDeprecatedDTypeAliases(_DeprecationTestCase): - @staticmethod - def _check_for_warning(func): + def _check_for_warning(self, func): with warnings.catch_warnings(record=True) as caught_warnings: func() assert len(caught_warnings) == 1 w = caught_warnings[0] assert w.category is DeprecationWarning - assert "alias `a` was removed in NumPy 2.0" in str(w.message) + assert "alias 'a' was deprecated in NumPy 2.0" in str(w.message) def test_a_dtype_alias(self): - self._check_for_warning(lambda: np.dtype("a")) - self._check_for_warning(lambda: np.dtype("a10")) + for dtype in ["a", "a10"]: + f = lambda: np.dtype(dtype) + self._check_for_warning(f) + self.assert_deprecated(f) + f = lambda: np.array(["hello", "world"]).astype("a10") + self._check_for_warning(f) + self.assert_deprecated(f) class TestDeprecatedArrayWrap(_DeprecationTestCase): @@ -723,3 +725,26 @@ class TestDeprecatedDTypeParenthesizedRepeatCount(_DeprecationTestCase): @pytest.mark.parametrize("string", ["(2)i,", "(3)3S,", "f,(2)f"]) def test_parenthesized_repeat_count(self, string): self.assert_deprecated(np.dtype, args=(string,)) + + +class TestDeprecatedSaveFixImports(_DeprecationTestCase): + # Deprecated in Numpy 2.1, 2024-05 + message = "The 'fix_imports' flag is deprecated and has no effect." 
+ + def test_deprecated(self): + with temppath(suffix='.npy') as path: + sample_args = (path, np.array(np.zeros((1024, 10)))) + self.assert_not_deprecated(np.save, args=sample_args) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'fix_imports': False}) + for allow_pickle in [True, False]: + self.assert_not_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': True}) + self.assert_deprecated(np.save, args=sample_args, + kwargs={'allow_pickle': allow_pickle, + 'fix_imports': False}) diff --git a/numpy/_core/tests/test_dlpack.py b/numpy/_core/tests/test_dlpack.py index da648fd36afb..d9205912124e 100644 --- a/numpy/_core/tests/test_dlpack.py +++ b/numpy/_core/tests/test_dlpack.py @@ -5,11 +5,23 @@ from numpy.testing import assert_array_equal, IS_PYPY +def new_and_old_dlpack(): + yield np.arange(5) + + class OldDLPack(np.ndarray): + # Support only the "old" version + def __dlpack__(self, stream=None): + return super().__dlpack__(stream=None) + + yield np.arange(5).view(OldDLPack) + + class TestDLPack: @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") - def test_dunder_dlpack_refcount(self): + @pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)]) + def test_dunder_dlpack_refcount(self, max_version): x = np.arange(5) - y = x.__dlpack__() + y = x.__dlpack__(max_version=max_version) assert sys.getrefcount(x) == 3 del y assert sys.getrefcount(x) == 2 @@ -21,6 +33,18 @@ def test_dunder_dlpack_stream(self): with pytest.raises(RuntimeError): x.__dlpack__(stream=1) + def test_dunder_dlpack_copy(self): + # Checks the argument parsing of __dlpack__ explicitly. + # Honoring the flag is tested in the from_dlpack round-tripping test. 
+ x = np.arange(5) + x.__dlpack__(copy=True) + x.__dlpack__(copy=None) + x.__dlpack__(copy=False) + + with pytest.raises(ValueError): + # NOTE: The copy converter should be stricter, but not just here. + x.__dlpack__(copy=np.array([1, 2, 3])) + def test_strides_not_multiple_of_itemsize(self): dt = np.dtype([('int', np.int32), ('char', np.int8)]) y = np.zeros((5,), dtype=dt) @@ -30,12 +54,13 @@ def test_strides_not_multiple_of_itemsize(self): np.from_dlpack(z) @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.") - def test_from_dlpack_refcount(self): - x = np.arange(5) - y = np.from_dlpack(x) - assert sys.getrefcount(x) == 3 + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_from_dlpack_refcount(self, arr): + arr = arr.copy() + y = np.from_dlpack(arr) + assert sys.getrefcount(arr) == 3 del y - assert sys.getrefcount(x) == 2 + assert sys.getrefcount(arr) == 2 @pytest.mark.parametrize("dtype", [ np.bool, @@ -44,8 +69,9 @@ def test_from_dlpack_refcount(self): np.float16, np.float32, np.float64, np.complex64, np.complex128 ]) - def test_dtype_passthrough(self, dtype): - x = np.arange(5).astype(dtype) + @pytest.mark.parametrize("arr", new_and_old_dlpack()) + def test_dtype_passthrough(self, arr, dtype): + x = arr.astype(dtype) y = np.from_dlpack(x) assert y.dtype == x.dtype @@ -97,21 +123,27 @@ def test_dlpack_device(self): z = y[::2] assert z.__dlpack_device__() == (1, 0) - def dlpack_deleter_exception(self): + def dlpack_deleter_exception(self, max_version): x = np.arange(5) - _ = x.__dlpack__() + _ = x.__dlpack__(max_version=max_version) raise RuntimeError - def test_dlpack_destructor_exception(self): + @pytest.mark.parametrize("max_version", [None, (1, 0)]) + def test_dlpack_destructor_exception(self, max_version): with pytest.raises(RuntimeError): - self.dlpack_deleter_exception() + self.dlpack_deleter_exception(max_version=max_version) def test_readonly(self): x = np.arange(5) x.flags.writeable = False + # Raises without max_version with 
pytest.raises(BufferError): x.__dlpack__() + # But works fine if we try with version + y = np.from_dlpack(x) + assert not y.flags.writeable + def test_ndim0(self): x = np.array(1.0) y = np.from_dlpack(x) @@ -122,3 +154,25 @@ def test_size1dims_arrays(self): buffer=np.ones(1000, dtype=np.uint8), order='F') y = np.from_dlpack(x) assert_array_equal(x, y) + + def test_copy(self): + x = np.arange(5) + + y = np.from_dlpack(x) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=False) + assert np.may_share_memory(x, y) + y = np.from_dlpack(x, copy=True) + assert not np.may_share_memory(x, y) + + def test_device(self): + x = np.arange(5) + # requesting (1, 0), i.e. CPU device works in both calls: + x.__dlpack__(dl_device=(1, 0)) + np.from_dlpack(x, device="cpu") + np.from_dlpack(x, device=None) + + with pytest.raises(ValueError): + x.__dlpack__(dl_device=(10, 0)) + with pytest.raises(ValueError): + np.from_dlpack(x, device="gpu") diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 664f4e028151..869183956f78 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -96,6 +96,11 @@ def test_invalid_types(self): assert_raises(TypeError, np.dtype, 'q8') assert_raises(TypeError, np.dtype, 'Q8') + # Make sure negative-sized dtype raises an error + assert_raises(TypeError, np.dtype, 'S-1') + assert_raises(TypeError, np.dtype, 'U-1') + assert_raises(TypeError, np.dtype, 'V-1') + def test_richcompare_invalid_dtype_equality(self): # Make sure objects that cannot be converted to valid # dtypes results in False/True when compared to valid dtypes. 
@@ -231,6 +236,22 @@ def test_create_invalid_string_errors(self): with pytest.raises(ValueError): type(np.dtype("U"))(-1) + # OverflowError on 32 bit + with pytest.raises((TypeError, OverflowError)): + # see gh-26556 + type(np.dtype("S"))(2**61) + + with pytest.raises(TypeError): + np.dtype("S1234hello") + + def test_leading_zero_parsing(self): + dt1 = np.dtype('S010') + dt2 = np.dtype('S10') + + assert dt1 == dt2 + assert repr(dt1) == "dtype('S10')" + assert dt1.itemsize == 10 + class TestRecord: def test_equivalent_record(self): @@ -1493,7 +1514,7 @@ def test_python_integer_promotion(self, val): @np._no_nep50_warning() def test_float_int_pyscalar_promote_rational( self, weak_promotion, other, expected): - # Note that rationals are a bit akward as they promote with float64 + # Note that rationals are a bit awkward as they promote with float64 # or default ints, but not float16 or uint8/int8 (which looks # inconsistent here). The new promotion fixes this (partially?) if not weak_promotion and type(other) == float: diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 55f2546185e7..0a97693f73b0 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -10,12 +10,6 @@ assert_raises, suppress_warnings, assert_raises_regex, assert_allclose ) -try: - COMPILERS = np.show_config(mode="dicts")["Compilers"] - USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl" -except TypeError: - USING_CLANG_CL = False - # Setup for optimize einsum chars = 'abcdefghij' sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3]) @@ -621,23 +615,9 @@ def check_einsum_sums(self, dtype, do_opt=False): [2.]) # contig_stride0_outstride0_two def test_einsum_sums_int8(self): - if ( - (sys.platform == 'darwin' and platform.machine() == 'x86_64') - or - USING_CLANG_CL - ): - pytest.xfail('Fails on macOS x86-64 and when using clang-cl ' - 'with Meson, see gh-23838') self.check_einsum_sums('i1') def test_einsum_sums_uint8(self): - if ( - 
(sys.platform == 'darwin' and platform.machine() == 'x86_64') - or - USING_CLANG_CL - ): - pytest.xfail('Fails on macOS x86-64 and when using clang-cl ' - 'with Meson, see gh-23838') self.check_einsum_sums('u1') def test_einsum_sums_int16(self): diff --git a/numpy/_core/tests/test_function_base.py b/numpy/_core/tests/test_function_base.py index bebc7c52e9df..333943212646 100644 --- a/numpy/_core/tests/test_function_base.py +++ b/numpy/_core/tests/test_function_base.py @@ -223,7 +223,7 @@ def test_complex(self): assert_allclose(y, [-5, 3j]) def test_complex_shortest_path(self): - # test the shortest logorithmic spiral is used, see gh-25644 + # test the shortest logarithmic spiral is used, see gh-25644 x = 1.2 + 3.4j y = np.exp(1j*(np.pi-.1)) * x z = np.geomspace(x, y, 5) diff --git a/numpy/_core/tests/test_hashtable.py b/numpy/_core/tests/test_hashtable.py index e75cfceea412..41da06be3f2b 100644 --- a/numpy/_core/tests/test_hashtable.py +++ b/numpy/_core/tests/test_hashtable.py @@ -24,7 +24,12 @@ def test_identity_hashtable(key_length, length): res = identityhash_tester(key_length, keys_vals, replace=True) assert res is expected - # check that ensuring one duplicate definitely raises: - keys_vals.insert(0, keys_vals[-2]) + if length == 1: + return + + # add a new item with a key that is already used and a new value, this + # should error if replace is False, see gh-26690 + new_key = (keys_vals[1][0], object()) + keys_vals[0] = new_key with pytest.raises(RuntimeError): identityhash_tester(key_length, keys_vals) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index bea1c1017fb2..686caf9c7822 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -133,6 +133,28 @@ def test_empty_fancy_index(self): b = np.array([]) assert_raises(IndexError, a.__getitem__, b) + def test_gh_26542(self): + a = np.array([0, 1, 2]) + idx = np.array([2, 1, 0]) + a[idx] = a + expected = np.array([2, 1, 0]) + 
assert_equal(a, expected) + + def test_gh_26542_2d(self): + a = np.array([[0, 1, 2]]) + idx_row = np.zeros(3, dtype=int) + idx_col = np.array([2, 1, 0]) + a[idx_row, idx_col] = a + expected = np.array([[2, 1, 0]]) + assert_equal(a, expected) + + def test_gh_26542_index_overlap(self): + arr = np.arange(100) + expected_vals = np.copy(arr[:-10]) + arr[10:] = arr[:-10] + actual_vals = arr[10:] + assert_equal(actual_vals, expected_vals) + def test_ellipsis_index(self): a = np.array([[1, 2, 3], [4, 5, 6], @@ -961,7 +983,7 @@ def _get_multi_index(self, arr, indices): elif indx is None: # this is like taking a slice with one element from a new axis: indices.append(['n', np.array([0], dtype=np.intp)]) - arr = arr.reshape((arr.shape[:ax] + (1,) + arr.shape[ax:])) + arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:]) continue if isinstance(indx, np.ndarray) and indx.dtype == bool: if indx.shape != arr.shape[ax:ax+indx.ndim]: @@ -976,9 +998,9 @@ def _get_multi_index(self, arr, indices): flat_indx = np.array([0]*indx.sum(), dtype=np.intp) # concatenate axis into a single one: if indx.ndim != 0: - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + (np.prod(arr.shape[ax:ax+indx.ndim]),) - + arr.shape[ax+indx.ndim:])) + + arr.shape[ax+indx.ndim:]) indx = flat_indx else: # This could be changed, a 0-d boolean index can @@ -1045,9 +1067,9 @@ def _get_multi_index(self, arr, indices): # First of all, reshape arr to combine fancy axes into one: orig_shape = arr.shape orig_slice = orig_shape[ax:ax + len(indx[1:])] - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + (np.prod(orig_slice).astype(int),) - + arr.shape[ax + len(indx[1:]):])) + + arr.shape[ax + len(indx[1:]):]) # Check if broadcasting works res = np.broadcast(*indx[1:]) @@ -1081,9 +1103,9 @@ def _get_multi_index(self, arr, indices): raise ValueError arr = arr.take(mi.ravel(), axis=ax) try: - arr = arr.reshape((arr.shape[:ax] + arr = arr.reshape(arr.shape[:ax] + mi.shape - + 
arr.shape[ax+1:])) + + arr.shape[ax+1:]) except ValueError: # too many dimensions, probably raise IndexError diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index c1b2cfcbaff9..5a23b49171a0 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -5,7 +5,7 @@ import sysconfig import pytest -from numpy.testing import IS_WASM, IS_PYPY +from numpy.testing import IS_WASM, IS_PYPY, NOGIL_BUILD, IS_EDITABLE # This import is copied from random.tests.test_extending try: @@ -25,6 +25,13 @@ pytestmark = pytest.mark.skipif(cython is None, reason="requires cython") +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) + + @pytest.fixture(scope='module') def install_temp(tmpdir_factory): # Based in part on test_cython from random.tests.test_extending @@ -40,16 +47,18 @@ def install_temp(tmpdir_factory): pytest.skip("No usable 'meson' found") if sys.platform == "win32": subprocess.check_call(["meson", "setup", + "--werror", "--buildtype=release", "--vsenv", str(srcdir)], cwd=build_dir, ) else: - subprocess.check_call(["meson", "setup", str(srcdir)], + subprocess.check_call(["meson", "setup", "--werror", str(srcdir)], cwd=build_dir ) try: - subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir) + subprocess.check_call( + ["meson", "compile", "-vv"], cwd=build_dir) except subprocess.CalledProcessError as p: print(f"{p.stdout=}") print(f"{p.stderr=}") @@ -67,11 +76,16 @@ def install_temp(tmpdir_factory): "and Py_REF_DEBUG" ), ) +@pytest.mark.xfail( + NOGIL_BUILD, + reason="Py_GIL_DISABLED builds do not currently support the limited API", +) @pytest.mark.skipif(IS_PYPY, reason="no support for limited API in PyPy") def test_limited_api(install_temp): """Test building a third-party C extension with the limited API and building a cython extension with the limited API """ - import limited_api1 - import limited_api2 + 
import limited_api1 # Earliest (3.6) + import limited_api_latest # Latest version (current Python) + import limited_api2 # cython diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 9cbbedeca0f5..32459ab4d999 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -8,15 +8,10 @@ import pytest import numpy as np -from numpy.testing import extbuild, assert_warns, IS_WASM +from numpy.testing import extbuild, assert_warns, IS_WASM, IS_EDITABLE from numpy._core.multiarray import get_handler_name -# FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on -# Python 3.12 and up. It's an internal test utility, so for now we just skip -# these tests. - - @pytest.fixture def get_module(tmp_path): """ Add a memory policy that returns a false pointer 64 bytes into the @@ -28,6 +23,9 @@ def get_module(tmp_path): pytest.skip('link fails on cygwin') if IS_WASM: pytest.skip("Can't build module inside Wasm") + if IS_EDITABLE: + pytest.skip("Can't build module for editable install") + functions = [ ("get_default_policy", "METH_NOARGS", """ Py_INCREF(PyDataMem_DefaultHandler); @@ -43,6 +41,16 @@ def get_module(tmp_path): Py_DECREF(secret_data); return old; """), + ("set_wrong_capsule_name_data_policy", "METH_NOARGS", """ + PyObject *wrong_name_capsule = + PyCapsule_New(&secret_data_handler, "not_mem_handler", NULL); + if (wrong_name_capsule == NULL) { + return NULL; + } + PyObject *old = PyDataMem_SetHandler(wrong_name_capsule); + Py_DECREF(wrong_name_capsule); + return old; + """), ("set_old_policy", "METH_O", """ PyObject *old; if (args != NULL && PyCapsule_CheckExact(args)) { @@ -221,7 +229,6 @@ def get_module(tmp_path): more_init=more_init) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_set_policy(get_module): get_handler_name = np._core.multiarray.get_handler_name @@ -249,8 +256,11 @@ def test_set_policy(get_module): 
get_module.set_old_policy(orig_policy) assert get_handler_name() == orig_policy_name + with pytest.raises(ValueError, + match="Capsule must be named 'mem_handler'"): + get_module.set_wrong_capsule_name_data_policy() + -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_default_policy_singleton(get_module): get_handler_name = np._core.multiarray.get_handler_name @@ -272,7 +282,6 @@ def test_default_policy_singleton(get_module): assert def_policy_1 is def_policy_2 is get_module.get_default_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_policy_propagation(get_module): # The memory policy goes hand-in-hand with flags.owndata @@ -331,7 +340,6 @@ async def async_test_context_locality(get_module): assert np._core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_context_locality(get_module): if (sys.implementation.name == 'pypy' and sys.pypy_version_info[:3] < (7, 3, 6)): @@ -353,7 +361,6 @@ def concurrent_thread2(get_module, event): get_module.set_secret_data_policy() -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_thread_locality(get_module): orig_policy_name = np._core.multiarray.get_handler_name() @@ -372,7 +379,6 @@ def test_thread_locality(get_module): assert np._core.multiarray.get_handler_name() == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") @pytest.mark.skip(reason="too slow, see gh-23975") def test_new_policy(get_module): a = np.arange(10) @@ -403,7 +409,6 @@ def test_new_policy(get_module): assert np._core.multiarray.get_handler_name(c) == orig_policy_name -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") @pytest.mark.xfail(sys.implementation.name == "pypy", reason=("bad interaction between getenv and " "os.environ inside pytest")) @@ -437,7 +442,6 @@ def 
test_switch_owner(get_module, policy): np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval) -@pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils") def test_owner_is_base(get_module): a = get_module.get_array_with_base() with pytest.warns(UserWarning, match='warn_on_free'): diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 4a75d96fc06e..2f7e3c574c79 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -30,7 +30,7 @@ assert_array_equal, assert_raises_regex, assert_array_almost_equal, assert_allclose, IS_PYPY, IS_WASM, IS_PYSTON, HAS_REFCOUNT, assert_array_less, runstring, temppath, suppress_warnings, break_cycles, - _SUPPORTS_SVE, assert_array_compare, + check_support_sve, assert_array_compare, ) from numpy.testing._private.utils import requires_memory, _no_tracing from numpy._core.tests._locales import CommaDecimalPointLocale @@ -234,7 +234,7 @@ def test_readonly_flag_protocols(self, flag, flag_value, writeable): a = np.arange(10) setattr(a.flags, flag, flag_value) - class MyArr(): + class MyArr: __array_struct__ = a.__array_struct__ assert memoryview(a).readonly is not writeable @@ -266,6 +266,17 @@ def test_void_align(self): a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")])) assert_(a.flags.aligned) + @pytest.mark.parametrize("row_size", [5, 1 << 16]) + @pytest.mark.parametrize("row_count", [1, 5]) + @pytest.mark.parametrize("ndmin", [0, 1, 2]) + def test_xcontiguous_load_txt(self, row_size, row_count, ndmin): + s = io.StringIO('\n'.join(['1.0 ' * row_size] * row_count)) + a = np.loadtxt(s, ndmin=ndmin) + + assert a.flags.c_contiguous + x = [i for i in a.shape if i != 1] + assert a.flags.f_contiguous == (len(x) <= 1) + class TestHash: # see #3793 @@ -425,6 +436,18 @@ def test_fill_readonly(self): with pytest.raises(ValueError, match=".*read-only"): a.fill(0) + def test_fill_subarrays(self): + # NOTE: + # This is also a regression 
test for a crash with PYTHONMALLOC=debug + + dtype = np.dtype("2= 100).all() + assert (res[x >= 100] == x[x >= 100]).all() def test_record_array(self): rec = np.array([(-5, 2.0, 3.0), (5.0, 4.0, 3.0)], @@ -6448,11 +6511,11 @@ def test_std_where(self): [True], [False]]) _cases = [ - (0, True, 7.07106781*np.ones((5))), - (1, True, 1.41421356*np.ones((5))), + (0, True, 7.07106781*np.ones(5)), + (1, True, 1.41421356*np.ones(5)), (0, whf, np.array([4.0824829 , 8.16496581, 5., 7.39509973, 8.49836586])), - (0, whp, 2.5*np.ones((5))) + (0, whp, 2.5*np.ones(5)) ] for _ax, _wh, _res in _cases: assert_allclose(a.std(axis=_ax, where=_wh), _res) @@ -6854,7 +6917,7 @@ def test_huge_vectordot(self, dtype): def test_dtype_discovery_fails(self): # See gh-14247, error checking was missing for failed dtype discovery - class BadObject(object): + class BadObject: def __array__(self, dtype=None, copy=None): raise TypeError("just this tiny mint leaf") @@ -7215,7 +7278,7 @@ def test_matmul_empty(self): def test_matmul_exception_multiply(self): # test that matmul fails if `__mul__` is missing - class add_not_multiply(): + class add_not_multiply: def __add__(self, other): return self a = np.full((3,3), add_not_multiply()) @@ -7224,7 +7287,7 @@ def __add__(self, other): def test_matmul_exception_add(self): # test that matmul fails if `__add__` is missing - class multiply_not_add(): + class multiply_not_add: def __mul__(self, other): return self a = np.full((3,3), multiply_not_add()) @@ -8324,7 +8387,7 @@ def test_no_suboffsets(self): np.frombuffer(buffer) -class TestArrayCreationCopyArgument(object): +class TestArrayCreationCopyArgument: class RaiseOnBool: @@ -8440,10 +8503,9 @@ def __array__(self, dtype=None, copy=None): for copy in self.true_vals: res = np.array(arr, copy=copy) assert_array_equal(res, base_arr) - # An additional copy is currently forced by numpy in this case, - # you could argue, numpy does not trust the ArrayLike. 
This - # may be open for change: - assert res is not base_arr + # An additional copy is no longer forced by NumPy in this case. + # NumPy trusts the ArrayLike made a copy: + assert res is base_arr for copy in self.if_needed_vals + self.false_vals: res = np.array(arr, copy=copy) @@ -8476,9 +8538,74 @@ def __array__(self, dtype=None): assert_array_equal(arr, base_arr) assert arr is base_arr - with pytest.warns(UserWarning, match=("should implement 'dtype' " - "and 'copy' keywords")): - np.array(a, copy=False) + # As of NumPy 2.1, explicitly passing copy=True does trigger passing + # it to __array__ (deprecation warning is triggered). + with pytest.warns(DeprecationWarning, + match="__array__.*must implement.*'copy'"): + arr = np.array(a, copy=True) + assert_array_equal(arr, base_arr) + assert arr is not base_arr + + # And passing copy=False gives a deprecation warning, but also raises + # an error: + with pytest.warns(DeprecationWarning, match="__array__.*'copy'"): + with pytest.raises(ValueError, + match=r"Unable to avoid copy(.|\n)*numpy_2_0_migration_guide.html"): + np.array(a, copy=False) + + def test___array__copy_once(self): + size = 100 + base_arr = np.zeros((size, size)) + copy_arr = np.zeros((size, size)) + + class ArrayRandom: + def __init__(self): + self.true_passed = False + + def __array__(self, dtype=None, copy=None): + if copy: + self.true_passed = True + return copy_arr + else: + return base_arr + + arr_random = ArrayRandom() + first_copy = np.array(arr_random, copy=True) + assert arr_random.true_passed + assert first_copy is copy_arr + + arr_random = ArrayRandom() + no_copy = np.array(arr_random, copy=False) + assert not arr_random.true_passed + assert no_copy is base_arr + + arr_random = ArrayRandom() + _ = np.array([arr_random], copy=True) + assert not arr_random.true_passed + + arr_random = ArrayRandom() + second_copy = np.array(arr_random, copy=True, order="F") + assert arr_random.true_passed + assert second_copy is not copy_arr + + 
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test__array__reference_leak(self): + class NotAnArray: + def __array__(self, dtype=None, copy=None): + raise NotImplementedError() + + x = NotAnArray() + + refcount = sys.getrefcount(x) + + try: + np.array(x) + except NotImplementedError: + pass + + gc.collect() + + assert refcount == sys.getrefcount(x) @pytest.mark.parametrize( "arr", [np.ones(()), np.arange(81).reshape((9, 9))]) @@ -8581,7 +8708,7 @@ def test_multiarray_flags_not_writable_attribute_deletion(self): assert_raises(AttributeError, delattr, a, s) -class TestArrayInterface(): +class TestArrayInterface: class Foo: def __init__(self, value): self.value = value @@ -9047,6 +9174,12 @@ def test_resize(self): d.resize(150) assert_(old < sys.getsizeof(d)) + @pytest.mark.parametrize("dtype", ["u4,f4", "u4,O"]) + def test_resize_structured(self, dtype): + a = np.array([(0, 0.0) for i in range(5)], dtype=dtype) + a.resize(1000) + assert_array_equal(a, np.zeros(1000, dtype=dtype)) + def test_error(self): d = np.ones(100) assert_raises(TypeError, d.__sizeof__, "a") @@ -9974,7 +10107,7 @@ def test_non_c_contiguous(self): assert_array_equal(x.view(' rc2_a) - assert_(sys.getrefcount(dt) > rc2_dt) + if sys.version_info < (3, 13): + # np.dtype('f4') is immortal after Python 3.13 + assert_(sys.getrefcount(dt) > rc2_dt) it = None assert_equal(sys.getrefcount(a), rc2_a) assert_equal(sys.getrefcount(dt), rc2_dt) @@ -3217,8 +3218,6 @@ def test_warn_noclose(): assert len(sup.log) == 1 -@pytest.mark.skipif(sys.version_info[:2] == (3, 9) and sys.platform == "win32", - reason="Errors with Python 3.9 on Windows") @pytest.mark.parametrize(["in_dtype", "buf_dtype"], [("i", "O"), ("O", "i"), # most simple cases ("i,O", "O,O"), # structured partially only copying O diff --git a/numpy/_core/tests/test_nep50_promotions.py b/numpy/_core/tests/test_nep50_promotions.py index ce23be9c1dcb..ab800cb5b959 100644 --- a/numpy/_core/tests/test_nep50_promotions.py +++ 
b/numpy/_core/tests/test_nep50_promotions.py @@ -5,6 +5,8 @@ """ import operator +import threading +import warnings import numpy as np @@ -340,3 +342,26 @@ def test_oob_creation(sctype, create): assert create(sctype, iinfo.min) == iinfo.min assert create(sctype, iinfo.max) == iinfo.max + + +@pytest.mark.skipif(IS_WASM, reason="wasm doesn't have support for threads") +def test_thread_local_promotion_state(): + b = threading.Barrier(2) + + def legacy_no_warn(): + np._set_promotion_state("legacy") + b.wait() + assert np._get_promotion_state() == "legacy" + + def weak_warn(): + np._set_promotion_state("weak") + b.wait() + assert np._get_promotion_state() == "weak" + + task1 = threading.Thread(target=legacy_no_warn) + task2 = threading.Thread(target=weak_warn) + + task1.start() + task2.start() + task1.join() + task2.join() diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index 3acbb20a1619..ae80aaddd4d7 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -8,9 +8,7 @@ import numpy as np from numpy._core import umath, sctypes -from numpy._core._exceptions import _ArrayMemoryError from numpy._core.numerictypes import obj2sctype -from numpy._core.arrayprint import set_string_function from numpy.exceptions import AxisError from numpy.random import rand, randint, randn from numpy.testing import ( @@ -166,6 +164,59 @@ def test_reshape(self): tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]] assert_equal(np.reshape(arr, (2, 6)), tgt) + def test_reshape_shape_arg(self): + arr = np.arange(12) + shape = (3, 4) + expected = arr.reshape(shape) + + with pytest.raises( + TypeError, + match="You cannot specify 'newshape' and 'shape' " + "arguments at the same time." 
+ ): + np.reshape(arr, shape=shape, newshape=shape) + with pytest.raises( + TypeError, + match=r"reshape\(\) missing 1 required positional " + "argument: 'shape'" + ): + np.reshape(arr) + + assert_equal(np.reshape(arr, shape), expected) + assert_equal(np.reshape(arr, shape, order="C"), expected) + assert_equal(np.reshape(arr, shape, "C"), expected) + assert_equal(np.reshape(arr, shape=shape), expected) + assert_equal(np.reshape(arr, shape=shape, order="C"), expected) + with pytest.warns(DeprecationWarning): + actual = np.reshape(arr, newshape=shape) + assert_equal(actual, expected) + + def test_reshape_copy_arg(self): + arr = np.arange(24).reshape(2, 3, 4) + arr_f_ord = np.array(arr, order="F") + shape = (12, 2) + + assert np.shares_memory(np.reshape(arr, shape), arr) + assert np.shares_memory(np.reshape(arr, shape, order="C"), arr) + assert np.shares_memory( + np.reshape(arr_f_ord, shape, order="F"), arr_f_ord) + assert np.shares_memory(np.reshape(arr, shape, copy=None), arr) + assert np.shares_memory(np.reshape(arr, shape, copy=False), arr) + assert np.shares_memory(arr.reshape(shape, copy=False), arr) + assert not np.shares_memory(np.reshape(arr, shape, copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="C", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=True), arr) + assert not np.shares_memory( + np.reshape(arr, shape, order="F", copy=None), arr) + + err_msg = "Unable to avoid creating a copy while reshaping." 
+ with pytest.raises(ValueError, match=err_msg): + np.reshape(arr, shape, order="F", copy=False) + with pytest.raises(ValueError, match=err_msg): + np.reshape(arr_f_ord, shape, order="C", copy=False) + def test_round(self): arr = [1.56, 72.54, 6.35, 3.25] tgt = [1.6, 72.5, 6.4, 3.2] @@ -273,6 +324,18 @@ def test_take(self): out = np.take(a, indices) assert_equal(out, tgt) + pairs = [ + (np.int32, np.int32), (np.int32, np.int64), + (np.int64, np.int32), (np.int64, np.int64) + ] + for array_type, indices_type in pairs: + x = np.array([1, 2, 3, 4, 5], dtype=array_type) + ind = np.array([0, 2, 2, 3], dtype=indices_type) + tgt = np.array([1, 3, 3, 4], dtype=array_type) + out = np.take(x, ind) + assert_equal(out, tgt) + assert_equal(out.dtype, tgt.dtype) + def test_trace(self): c = [[1, 2], [3, 4], [5, 6]] assert_equal(np.trace(c), 5) @@ -799,7 +862,7 @@ def setup_method(self): # Propagation of the RISC-V Unprivileged ISA for more details. # We disable the float32 sign test on riscv64 for -np.nan as the sign # of the NaN will be lost when it's converted to a float32. - if platform.processor() != 'riscv64': + if platform.machine() != 'riscv64': self.signf[3::6][self.ef[3::6]] = -np.nan self.signd[3::6][self.ed[3::6]] = -np.nan self.signf[4::6][self.ef[4::6]] = -0. @@ -1441,6 +1504,17 @@ def test_can_cast_values(self): assert_(np.can_cast(fi.min, dt)) assert_(np.can_cast(fi.max, dt)) + @pytest.mark.parametrize("dtype", + list("?bhilqBHILQefdgFDG") + [rational]) + def test_can_cast_scalars(self, dtype): + # Basic test to ensure that scalars are supported in can-cast + # (does not check behavior exhaustively). 
+ dtype = np.dtype(dtype) + scalar = dtype.type(0) + + assert np.can_cast(scalar, "int64") == np.can_cast(dtype, "int64") + assert np.can_cast(scalar, "float32", casting="unsafe") + # Custom exception class to test exception propagation in fromiter class NIterError(Exception): @@ -1568,16 +1642,12 @@ def test_nonzero_trivial(self): assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1) assert_equal(np.nonzero(np.array([1])), ([0],)) - def test_nonzero_zerod(self): - assert_equal(np.count_nonzero(np.array(0)), 0) - assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0) - with assert_warns(DeprecationWarning): - assert_equal(np.nonzero(np.array(0)), ([],)) - - assert_equal(np.count_nonzero(np.array(1)), 1) - assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1) - with assert_warns(DeprecationWarning): - assert_equal(np.nonzero(np.array(1)), ([0],)) + def test_nonzero_zerodim(self): + err_msg = "Calling nonzero on 0d arrays is not allowed" + with assert_raises_regex(ValueError, err_msg): + np.nonzero(np.array(0)) + with assert_raises_regex(ValueError, err_msg): + np.array(1).nonzero() def test_nonzero_onedim(self): x = np.array([1, 0, 2, -1, 0, 0, 8]) @@ -1970,6 +2040,9 @@ def test_base_range(self): with assert_raises(ValueError): np.base_repr(1, 37) + def test_minimal_signed_int(self): + assert_equal(np.base_repr(np.int8(-128)), '-10000000') + def _test_array_equal_parametrizations(): """ @@ -2118,6 +2191,13 @@ def test_array_equal_equal_nan(self, bx, by, equal_nan, expected): assert_(res is expected) assert_(type(res) is bool) + def test_array_equal_different_scalar_types(self): + # https://github.com/numpy/numpy/issues/27271 + a = np.array("foo") + b = np.array(1) + assert not np.array_equal(a, b) + assert not np.array_equiv(a, b) + def test_none_compares_elementwise(self): a = np.array([None, 1, None], dtype=object) assert_equal(a == None, [True, False, True]) @@ -2668,9 +2748,9 @@ def test_object_clip(self): assert actual.tolist() == expected.tolist() 
def test_clip_all_none(self): - a = np.arange(10, dtype=object) - with assert_raises_regex(ValueError, 'max or min'): - np.clip(a, None, None) + arr = np.arange(10, dtype=object) + assert_equal(np.clip(arr, None, None), arr) + assert_equal(np.clip(arr), arr) def test_clip_invalid_casting(self): a = np.arange(10, dtype=object) @@ -2787,6 +2867,45 @@ def test_clip_property(self, data, arr): assert result.dtype == t assert_array_equal(result, expected) + def test_clip_min_max_args(self): + arr = np.arange(5) + + assert_array_equal(np.clip(arr), arr) + assert_array_equal(np.clip(arr, min=2, max=3), np.clip(arr, 2, 3)) + assert_array_equal(np.clip(arr, min=None, max=2), + np.clip(arr, None, 2)) + + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_max'"): + np.clip(arr, 2) + with assert_raises_regex(TypeError, "missing 1 required positional " + "argument: 'a_min'"): + np.clip(arr, a_max=2) + msg = ("Passing `min` or `max` keyword argument when `a_min` and " + "`a_max` are provided is forbidden.") + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, max=3) + with assert_raises_regex(ValueError, msg): + np.clip(arr, 2, 3, min=2) + + @pytest.mark.parametrize("dtype,min,max", [ + ("int32", -2**32-1, 2**32), + ("int32", -2**320, None), + ("int32", None, 2**300), + ("int32", -1000, 2**32), + ("int32", -2**32-1, 1000), + ("uint8", -1, 129), + ]) + def test_out_of_bound_pyints(self, dtype, min, max): + a = np.arange(10000).astype(dtype) + # Check min only + c = np.clip(a, min=min, max=max) + assert not np.may_share_memory(a, c) + assert c.dtype == a.dtype + if min is not None: + assert (c >= min).all() + if max is not None: + assert (c <= max).all() class TestAllclose: rtol = 1e-5 @@ -3345,7 +3464,7 @@ class MyNDArray(np.ndarray): assert_(type(b) is not MyNDArray) # Test invalid dtype - with assert_raises(_ArrayMemoryError): + with assert_raises(TypeError): a = np.array(b"abc") like_function(a, dtype="S-1", **fill_kwarg) @@ 
-3361,7 +3480,11 @@ def test_empty_like(self): def test_filled_like(self): self.check_like_function(np.full_like, 0, True) self.check_like_function(np.full_like, 1, True) - self.check_like_function(np.full_like, 1000, True) + # Large integers may overflow, but using int64 is OK (casts) + # see also gh-27075 + with pytest.raises(OverflowError): + np.full_like(np.ones(3, dtype=np.int8), 1000) + self.check_like_function(np.full_like, np.int64(1000), True) self.check_like_function(np.full_like, 123.456, True) # Inf to integer casts cause invalid-value errors: ignore them. with np.errstate(invalid="ignore"): @@ -3521,24 +3644,6 @@ def test_list(self): assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]]) -@pytest.mark.filterwarnings( - "ignore:.*set_string_function.*:DeprecationWarning" -) -class TestStringFunction: - - def test_set_string_function(self): - a = np.array([1]) - set_string_function(lambda x: "FOO", repr=True) - assert_equal(repr(a), "FOO") - set_string_function(None, repr=True) - assert_equal(repr(a), "array([1])") - - set_string_function(lambda x: "FOO", repr=False) - assert_equal(str(a), "FOO") - set_string_function(None, repr=False) - assert_equal(str(a), "[1]") - - class TestRoll: def test_roll1d(self): x = np.arange(10) @@ -3596,6 +3701,18 @@ def test_roll_empty(self): x = np.array([]) assert_equal(np.roll(x, 1), np.array([])) + def test_roll_unsigned_shift(self): + x = np.arange(4) + shift = np.uint16(2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + shift = np.uint64(2**63+2) + assert_equal(np.roll(x, shift), np.roll(x, 2)) + + def test_roll_big_int(self): + x = np.arange(4) + assert_equal(np.roll(x, 2**100), x) + class TestRollaxis: @@ -4065,5 +4182,10 @@ def test_astype(self): actual, np.astype(actual, actual.dtype, copy=False) ) + actual = np.astype(np.int64(10), np.float64) + expected = np.float64(10) + assert_equal(actual, expected) + assert_equal(actual.dtype, expected.dtype) + with pytest.raises(TypeError, match="Input 
should be a NumPy array"): np.astype(data, np.float64) diff --git a/numpy/_core/tests/test_numerictypes.py b/numpy/_core/tests/test_numerictypes.py index 1134e32025fb..db4509b9c28f 100644 --- a/numpy/_core/tests/test_numerictypes.py +++ b/numpy/_core/tests/test_numerictypes.py @@ -471,8 +471,18 @@ def test_isdtype_invalid_args(self): with assert_raises_regex(TypeError, r".*must be a NumPy dtype.*"): np.isdtype("int64", np.int64) with assert_raises_regex(TypeError, r".*kind argument must.*"): + np.isdtype(np.int64, 1) + with assert_raises_regex(ValueError, r".*not a known kind name.*"): np.isdtype(np.int64, "int64") + def test_sctypes_complete(self): + # issue 26439: int32/intc were masking each other on 32-bit builds + assert np.int32 in sctypes['int'] + assert np.intc in sctypes['int'] + assert np.int64 in sctypes['int'] + assert np.uint32 in sctypes['uint'] + assert np.uintc in sctypes['uint'] + assert np.uint64 in sctypes['uint'] class TestSctypeDict: def test_longdouble(self): @@ -560,6 +570,7 @@ def test_issctype(rep, expected): # ensure proper identification of scalar # data-types by issctype() actual = issctype(rep) + assert type(actual) is bool assert_equal(actual, expected) diff --git a/numpy/_core/tests/test_overrides.py b/numpy/_core/tests/test_overrides.py index 025cd001ff0a..fabcaa10801e 100644 --- a/numpy/_core/tests/test_overrides.py +++ b/numpy/_core/tests/test_overrides.py @@ -203,6 +203,14 @@ def test_no_wrapper(self): array.__array_function__(func=func, types=(np.ndarray,), args=(array,), kwargs={}) + def test_wrong_arguments(self): + # Check our implementation guards against wrong arguments. 
+ a = np.array([1, 2]) + with pytest.raises(TypeError, match="args must be a tuple"): + a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1)) + with pytest.raises(TypeError, match="kwargs must be a dict"): + a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1)) + class TestArrayFunctionDispatch: @@ -540,7 +548,7 @@ def __array_function__(self, func, types, args, kwargs): class TestArrayLike: def setup_method(self): - class MyArray(): + class MyArray: def __init__(self, function=None): self.function = function @@ -554,7 +562,7 @@ def __array_function__(self, func, types, args, kwargs): self.MyArray = MyArray - class MyNoArrayFunctionArray(): + class MyNoArrayFunctionArray: def __init__(self, function=None): self.function = function diff --git a/numpy/_core/tests/test_protocols.py b/numpy/_core/tests/test_protocols.py index 7cab1223bfe1..1709629fa89b 100644 --- a/numpy/_core/tests/test_protocols.py +++ b/numpy/_core/tests/test_protocols.py @@ -35,8 +35,8 @@ def test_array_called(): class Wrapper: val = '0' * 100 - def __array__(self, result=None, copy=None): - return np.array([self.val], dtype=object) + def __array__(self, dtype=None, copy=None): + return np.array([self.val], dtype=dtype, copy=copy) wrapped = Wrapper() diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index 975bb322f87c..151fa4e68727 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -114,9 +114,9 @@ def test_recarray_from_obj(self): mine = np.rec.fromarrays([a, b, c], names='date,data1,data2') for i in range(len(a)): - assert_((mine.date[i] == list(range(1, 10)))) - assert_((mine.data1[i] == 0.0)) - assert_((mine.data2[i] == 0.0)) + assert_(mine.date[i] == list(range(1, 10))) + assert_(mine.data1[i] == 0.0) + assert_(mine.data2[i] == 0.0) def test_recarray_repr(self): a = np.array([(1, 0.1), (2, 0.2)], diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 
17d589af582a..8c9dbbe739e0 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1071,8 +1071,8 @@ def test_astype_copy(self): with open(filename, 'rb') as f: xp = pickle.load(f, encoding='latin1') xpd = xp.astype(np.float64) - assert_((xp.__array_interface__['data'][0] != - xpd.__array_interface__['data'][0])) + assert_(xp.__array_interface__['data'][0] != + xpd.__array_interface__['data'][0]) def test_compress_small_type(self): # Ticket #789, changeset 5217. @@ -2567,15 +2567,17 @@ def test__array_namespace__(self): assert xp is np xp = arr.__array_namespace__(api_version="2022.12") assert xp is np + xp = arr.__array_namespace__(api_version="2023.12") + assert xp is np xp = arr.__array_namespace__(api_version=None) assert xp is np with pytest.raises( ValueError, - match="Version \"2023.12\" of the Array API Standard " + match="Version \"2024.12\" of the Array API Standard " "is not supported." ): - arr.__array_namespace__(api_version="2023.12") + arr.__array_namespace__(api_version="2024.12") with pytest.raises( ValueError, @@ -2614,3 +2616,31 @@ def test_logspace_base_does_not_determine_dtype(self): base=np.array([10.0])) with pytest.raises(AssertionError, match="not almost equal"): assert_almost_equal(out2, expected) + + def test_vectorize_fixed_width_string(self): + arr = np.array(["SOme wOrd DŽ ß ᾛ ΣΣ ffi⁵Å Ç Ⅰ"]).astype(np.str_) + f = str.casefold + res = np.vectorize(f, otypes=[arr.dtype])(arr) + assert res.dtype == "U30" + + def test_repeated_square_consistency(self): + # gh-26940 + buf = np.array([-5.171866611150749e-07 + 2.5618634555957426e-07j, + 0, 0, 0, 0, 0]) + # Test buffer with regular and reverse strides + for in_vec in [buf[:3], buf[:3][::-1]]: + expected_res = np.square(in_vec) + # Output vector immediately follows input vector + # to reproduce off-by-one in nomemoverlap check. 
+ for res in [buf[3:], buf[3:][::-1]]: + res = buf[3:] + np.square(in_vec, out=res) + assert_equal(res, expected_res) + + def test_sort_unique_crash(self): + # gh-27037 + for _ in range(4): + vals = np.linspace(0, 1, num=128) + data = np.broadcast_to(vals, (128, 128, 128)) + data = data.transpose(0, 2, 1).copy() + np.unique(data) diff --git a/numpy/_core/tests/test_scalar_methods.py b/numpy/_core/tests/test_scalar_methods.py index b0bdd126e8a7..7b6e83554d81 100644 --- a/numpy/_core/tests/test_scalar_methods.py +++ b/numpy/_core/tests/test_scalar_methods.py @@ -203,3 +203,23 @@ def test_bit_count(self): assert np.uint64(a - 1).bit_count() == exp assert np.uint64(a ^ 63).bit_count() == 7 assert np.uint64((a - 1) ^ 510).bit_count() == exp - 8 + + +class TestDevice: + """ + Test scalar.device attribute and scalar.to_device() method. + """ + scalars = [np.bool(True), np.int64(1), np.uint64(1), np.float64(1.0), + np.complex128(1+1j)] + + @pytest.mark.parametrize("scalar", scalars) + def test_device(self, scalar): + assert scalar.device == "cpu" + + @pytest.mark.parametrize("scalar", scalars) + def test_to_device(self, scalar): + assert scalar.to_device("cpu") is scalar + + @pytest.mark.parametrize("scalar", scalars) + def test___array_namespace__(self, scalar): + assert scalar.__array_namespace__() is np diff --git a/numpy/_core/tests/test_scalarinherit.py b/numpy/_core/tests/test_scalarinherit.py index f9c574d5798e..52591215a2e7 100644 --- a/numpy/_core/tests/test_scalarinherit.py +++ b/numpy/_core/tests/test_scalarinherit.py @@ -54,6 +54,13 @@ def test_gh_15395(self): with pytest.raises(TypeError): B1(1.0, 2.0) + def test_int_repr(self): + # Test that integer repr works correctly for subclasses (gh-27106) + class my_int16(np.int16): + pass + + s = repr(my_int16(3)) + assert s == "my_int16(3)" class TestCharacter: def test_char_radd(self): diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 057f84d17633..af9360b92c23 100644 --- 
a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -12,18 +12,13 @@ import numpy as np from numpy.exceptions import ComplexWarning +from numpy._core._rational_tests import rational from numpy.testing import ( assert_, assert_equal, assert_raises, assert_almost_equal, assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data, - assert_warns, _SUPPORTS_SVE, + assert_warns, check_support_sve, ) -try: - COMPILERS = np.show_config(mode="dicts")["Compilers"] - USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl" -except TypeError: - USING_CLANG_CL = False - types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc, np.int_, np.uint, np.longlong, np.ulonglong, np.single, np.double, np.longdouble, np.csingle, @@ -32,12 +27,15 @@ floating_types = np.floating.__subclasses__() complex_floating_types = np.complexfloating.__subclasses__() -objecty_things = [object(), None] +objecty_things = [object(), None, np.array(None, dtype=object)] -reasonable_operators_for_scalars = [ +binary_operators_for_scalars = [ operator.lt, operator.le, operator.eq, operator.ne, operator.ge, operator.gt, operator.add, operator.floordiv, operator.mod, - operator.mul, operator.pow, operator.sub, operator.truediv, + operator.mul, operator.pow, operator.sub, operator.truediv +] +binary_operators_for_scalar_ints = binary_operators_for_scalars + [ + operator.xor, operator.or_, operator.and_ ] @@ -114,7 +112,7 @@ def check_ufunc_scalar_equivalence(op, arr1, arr2): @pytest.mark.slow @settings(max_examples=10000, deadline=2000) -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()), hynp.arrays(dtype=hynp.scalar_dtypes(), shape=())) def test_array_scalar_ufunc_equivalence(op, arr1, arr2): @@ -127,7 +125,7 @@ def test_array_scalar_ufunc_equivalence(op, arr1, arr2): @pytest.mark.slow -@given(sampled_from(reasonable_operators_for_scalars), 
+@given(sampled_from(binary_operators_for_scalars), hynp.scalar_dtypes(), hynp.scalar_dtypes()) def test_array_scalar_ufunc_dtypes(op, dt1, dt2): # Same as above, but don't worry about sampling weird values so that we @@ -153,7 +151,7 @@ def test_int_float_promotion_truediv(fscalar): class TestBaseMath: - @pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982") + @pytest.mark.xfail(check_support_sve(), reason="gh-22982") def test_blocked(self): # test alignments offsets for simd instructions # alignments for vz + 2 * (vs - 1) + 1 @@ -805,12 +803,6 @@ class TestBitShifts: [operator.rshift, operator.lshift], ids=['>>', '<<']) def test_shift_all_bits(self, type_code, op): """Shifts where the shift amount is the width of the type or wider """ - if ( - USING_CLANG_CL and - type_code in ("l", "L") and - op is operator.lshift - ): - pytest.xfail("Failing on clang-cl builds") # gh-2449 dt = np.dtype(type_code) nbits = dt.itemsize * 8 @@ -876,8 +868,8 @@ def recursionlimit(n): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars), - sampled_from(types)) + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) def test_operator_object_left(o, op, type_): try: with recursionlimit(200): @@ -887,8 +879,8 @@ def test_operator_object_left(o, op, type_): @given(sampled_from(objecty_things), - sampled_from(reasonable_operators_for_scalars), - sampled_from(types)) + sampled_from(binary_operators_for_scalar_ints), + sampled_from(types + [rational])) def test_operator_object_right(o, op, type_): try: with recursionlimit(200): @@ -897,7 +889,7 @@ def test_operator_object_right(o, op, type_): pass -@given(sampled_from(reasonable_operators_for_scalars), +@given(sampled_from(binary_operators_for_scalars), sampled_from(types), sampled_from(types)) def test_operator_scalars(op, type1, type2): @@ -907,7 +899,7 @@ def test_operator_scalars(op, type1, type2): pass -@pytest.mark.parametrize("op", reasonable_operators_for_scalars) 
+@pytest.mark.parametrize("op", binary_operators_for_scalars) @pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) def test_longdouble_operators_with_obj(sctype, op): # This is/used to be tricky, because NumPy generally falls back to @@ -920,6 +912,9 @@ def test_longdouble_operators_with_obj(sctype, op): # # That would recurse infinitely. Other scalars return the python object # on cast, so this type of things works OK. + # + # As of NumPy 2.1, this has been consolidated into the np.generic binops + # and now checks `.item()`. That also allows the below path to work now. try: op(sctype(3), None) except TypeError: @@ -930,7 +925,16 @@ def test_longdouble_operators_with_obj(sctype, op): pass -@pytest.mark.parametrize("op", reasonable_operators_for_scalars) +@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.sub]) +@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) +def test_longdouble_with_arrlike(sctype, op): + # As of NumPy 2.1, longdouble behaves like other types and can coerce + # e.g. lists. (Not necessarily better, but consistent.) + assert_array_equal(op(sctype(3), [1, 2]), op(3, np.array([1, 2]))) + assert_array_equal(op([1, 2], sctype(3)), op(np.array([1, 2]), 3)) + + +@pytest.mark.parametrize("op", binary_operators_for_scalars) @pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble]) @np.errstate(all="ignore") def test_longdouble_operators_with_large_int(sctype, op): @@ -1069,6 +1073,9 @@ def test_longdouble_complex(): @pytest.mark.parametrize("subtype", [float, int, complex, np.float16]) @np._no_nep50_warning() def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp): + # This tests that python scalar subclasses behave like a float64 (if they + # don't override it). + # In an earlier version of NEP 50, they behaved like the Python buildins. 
def op_func(self, other): return __op__ @@ -1091,25 +1098,29 @@ def rop_func(self, other): # When no deferring is indicated, subclasses are handled normally. myt = type("myt", (subtype,), {__rop__: rop_func}) + behaves_like = lambda x: np.array(subtype(x))[()] # Check for float32, as a float subclass float64 may behave differently res = op(myt(1), np.float16(2)) - expected = op(subtype(1), np.float16(2)) + expected = op(behaves_like(1), np.float16(2)) assert res == expected assert type(res) == type(expected) res = op(np.float32(2), myt(1)) - expected = op(np.float32(2), subtype(1)) + expected = op(np.float32(2), behaves_like(1)) assert res == expected assert type(res) == type(expected) - # Same check for longdouble: + # Same check for longdouble (compare via dtype to accept float64 when + # longdouble has the identical size), which is currently not perfectly + # consistent. res = op(myt(1), np.longdouble(2)) - expected = op(subtype(1), np.longdouble(2)) + expected = op(behaves_like(1), np.longdouble(2)) assert res == expected - assert type(res) == type(expected) + assert np.dtype(type(res)) == np.dtype(type(expected)) res = op(np.float32(2), myt(1)) - expected = op(np.longdouble(2), subtype(1)) + expected = op(np.float32(2), behaves_like(1)) assert res == expected + assert np.dtype(type(res)) == np.dtype(type(expected)) def test_truediv_int(): @@ -1120,7 +1131,7 @@ def test_truediv_int(): @pytest.mark.slow @pytest.mark.parametrize("op", # TODO: Power is a bit special, but here mostly bools seem to behave oddly - [op for op in reasonable_operators_for_scalars if op is not operator.pow]) + [op for op in binary_operators_for_scalars if op is not operator.pow]) @pytest.mark.parametrize("sctype", types) @pytest.mark.parametrize("other_type", [float, int, complex]) @pytest.mark.parametrize("rop", [True, False]) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 5b9de37f5f60..a885cb64a661 100644 --- 
a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -154,7 +154,7 @@ def test_2D_array(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - hstack((np.arange(3) for _ in range(2))) + hstack(np.arange(3) for _ in range(2)) with pytest.raises(TypeError, match="arrays to stack must be"): hstack(map(lambda x: x, np.ones((3, 2)))) @@ -209,7 +209,7 @@ def test_2D_array2(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - vstack((np.arange(3) for _ in range(2))) + vstack(np.arange(3) for _ in range(2)) def test_casting_and_dtype(self): a = np.array([1, 2, 3]) @@ -477,7 +477,7 @@ def test_stack(): # do not accept generators with pytest.raises(TypeError, match="arrays to stack must be"): - stack((x for x in range(3))) + stack(x for x in range(3)) #casting and dtype test a = np.array([1, 2, 3]) @@ -490,6 +490,39 @@ def test_stack(): stack((a, b), dtype=np.int64, axis=1, casting="safe") +def test_unstack(): + a = np.arange(24).reshape((2, 3, 4)) + + for stacks in [np.unstack(a), + np.unstack(a, axis=0), + np.unstack(a, axis=-3)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 2 + assert_array_equal(stacks[0], a[0]) + assert_array_equal(stacks[1], a[1]) + + for stacks in [np.unstack(a, axis=1), + np.unstack(a, axis=-2)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 3 + assert_array_equal(stacks[0], a[:, 0]) + assert_array_equal(stacks[1], a[:, 1]) + assert_array_equal(stacks[2], a[:, 2]) + + for stacks in [np.unstack(a, axis=2), + np.unstack(a, axis=-1)]: + assert isinstance(stacks, tuple) + assert len(stacks) == 4 + assert_array_equal(stacks[0], a[:, :, 0]) + assert_array_equal(stacks[1], a[:, :, 1]) + assert_array_equal(stacks[2], a[:, :, 2]) + assert_array_equal(stacks[3], a[:, :, 3]) + + assert_raises(ValueError, np.unstack, a, axis=3) + assert_raises(ValueError, np.unstack, a, axis=-4) + assert_raises(ValueError, 
np.unstack, np.array(0), axis=0) + + @pytest.mark.parametrize("axis", [0]) @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"]) @pytest.mark.parametrize("casting", diff --git a/numpy/_core/tests/test_stringdtype.py b/numpy/_core/tests/test_stringdtype.py index b856c667c021..f087802e310b 100644 --- a/numpy/_core/tests/test_stringdtype.py +++ b/numpy/_core/tests/test_stringdtype.py @@ -11,7 +11,7 @@ from numpy.dtypes import StringDType from numpy._core.tests._natype import pd_NA -from numpy.testing import assert_array_equal, IS_WASM +from numpy.testing import assert_array_equal, IS_WASM, IS_PYPY @pytest.fixture @@ -40,8 +40,7 @@ def na_object(request): return request.param -@pytest.fixture() -def dtype(na_object, coerce): +def get_dtype(na_object, coerce=True): # explicit is check for pd_NA because != with pd_NA returns pd_NA if na_object is pd_NA or na_object != "unset": return StringDType(na_object=na_object, coerce=coerce) @@ -49,6 +48,11 @@ def dtype(na_object, coerce): return StringDType(coerce=coerce) +@pytest.fixture() +def dtype(na_object, coerce): + return get_dtype(na_object, coerce) + + # second copy for cast tests to do a cartesian product over dtypes @pytest.fixture(params=[True, False]) def coerce2(request): @@ -225,6 +229,18 @@ def test_self_casts(dtype, dtype2, strings): else: arr.astype(dtype2, casting="safe") + if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"): + na1 = dtype.na_object + na2 = dtype2.na_object + if (na1 is not na2 and + # check for pd_NA first because bool(pd_NA) is an error + ((na1 is pd_NA or na2 is pd_NA) or + # the second check is a NaN check, spelled this way + # to avoid errors from math.isnan and np.isnan + (na1 != na2 and not (na1 != na1 and na2 != na2)))): + with pytest.raises(TypeError): + arr[:-1] == newarr[:-1] + return assert_array_equal(arr[:-1], newarr[:-1]) @@ -356,19 +372,12 @@ def test_isnan(dtype, string_list): # isnan is only true when na_object is a NaN assert_array_equal( 
np.isnan(sarr), - np.array([0] * len(string_list) + [1], dtype=np.bool_), + np.array([0] * len(string_list) + [1], dtype=np.bool), ) else: assert not np.any(np.isnan(sarr)) -def _pickle_load(filename): - with open(filename, "rb") as f: - res = pickle.load(f) - - return res - -@pytest.mark.skipif(IS_WASM, reason="no threading support in wasm") def test_pickle(dtype, string_list): arr = np.array(string_list, dtype=dtype) @@ -381,15 +390,6 @@ def test_pickle(dtype, string_list): assert_array_equal(res[0], arr) assert res[1] == dtype - # load the pickle in a subprocess to ensure the string data are - # actually stored in the pickle file - with concurrent.futures.ProcessPoolExecutor() as executor: - e = executor.submit(_pickle_load, f.name) - res = e.result() - - assert_array_equal(res[0], arr) - assert res[1] == dtype - os.remove(f.name) @@ -460,11 +460,51 @@ def test_sort(strings, arr_sorted): ["", "a", "😸", "ááðfáíóåéë"], ], ) -def test_nonzero(strings): - arr = np.array(strings, dtype="T") - is_nonzero = np.array([i for i, item in enumerate(arr) if len(item) != 0]) +def test_nonzero(strings, na_object): + dtype = get_dtype(na_object) + arr = np.array(strings, dtype=dtype) + is_nonzero = np.array( + [i for i, item in enumerate(strings) if len(item) != 0]) assert_array_equal(arr.nonzero()[0], is_nonzero) + if na_object is not pd_NA and na_object == 'unset': + return + + strings_with_na = np.array(strings + [na_object], dtype=dtype) + is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0] + + if is_nan: + assert strings_with_na.nonzero()[0][-1] == 4 + else: + assert strings_with_na.nonzero()[0][-1] == 3 + + # check that the casting to bool and nonzero give consistent results + assert_array_equal(strings_with_na[strings_with_na.nonzero()], + strings_with_na[strings_with_na.astype(bool)]) + + +def test_where(string_list, na_object): + dtype = get_dtype(na_object) + a = np.array(string_list, dtype=dtype) + b = a[::-1] + res = np.where([True, False, True, False, 
True, False], a, b) + assert_array_equal(res, [a[0], b[1], a[2], b[3], a[4], b[5]]) + + +def test_fancy_indexing(string_list): + sarr = np.array(string_list, dtype="T") + assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])]) + + # see gh-27003 and gh-27053 + for ind in [[True, True], [0, 1], ...]: + for lop in [['a'*16, 'b'*16], ['', '']]: + a = np.array(lop, dtype="T") + rop = ['d'*16, 'e'*16] + for b in [rop, np.array(rop, dtype="T")]: + a[ind] = b + assert_array_equal(a, b) + assert a[0] == 'd'*16 + def test_creation_functions(): assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""]) @@ -474,6 +514,52 @@ def test_creation_functions(): assert np.empty(3, dtype="T")[0] == "" +def test_concatenate(string_list): + sarr = np.array(string_list, dtype="T") + sarr_cat = np.array(string_list + string_list, dtype="T") + + assert_array_equal(np.concatenate([sarr], axis=0), sarr) + + +def test_resize_method(string_list): + sarr = np.array(string_list, dtype="T") + if IS_PYPY: + sarr.resize(len(string_list)+3, refcheck=False) + else: + sarr.resize(len(string_list)+3) + assert_array_equal(sarr, np.array(string_list + ['']*3, dtype="T")) + + +def test_create_with_copy_none(string_list): + arr = np.array(string_list, dtype=StringDType()) + # create another stringdtype array with an arena that has a different + # in-memory layout than the first array + arr_rev = np.array(string_list[::-1], dtype=StringDType()) + + # this should create a copy and the resulting array + # shouldn't share an allocator or arena with arr_rev, despite + # explicitly passing arr_rev.dtype + arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype) + np.testing.assert_array_equal(arr, arr_copy) + assert arr_copy.base is None + + with pytest.raises(ValueError, match="Unable to avoid copy"): + np.array(arr, copy=False, dtype=arr_rev.dtype) + + # because we're using arr's dtype instance, the view is safe + arr_view = np.array(arr, copy=None, dtype=arr.dtype) + np.testing.assert_array_equal(arr, arr) + 
np.testing.assert_array_equal(arr_view[::-1], arr_rev) + assert arr_view is arr + + +def test_astype_copy_false(): + orig_dt = StringDType() + arr = np.array(["hello", "world"], dtype=StringDType()) + assert not arr.astype(StringDType(coerce=False), copy=False).dtype.coerce + + assert arr.astype(orig_dt, copy=False).dtype is orig_dt + @pytest.mark.parametrize( "strings", [ @@ -562,6 +648,10 @@ def test_sized_integer_casts(bitsize, signed): with pytest.raises(OverflowError): np.array(oob, dtype="T").astype(idtype) + with pytest.raises(ValueError): + np.array(["1", np.nan, "3"], + dtype=StringDType(na_object=np.nan)).astype(idtype) + @pytest.mark.parametrize("typename", ["byte", "short", "int", "longlong"]) @pytest.mark.parametrize("signed", ["", "u"]) @@ -680,6 +770,12 @@ def test_ufuncs_minmax(string_list, ufunc_name, func, use_out): res = ufunc(arr, arr) assert_array_equal(uarr, res) + assert_array_equal(getattr(arr, ufunc_name)(), func(string_list)) + + +def test_max_regression(): + arr = np.array(['y', 'y', 'z'], dtype="T") + assert arr.max() == 'z' @pytest.mark.parametrize("use_out", [True, False]) @@ -730,6 +826,78 @@ def test_ufunc_add(dtype, string_list, other_strings, use_out): np.add(arr1, arr2) +def test_ufunc_add_reduce(dtype): + values = ["a", "this is a long string", "c"] + arr = np.array(values, dtype=dtype) + out = np.empty((), dtype=dtype) + + expected = np.array("".join(values), dtype=dtype) + assert_array_equal(np.add.reduce(arr), expected) + + np.add.reduce(arr, out=out) + assert_array_equal(out, expected) + + +def test_add_promoter(string_list): + arr = np.array(string_list, dtype=StringDType()) + lresult = np.array(["hello" + s for s in string_list], dtype=StringDType()) + rresult = np.array([s + "hello" for s in string_list], dtype=StringDType()) + + for op in ["hello", np.str_("hello"), np.array(["hello"])]: + assert_array_equal(op + arr, lresult) + assert_array_equal(arr + op, rresult) + + # The promoter should be able to handle things if 
users pass `dtype=` + res = np.add("hello", string_list, dtype=StringDType) + assert res.dtype == StringDType() + + # The promoter should not kick in if users override the input, + # which means arr is cast, this fails because of the unknown length. + with pytest.raises(TypeError, match="cannot cast dtype"): + np.add(arr, "add", signature=("U", "U", None), casting="unsafe") + + # But it must simply reject the following: + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add(arr, "add", signature=(None, "U", None)) + + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("a", "b", signature=("U", "U", StringDType)) + + +def test_add_no_legacy_promote_with_signature(): + # Possibly misplaced, but useful to test with string DType. We check that + # if there is clearly no loop found, a stray `dtype=` doesn't break things + # Regression test for the bad error in gh-26735 + # (If legacy promotion is gone, this can be deleted...) + with pytest.raises(TypeError, match=".*did not contain a loop"): + np.add("3", 6, dtype=StringDType) + + +def test_add_promoter_reduce(): + # Exact TypeError could change, but ensure StringDtype doesn't match + with pytest.raises(TypeError, match="the resolved dtypes are not"): + np.add.reduce(np.array(["a", "b"], dtype="U")) + + # On the other hand, using `dtype=T` in the *ufunc* should work. + np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType) + + +def test_multiply_reduce(): + # At the time of writing (NumPy 2.0) this is very limited (and rather + # ridiculous anyway). But it works and actually makes some sense... 
+ # (NumPy does not allow non-scalar initial values) + repeats = np.array([2, 3, 4]) + val = "school-🚌" + res = np.multiply.reduce(repeats, initial=val, dtype=np.dtypes.StringDType) + assert res == val * np.prod(repeats) + + +def test_multiply_two_string_raises(): + arr = np.array(["hello", "world"], dtype="T") + with pytest.raises(np._core._exceptions._UFuncNoLoopError): + np.multiply(arr, arr) + + @pytest.mark.parametrize("use_out", [True, False]) @pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]]) @pytest.mark.parametrize( @@ -828,6 +996,62 @@ def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out): other * arr +def test_findlike_promoters(): + r = "Wally" + l = "Where's Wally?" + s = np.int32(3) + e = np.int8(13) + for dtypes in [("T", "U"), ("U", "T")]: + for function, answer in [ + (np.strings.index, 8), + (np.strings.endswith, True), + ]: + assert answer == function( + np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e + ) + + +def test_strip_promoter(): + arg = ["Hello!!!!", "Hello??!!"] + strip_char = "!" 
+ answer = ["Hello", "Hello??"] + for dtypes in [("T", "U"), ("U", "T")]: + result = np.strings.strip( + np.array(arg, dtype=dtypes[0]), + np.array(strip_char, dtype=dtypes[1]) + ) + assert_array_equal(result, answer) + assert result.dtype.char == "T" + + +def test_replace_promoter(): + arg = ["Hello, planet!", "planet, Hello!"] + old = "planet" + new = "world" + answer = ["Hello, world!", "world, Hello!"] + for dtypes in itertools.product("TU", repeat=3): + if dtypes == ("U", "U", "U"): + continue + answer_arr = np.strings.replace( + np.array(arg, dtype=dtypes[0]), + np.array(old, dtype=dtypes[1]), + np.array(new, dtype=dtypes[2]), + ) + assert_array_equal(answer_arr, answer) + assert answer_arr.dtype.char == "T" + + +def test_center_promoter(): + arg = ["Hello", "planet!"] + fillchar = "/" + for dtypes in [("T", "U"), ("U", "T")]: + answer = np.strings.center( + np.array(arg, dtype=dtypes[0]), 9, np.array(fillchar, dtype=dtypes[1]) + ) + assert_array_equal(answer, ["//Hello//", "/planet!/"]) + assert answer.dtype.char == "T" + + DATETIME_INPUT = [ np.datetime64("1923-04-14T12:43:12"), np.datetime64("1994-06-21T14:43:15"), @@ -916,6 +1140,12 @@ def test_nat_casts(): np.array([output_object]*arr.size, dtype=dtype)) +def test_nat_conversion(): + for nat in [np.datetime64("NaT", "s"), np.timedelta64("NaT", "s")]: + with pytest.raises(ValueError, match="string coercion is disabled"): + np.array(["a", nat], dtype=StringDType(coerce=False)) + + def test_growing_strings(dtype): # growing a string leads to a heap allocation, this tests to make sure # we do that bookkeeping correctly for all possible starting cases @@ -991,7 +1221,13 @@ def unicode_array(): "capitalize", "expandtabs", "lower", - "splitlines" "swapcase" "title" "upper", + "lstrip", + "rstrip", + "splitlines", + "strip", + "swapcase", + "title", + "upper", ] BOOL_OUTPUT_FUNCTIONS = [ @@ -1018,7 +1254,10 @@ def unicode_array(): "istitle", "isupper", "lower", + "lstrip", + "rstrip", "splitlines", + "strip", 
"swapcase", "title", "upper", @@ -1040,10 +1279,20 @@ def unicode_array(): "upper", ] +ONLY_IN_NP_CHAR = [ + "join", + "split", + "rsplit", + "splitlines" +] + @pytest.mark.parametrize("function_name", UNARY_FUNCTIONS) def test_unary(string_array, unicode_array, function_name): - func = getattr(np.char, function_name) + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) dtype = string_array.dtype sres = func(string_array) ures = func(unicode_array) @@ -1084,6 +1333,10 @@ def test_unary(string_array, unicode_array, function_name): with pytest.raises(ValueError): func(na_arr) return + if not (is_nan or is_str): + with pytest.raises(ValueError): + func(na_arr) + return res = func(na_arr) if is_nan and function_name in NAN_PRESERVING_FUNCTIONS: assert res[0] is dtype.na_object @@ -1100,32 +1353,41 @@ def test_unary(string_array, unicode_array, function_name): ("add", (None, None)), ("multiply", (None, 2)), ("mod", ("format: %s", None)), - pytest.param("center", (None, 25), marks=unicode_bug_fail), + ("center", (None, 25)), ("count", (None, "A")), ("encode", (None, "UTF-8")), ("endswith", (None, "lo")), ("find", (None, "A")), ("index", (None, "e")), ("join", ("-", None)), - pytest.param("ljust", (None, 12), marks=unicode_bug_fail), + ("ljust", (None, 12)), + ("lstrip", (None, "A")), ("partition", (None, "A")), ("replace", (None, "A", "B")), ("rfind", (None, "A")), ("rindex", (None, "e")), - pytest.param("rjust", (None, 12), marks=unicode_bug_fail), + ("rjust", (None, 12)), + ("rsplit", (None, "A")), + ("rstrip", (None, "A")), ("rpartition", (None, "A")), ("split", (None, "A")), + ("strip", (None, "A")), ("startswith", (None, "A")), - pytest.param("zfill", (None, 12), marks=unicode_bug_fail), + ("zfill", (None, 12)), ] PASSES_THROUGH_NAN_NULLS = [ "add", + "center", + "ljust", "multiply", "replace", + "rjust", "strip", "lstrip", "rstrip", + "replace" + "zfill", ] NULLS_ARE_FALSEY = [ @@ 
-1137,7 +1399,6 @@ def test_unary(string_array, unicode_array, function_name): "count", "find", "rfind", - "replace", ] SUPPORTS_NULLS = ( @@ -1167,10 +1428,13 @@ def call_func(func, args, array, sanitize=True): @pytest.mark.parametrize("function_name, args", BINARY_FUNCTIONS) def test_binary(string_array, unicode_array, function_name, args): - func = getattr(np.char, function_name) + if function_name in ONLY_IN_NP_CHAR: + func = getattr(np.char, function_name) + else: + func = getattr(np.strings, function_name) sres = call_func(func, args, string_array) ures = call_func(func, args, unicode_array, sanitize=False) - if sres.dtype == StringDType(): + if not isinstance(sres, tuple) and sres.dtype == StringDType(): ures = ures.astype(StringDType()) assert_array_equal(sres, ures) @@ -1205,7 +1469,28 @@ def test_binary(string_array, unicode_array, function_name, args): assert 0 -def test_strip(string_array, unicode_array): +@pytest.mark.parametrize("function, expected", [ + (np.strings.find, [[2, -1], [1, -1]]), + (np.strings.startswith, [[False, False], [True, False]])]) +@pytest.mark.parametrize("start, stop", [ + (1, 4), + (np.int8(1), np.int8(4)), + (np.array([1, 1], dtype='u2'), np.array([4, 4], dtype='u2'))]) +def test_non_default_start_stop(function, start, stop, expected): + a = np.array([["--🐍--", "--🦜--"], + ["-🐍---", "-🦜---"]], "T") + indx = function(a, "🐍", start, stop) + assert_array_equal(indx, expected) + + +@pytest.mark.parametrize("count", [2, np.int8(2), np.array([2, 2], 'u2')]) +def test_replace_non_default_repeat(count): + a = np.array(["🐍--", "🦜-🦜-"], "T") + result = np.strings.replace(a, "🦜-", "🦜†", count) + assert_array_equal(result, np.array(["🐍--", "🦜†🦜†"], "T")) + + +def test_strip_ljust_rjust_consistency(string_array, unicode_array): rjs = np.char.rjust(string_array, 1000) rju = np.char.rjust(unicode_array, 1000) @@ -1233,6 +1518,47 @@ def test_strip(string_array, unicode_array): ) +def test_unset_na_coercion(): + # a dtype instance with an 
unset na object is compatible + # with a dtype that has one set + + # this test uses the "add" and "equal" ufunc but all ufuncs that + # accept more than one string argument and produce a string should + # behave this way + # TODO: generalize to more ufuncs + inp = ["hello", "world"] + arr = np.array(inp, dtype=StringDType(na_object=None)) + for op_dtype in [None, StringDType(), StringDType(coerce=False), + StringDType(na_object=None)]: + if op_dtype is None: + op = "2" + else: + op = np.array("2", dtype=op_dtype) + res = arr + op + assert_array_equal(res, ["hello2", "world2"]) + + # dtype instances with distinct explicitly set NA objects are incompatible + for op_dtype in [StringDType(na_object=pd_NA), StringDType(na_object="")]: + op = np.array("2", dtype=op_dtype) + with pytest.raises(TypeError): + arr + op + + # comparisons only consider the na_object + for op_dtype in [None, StringDType(), StringDType(coerce=True), + StringDType(na_object=None)]: + if op_dtype is None: + op = inp + else: + op = np.array(inp, dtype=op_dtype) + assert_array_equal(arr, op) + + for op_dtype in [StringDType(na_object=pd_NA), + StringDType(na_object=np.nan)]: + op = np.array(inp, dtype=op_dtype) + with pytest.raises(TypeError): + arr == op + + class TestImplementation: """Check that strings are stored in the arena when possible. 
diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 53af38d6076a..a94b52939b1d 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -18,6 +18,7 @@ MAX = np.iinfo(np.int64).max +IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16) @pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS) def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym): @@ -462,13 +463,16 @@ def test_endswith(self, a, suffix, start, end, out, dt): ("xyxzx", "x", "yxzx"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["helloxyzzy", "hello"]), + (["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]), ]) def test_lstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) if chars is not None: chars = np.array(chars, dtype=dt) - out = np.array(out, dtype=dt) - assert_array_equal(np.strings.lstrip(a, chars), out) + assert_array_equal(np.strings.lstrip(a, chars), out) + else: + assert_array_equal(np.strings.lstrip(a), out) @pytest.mark.parametrize("a,chars,out", [ ("", None, ""), @@ -484,16 +488,20 @@ def test_lstrip(self, a, chars, out, dt): ("xyzzyhelloxyzzy", "xyz", "xyzzyhello"), ("hello", "xyz", "hello"), ("xyxz", "xyxz", ""), + (" ", None, ""), ("xyxzx", "x", "xyxz"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["xyzzyhello", "hello"]), + (["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]), ]) def test_rstrip(self, a, chars, out, dt): a = np.array(a, dtype=dt) + out = np.array(out, dtype=dt) if chars is not None: chars = np.array(chars, dtype=dt) - out = np.array(out, dtype=dt) - assert_array_equal(np.strings.rstrip(a, chars), out) + assert_array_equal(np.strings.rstrip(a, chars), out) + else: + assert_array_equal(np.strings.rstrip(a), out) @pytest.mark.parametrize("a,chars,out", [ ("", None, ""), @@ -510,6 +518,7 @@ def test_rstrip(self, a, chars, out, dt): ("xyxzx", "x", "yxz"), (["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"], ["hello", "hello"]), 
+ (["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]), ]) def test_strip(self, a, chars, out, dt): a = np.array(a, dtype=dt) @@ -715,6 +724,140 @@ def test_expandtabs(self, buf, tabsize, res, dt): def test_expandtabs_raises_overflow(self, dt): with pytest.raises(OverflowError, match="new string is too long"): np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize) + np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61) + + FILL_ERROR = "The fill character must be exactly one character long" + + def test_center_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.center(buf, 10, fill) + + def test_ljust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.ljust(buf, 10, fill) + + def test_rjust_raises_multiple_character_fill(self, dt): + buf = np.array("abc", dtype=dt) + fill = np.array("**", dtype=dt) + with pytest.raises(TypeError, match=self.FILL_ERROR): + np.strings.rjust(buf, 10, fill) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc '), + ('abc', 6, ' ', ' abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', 10, '*', '***abc****'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', 'abc '), + ('abc', 6, ' ', 'abc '), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', 10, '*', 'abc*******'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + 
assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('abc', 10, ' ', ' abc'), + ('abc', 6, ' ', ' abc'), + ('abc', 3, ' ', 'abc'), + ('abc', 2, ' ', 'abc'), + ('abc', 10, '*', '*******abc'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,res", [ + ('123', 2, '123'), + ('123', 3, '123'), + ('0123', 4, '0123'), + ('+123', 3, '+123'), + ('+123', 4, '+123'), + ('+123', 5, '+0123'), + ('+0123', 5, '+0123'), + ('-123', 3, '-123'), + ('-123', 4, '-123'), + ('-0123', 5, '-0123'), + ('000', 3, '000'), + ('34', 1, '34'), + ('0034', 4, '0034'), + ]) + def test_zfill(self, buf, width, res, dt): + buf = np.array(buf, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.zfill(buf, width), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the par", + "ti", "tion method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "http://www.python.org", "", ""), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "http://www.python.org", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "", "www.python.org", ""]), + ("mississippi", "ss", "mi", "ss", "issippi"), + ("mississippi", "i", "m", "i", "ssissippi"), + ("mississippi", "w", "mississippi", "", ""), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, 
dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("this is the partition method", "ti", "this is the parti", + "ti", "on method"), + ("http://www.python.org", "://", "http", "://", "www.python.org"), + ("http://www.python.org", "?", "", "", "http://www.python.org"), + ("http://www.python.org", "http://", "", "http://", "www.python.org"), + ("http://www.python.org", "org", "http://www.python.", "org", ""), + ("http://www.python.org", ["://", "?", "http://", "org"], + ["http", "", "", "http://www.python."], + ["://", "", "http://", "org"], + ["www.python.org", "http://www.python.org", "www.python.org", ""]), + ("mississippi", "ss", "missi", "ss", "ippi"), + ("mississippi", "i", "mississipp", "i", ""), + ("mississippi", "w", "", "", "mississippi"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) @pytest.mark.parametrize("dt", ["U", "T"]) @@ -770,7 +913,7 @@ def test_replace_unicode(self, buf, old, new, count, res, dt): '\U00011066', '\U000104A0', pytest.param('\U0001F107', marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISALNUM", strict=True)), ]) @@ -787,7 +930,7 @@ def test_isalnum_unicode(self, in_, dt): ('\U0001F46F', False), ('\u2177', True), pytest.param('\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and 
IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISLOWER", strict=True)), ('\U0001044E', True), @@ -805,7 +948,7 @@ def test_islower_unicode(self, in_, out, dt): ('\U0001F46F', False), ('\u2177', False), pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISUPPER", strict=True)), ('\U0001044E', False), @@ -818,12 +961,12 @@ def test_isupper_unicode(self, in_, out, dt): ('\u1FFc', True), ('Greek \u1FFcitlecases ...', True), pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISISTITLE", strict=True)), ('\U00010427\U0001044E', True), pytest.param('\U00010429', False, marks=pytest.mark.xfail( - sys.platform == 'win32' and IS_PYPY, + sys.platform == 'win32' and IS_PYPY_LT_7_3_16, reason="PYPY bug in Py_UNICODE_ISISTITLE", strict=True)), ('\U0001044E', False), @@ -856,6 +999,152 @@ def test_expandtabs(self, buf, res, dt): res = np.array(res, dtype=dt) assert_array_equal(np.strings.expandtabs(buf), res) + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', '\U0001044Ex\U0001044E'), + ('x', 4, '\U0001044E', '\U0001044Ex\U0001044E\U0001044E'), + ]) + def test_center(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.center(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', 'x\U0001044E'), + ('x', 3, '\U0001044E', 'x\U0001044E\U0001044E'), + ('x', 4, '\U0001044E', 'x\U0001044E\U0001044E\U0001044E'), + ]) + def test_ljust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + 
assert_array_equal(np.strings.ljust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,width,fillchar,res", [ + ('x', 2, '\U0001044E', '\U0001044Ex'), + ('x', 3, '\U0001044E', '\U0001044E\U0001044Ex'), + ('x', 4, '\U0001044E', '\U0001044E\U0001044E\U0001044Ex'), + ]) + def test_rjust(self, buf, width, fillchar, res, dt): + buf = np.array(buf, dtype=dt) + fillchar = np.array(fillchar, dtype=dt) + res = np.array(res, dtype=dt) + assert_array_equal(np.strings.rjust(buf, width, fillchar), res) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "āāāāĀĀĀĀ", "", ""), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "", ""), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_partition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.partition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("buf,sep,res1,res2,res3", [ + ("āāāāĀĀĀĀ", "Ă", "", "", "āāāāĀĀĀĀ"), + ("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"), + ("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"), + ("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "", "", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"), + ("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"), + ]) + def test_rpartition(self, buf, sep, res1, res2, res3, dt): + buf = np.array(buf, dtype=dt) + sep = np.array(sep, dtype=dt) + res1 = np.array(res1, dtype=dt) + res2 = np.array(res2, dtype=dt) + res3 = np.array(res3, dtype=dt) + act1, act2, act3 = np.strings.rpartition(buf, sep) + assert_array_equal(act1, res1) + assert_array_equal(act2, res2) + assert_array_equal(act3, 
res3) + assert_array_equal(act1 + act2 + act3, buf) + + @pytest.mark.parametrize("method", ["strip", "lstrip", "rstrip"]) + @pytest.mark.parametrize( + "source,strip", + [ + ("λμ", "μ"), + ("λμ", "λ"), + ("λ"*5 + "μ"*2, "μ"), + ("λ" * 5 + "μ" * 2, "λ"), + ("λ" * 5 + "A" + "μ" * 2, "μλ"), + ("λμ" * 5, "μ"), + ("λμ" * 5, "λ"), + ]) + def test_strip_functions_unicode(self, source, strip, method, dt): + src_array = np.array([source], dtype=dt) + + npy_func = getattr(np.strings, method) + py_func = getattr(str, method) + + expected = np.array([py_func(source, strip)], dtype=dt) + actual = npy_func(src_array, strip) + + assert_array_equal(actual, expected) + + +class TestMixedTypeMethods: + def test_center(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("*😊*", dtype="U") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("*s*", dtype="S") + assert_array_equal(np.strings.center(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.center(buf, 3, fill) + + def test_ljust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("😊**", dtype="U") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = np.array("s**", dtype="S") + assert_array_equal(np.strings.ljust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.ljust(buf, 3, fill) + + def test_rjust(self): + buf = np.array("😊", dtype="U") + fill = np.array("*", dtype="S") + res = np.array("**😊", dtype="U") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + buf = np.array("s", dtype="S") + fill = np.array("*", dtype="U") + res = 
np.array("**s", dtype="S") + assert_array_equal(np.strings.rjust(buf, 3, fill), res) + + with pytest.raises(ValueError, match="'ascii' codec can't encode"): + buf = np.array("s", dtype="S") + fill = np.array("😊", dtype="U") + np.strings.rjust(buf, 3, fill) class TestUnicodeOnlyMethodsRaiseWithBytes: diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 6bdfde016cb2..26b6a1aa5c27 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -616,6 +616,31 @@ def call_ufunc(arr, **kwargs): expected = call_ufunc(arr.astype(np.float64)) # upcast assert_array_equal(expected, res) + @pytest.mark.parametrize("ufunc", [np.add, np.equal]) + def test_cast_safety_scalar(self, ufunc): + # We test add and equal, because equal has special scalar handling + # Note that the "equiv" casting behavior should maybe be considered + # a current implementation detail. + with pytest.raises(TypeError): + # this picks an integer loop, which is not safe + ufunc(3., 4., dtype=int, casting="safe") + + with pytest.raises(TypeError): + # We accept python float as float64 but not float32 for equiv. + ufunc(3., 4., dtype="float32", casting="equiv") + + # Special case for object and equal (note that equiv implies safe) + ufunc(3, 4, dtype=object, casting="equiv") + # Picks a double loop for both, first is equiv, second safe: + ufunc(np.array([3.]), 3., casting="equiv") + ufunc(np.array([3.]), 3, casting="safe") + ufunc(np.array([3]), 3, casting="equiv") + + def test_cast_safety_scalar_special(self): + # We allow this (and it succeeds) via object, although the equiv + # part may not be important. 
+ np.equal(np.array([3]), 2**300, casting="equiv") + def test_true_divide(self): a = np.array(10) b = np.array(20) @@ -2652,8 +2677,51 @@ def test_nat_is_not_inf(self, nat): pass # ok, just not implemented +class TestGUFuncProcessCoreDims: + + def test_conv1d_full_without_out(self): + x = np.arange(5.0) + y = np.arange(13.0) + w = umt.conv1d_full(x, y) + assert_equal(w, np.convolve(x, y, mode='full')) + + def test_conv1d_full_with_out(self): + x = np.arange(5.0) + y = np.arange(13.0) + out = np.zeros(len(x) + len(y) - 1) + umt.conv1d_full(x, y, out=out) + assert_equal(out, np.convolve(x, y, mode='full')) + + def test_conv1d_full_basic_broadcast(self): + # x.shape is (3, 6) + x = np.array([[1, 3, 0, -10, 2, 2], + [0, -1, 2, 2, 10, 4], + [8, 9, 10, 2, 23, 3]]) + # y.shape is (2, 1, 7) + y = np.array([[[3, 4, 5, 20, 30, 40, 29]], + [[5, 6, 7, 10, 11, 12, -5]]]) + # result should have shape (2, 3, 12) + result = umt.conv1d_full(x, y) + assert result.shape == (2, 3, 12) + for i in range(2): + for j in range(3): + assert_equal(result[i, j], np.convolve(x[j], y[i, 0])) + + def test_bad_out_shape(self): + x = np.ones((1, 2)) + y = np.ones((2, 3)) + out = np.zeros((2, 3)) # Not the correct shape. + with pytest.raises(ValueError, match=r'does not equal m \+ n - 1'): + umt.conv1d_full(x, y, out=out) + + def test_bad_input_both_inputs_length_zero(self): + with pytest.raises(ValueError, + match='both inputs have core dimension 0'): + umt.conv1d_full([], []) + + @pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np) - if isinstance(getattr(np, x), np.ufunc)]) + if isinstance(getattr(np, x), np.ufunc)]) def test_ufunc_types(ufunc): ''' Check all ufuncs that the correct type is returned. 
Avoid @@ -2694,19 +2762,30 @@ def test_ufunc_noncontiguous(ufunc): continue inp, out = typ.split('->') args_c = [np.empty(6, t) for t in inp] + # non contiguous (3 step) args_n = [np.empty(18, t)[::3] for t in inp] - for a in args_c: - a.flat = range(1,7) - for a in args_n: + # alignment != itemsize is possible. So create an array with such + # an odd step manually. + args_o = [] + for t in inp: + orig_dt = np.dtype(t) + off_dt = f"S{orig_dt.alignment}" # offset by alignment + dtype = np.dtype([("_", off_dt), ("t", orig_dt)], align=False) + args_o.append(np.empty(6, dtype=dtype)["t"]) + + for a in args_c + args_n + args_o: a.flat = range(1,7) + with warnings.catch_warnings(record=True): warnings.filterwarnings("always") res_c = ufunc(*args_c) res_n = ufunc(*args_n) + res_o = ufunc(*args_o) if len(out) == 1: res_c = (res_c,) res_n = (res_n,) - for c_ar, n_ar in zip(res_c, res_n): + res_o = (res_o,) + for c_ar, n_ar, o_ar in zip(res_c, res_n, res_o): dt = c_ar.dtype if np.issubdtype(dt, np.floating): # for floating point results allow a small fuss in comparisons @@ -2715,8 +2794,10 @@ def test_ufunc_noncontiguous(ufunc): res_eps = np.finfo(dt).eps tol = 2*res_eps assert_allclose(res_c, res_n, atol=tol, rtol=tol) + assert_allclose(res_c, res_o, atol=tol, rtol=tol) else: assert_equal(c_ar, n_ar) + assert_equal(c_ar, o_ar) @pytest.mark.parametrize('ufunc', [np.sign, np.equal]) @@ -2972,6 +3053,13 @@ def test_resolve_dtypes_basic(self): with pytest.raises(TypeError): np.add.resolve_dtypes((i4, f4, None), casting="no") + def test_resolve_dtypes_comparison(self): + i4 = np.dtype("i4") + i8 = np.dtype("i8") + b = np.dtype("?") + r = np.equal.resolve_dtypes((i4, i8, None)) + assert r == (i8, i8, b) + def test_weird_dtypes(self): S0 = np.dtype("S0") # S0 is often converted by NumPy to S1, but not here: diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index e01e6dd6346b..9a300f19764c 100644 --- a/numpy/_core/tests/test_umath.py +++ 
b/numpy/_core/tests/test_umath.py @@ -18,7 +18,7 @@ assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings, _gen_alignment_data, assert_array_almost_equal_nulp, IS_WASM, IS_MUSL, - IS_PYPY + IS_PYPY, HAS_REFCOUNT ) from numpy.testing._private.utils import _glibc_older_than @@ -263,6 +263,17 @@ def __array_wrap__(self, arr, context=None, return_scalar=False): # Out argument must be tuple, since there are multiple outputs. r1, r2 = np.frexp(d, out=o1, subok=subok) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_out_wrap_no_leak(self): + # Regression test for gh-26545 + class ArrSubclass(np.ndarray): + pass + + arr = np.arange(10).view(ArrSubclass) + + arr *= 1 + assert sys.getrefcount(arr) == 2 + class TestComparisons: import operator @@ -1235,6 +1246,19 @@ def test_float_to_inf_power(self): r = np.array([1, 1, np.inf, 0, np.inf, 0, np.inf, 0], dt) assert_equal(np.power(a, b), r) + def test_power_fast_paths(self): + # gh-26055 + for dt in [np.float32, np.float64]: + a = np.array([0, 1.1, 2, 12e12, -10., np.inf, -np.inf], dt) + expected = np.array([0.0, 1.21, 4., 1.44e+26, 100, np.inf, np.inf]) + result = np.power(a, 2.) + assert_array_max_ulp(result, expected.astype(dt), maxulp=1) + + a = np.array([0, 1.1, 2, 12e12], dt) + expected = np.sqrt(a).astype(dt) + result = np.power(a, 0.5) + assert_array_max_ulp(result, expected, maxulp=1) + class TestFloat_power: def test_type_conversion(self): @@ -1380,6 +1404,36 @@ def test_log_strides(self): assert_array_almost_equal_nulp(np.log(x_f64[::jj]), y_true[::jj], nulp=2) assert_array_almost_equal_nulp(np.log(x_special[::jj]), y_special[::jj], nulp=2) + # Reference values were computed with mpmath, with mp.dps = 200. 
+ @pytest.mark.parametrize( + 'z, wref', + [(1 + 1e-12j, 5e-25 + 1e-12j), + (1.000000000000001 + 3e-08j, + 1.5602230246251546e-15 + 2.999999999999996e-08j), + (0.9999995000000417 + 0.0009999998333333417j, + 7.831475869017683e-18 + 0.001j), + (0.9999999999999996 + 2.999999999999999e-08j, + 5.9107901499372034e-18 + 3e-08j), + (0.99995000042 - 0.009999833j, + -7.015159763822903e-15 - 0.009999999665816696j)], + ) + def test_log_precision_float64(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-15) + + # Reference values were computed with mpmath, with mp.dps = 200. + @pytest.mark.parametrize( + 'z, wref', + [(np.complex64(1.0 + 3e-6j), np.complex64(4.5e-12+3e-06j)), + (np.complex64(1.0 - 2e-5j), np.complex64(1.9999999e-10 - 2e-5j)), + (np.complex64(0.9999999 + 1e-06j), + np.complex64(-1.192088e-07+1.0000001e-06j))], + ) + def test_log_precision_float32(self, z, wref): + w = np.log(z) + assert_allclose(w, wref, rtol=1e-6) + + class TestExp: def test_exp_values(self): x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024] @@ -1844,7 +1898,7 @@ def test_fpclass(self, stride): assert_equal(np.isnan(arr_f64[::stride]), nan[::stride]) assert_equal(np.isinf(arr_f32[::stride]), inf[::stride]) assert_equal(np.isinf(arr_f64[::stride]), inf[::stride]) - if platform.processor() == 'riscv64': + if platform.machine() == 'riscv64': # On RISC-V, many operations that produce NaNs, such as converting # a -NaN from f64 to f32, return a canonical NaN. The canonical # NaNs are always positive. See section 11.3 NaN Generation and @@ -1881,7 +1935,7 @@ def test_fp_noncontiguous(self, dtype): ncontig_out = out[1::3] contig_in = np.array(ncontig_in) - if platform.processor() == 'riscv64': + if platform.machine() == 'riscv64': # Disable the -np.nan signbit tests on riscv64. See comments in # test_fpclass for more details. 
data_rv = np.copy(data) @@ -1920,7 +1974,7 @@ def test_fp_noncontiguous(self, dtype): finite_split = np.array(np.array_split(finite, 2)) assert_equal(np.isnan(data_split), nan_split) assert_equal(np.isinf(data_split), inf_split) - if platform.processor() == 'riscv64': + if platform.machine() == 'riscv64': data_split_rv = np.array(np.array_split(data_rv, 2)) assert_equal(np.signbit(data_split_rv), sign_split) else: @@ -2753,10 +2807,6 @@ def test_reduction(self): def test_bitwise_count(self, input_dtype_obj, bitsize): input_dtype = input_dtype_obj.type - # bitwise_count is only in-built in 3.10+ - if sys.version_info < (3, 10) and input_dtype == np.object_: - pytest.skip("Required Python >=3.10") - for i in range(1, bitsize): num = 2**i - 1 msg = f"bitwise_count for {num}" @@ -2907,7 +2957,7 @@ def test_lower_align(self): def test_reduce_reorder(self): # gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus - # and put it before the call to an intrisic function that causes + # and put it before the call to an intrinsic function that causes # invalid status to be set. 
Also make sure warnings are not emitted for n in (2, 4, 8, 16, 32): for dt in (np.float32, np.float16, np.complex64): @@ -4084,6 +4134,14 @@ def test_huge_integers(self): assert_equal(np.gcd(a, b), [2**100, 2**50 * 3**5]) assert_equal(np.lcm(a, b), [2**100 * 3**5 * 5**7, 2**100 * 3**10]) + def test_inf_and_nan(self): + inf = np.array([np.inf], dtype=np.object_) + assert_raises(ValueError, np.gcd, inf, 1) + assert_raises(ValueError, np.gcd, 1, inf) + assert_raises(ValueError, np.gcd, np.nan, inf) + assert_raises(TypeError, np.gcd, 4, float(np.inf)) + + class TestRoundingFunctions: @@ -4120,6 +4178,15 @@ def test_fraction(self): assert_equal(np.ceil(f), -1) assert_equal(np.trunc(f), -1) + @pytest.mark.parametrize('func', [np.floor, np.ceil, np.trunc]) + @pytest.mark.parametrize('dtype', [np.bool, np.float64, np.float32, + np.int64, np.uint32]) + def test_output_dtype(self, func, dtype): + arr = np.array([-2, 0, 4, 8]).astype(dtype) + result = func(arr) + assert_equal(arr, result) + assert result.dtype == dtype + class TestComplexFunctions: funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh, diff --git a/numpy/_core/umath.py b/numpy/_core/umath.py index 7543560a65dd..8e51cd1694af 100644 --- a/numpy/_core/umath.py +++ b/numpy/_core/umath.py @@ -19,7 +19,8 @@ from ._multiarray_umath import ( _replace, _strip_whitespace, _lstrip_whitespace, _rstrip_whitespace, _strip_chars, _lstrip_chars, _rstrip_chars, _expandtabs_length, - _expandtabs) + _expandtabs, _center, _ljust, _rjust, _zfill, _partition, _partition_index, + _rpartition, _rpartition_index) __all__ = [ 'absolute', 'add', @@ -36,4 +37,4 @@ 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift', 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', - 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot'] + 'subtract', 'tan', 'tanh', 'true_divide', 'trunc'] diff --git a/numpy/_expired_attrs_2_0.py 
b/numpy/_expired_attrs_2_0.py index 1dad38c5a60f..06de514e35e4 100644 --- a/numpy/_expired_attrs_2_0.py +++ b/numpy/_expired_attrs_2_0.py @@ -75,4 +75,6 @@ "compare_chararrays": "It's still available as `np.char.compare_chararrays`.", "format_parser": "It's still available as `np.rec.format_parser`.", + "alltrue": "Use `np.all` instead.", + "sometrue": "Use `np.any` instead.", } diff --git a/numpy/_pyinstaller/hook-numpy.py b/numpy/_pyinstaller/hook-numpy.py index 0b3b46f2598a..84f3626b43d5 100644 --- a/numpy/_pyinstaller/hook-numpy.py +++ b/numpy/_pyinstaller/hook-numpy.py @@ -31,7 +31,6 @@ "pytest", "f2py", "setuptools", - "numpy.f2py", "distutils", "numpy.distutils", ] diff --git a/numpy/_pyinstaller/tests/__init__.py b/numpy/_pyinstaller/tests/__init__.py new file mode 100644 index 000000000000..f7c033bcf503 --- /dev/null +++ b/numpy/_pyinstaller/tests/__init__.py @@ -0,0 +1,16 @@ +from numpy.testing import IS_WASM, IS_EDITABLE +import pytest + + +if IS_WASM: + pytest.skip( + "WASM/Pyodide does not use or support Fortran", + allow_module_level=True + ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/numpy/_pyinstaller/pyinstaller-smoke.py b/numpy/_pyinstaller/tests/pyinstaller-smoke.py similarity index 100% rename from numpy/_pyinstaller/pyinstaller-smoke.py rename to numpy/_pyinstaller/tests/pyinstaller-smoke.py diff --git a/numpy/_pyinstaller/test_pyinstaller.py b/numpy/_pyinstaller/tests/test_pyinstaller.py similarity index 100% rename from numpy/_pyinstaller/test_pyinstaller.py rename to numpy/_pyinstaller/tests/test_pyinstaller.py diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 722d713a7076..758d1a5be5ea 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -136,7 +136,7 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[typing.Any, 
numpy.dtype[+ScalarType]] + numpy.ndarray[typing.Any, numpy.dtype[+_ScalarType_co]] >>> print(npt.NDArray[np.float64]) numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]] diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 33255693806e..5cc501ab3ec5 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -2,7 +2,7 @@ import sys from collections.abc import Collection, Callable, Sequence -from typing import Any, Protocol, Union, TypeVar, runtime_checkable +from typing import Any, Protocol, TypeAlias, TypeVar, runtime_checkable import numpy as np from numpy import ( @@ -29,7 +29,7 @@ _DType = TypeVar("_DType", bound=dtype[Any]) _DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any]) -NDArray = ndarray[Any, dtype[_ScalarType_co]] +NDArray: TypeAlias = ndarray[Any, dtype[_ScalarType_co]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned @@ -54,101 +54,102 @@ def __array_function__( # TODO: Wait until mypy supports recursive objects in combination with typevars -_FiniteNestedSequence = Union[ - _T, - Sequence[_T], - Sequence[Sequence[_T]], - Sequence[Sequence[Sequence[_T]]], - Sequence[Sequence[Sequence[Sequence[_T]]]], -] +_FiniteNestedSequence: TypeAlias = ( + _T + | Sequence[_T] + | Sequence[Sequence[_T]] + | Sequence[Sequence[Sequence[_T]]] + | Sequence[Sequence[Sequence[Sequence[_T]]]] +) # A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic` -_ArrayLike = Union[ - _SupportsArray[dtype[_ScalarType]], - _NestedSequence[_SupportsArray[dtype[_ScalarType]]], -] +_ArrayLike: TypeAlias = ( + _SupportsArray[dtype[_ScalarType]] + | _NestedSequence[_SupportsArray[dtype[_ScalarType]]] +) # A union representing array-like objects; consists of two typevars: # One representing types that can be parametrized w.r.t. 
`np.dtype` # and another one for the rest -_DualArrayLike = Union[ - _SupportsArray[_DType], - _NestedSequence[_SupportsArray[_DType]], - _T, - _NestedSequence[_T], -] +_DualArrayLike: TypeAlias = ( + _SupportsArray[_DType] + | _NestedSequence[_SupportsArray[_DType]] + | _T + | _NestedSequence[_T] +) if sys.version_info >= (3, 12): from collections.abc import Buffer - ArrayLike = Buffer | _DualArrayLike[ + ArrayLike: TypeAlias = Buffer | _DualArrayLike[ dtype[Any], - Union[bool, int, float, complex, str, bytes], + bool | int | float | complex | str | bytes, ] else: - ArrayLike = _DualArrayLike[ + ArrayLike: TypeAlias = _DualArrayLike[ dtype[Any], - Union[bool, int, float, complex, str, bytes], + bool | int | float | complex | str | bytes, ] # `ArrayLike_co`: array-like objects that can be coerced into `X` # given the casting rules `same_kind` -_ArrayLikeBool_co = _DualArrayLike[ +_ArrayLikeBool_co: TypeAlias = _DualArrayLike[ dtype[np.bool], bool, ] -_ArrayLikeUInt_co = _DualArrayLike[ - dtype[Union[np.bool, unsignedinteger[Any]]], +_ArrayLikeUInt_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[unsignedinteger[Any]], bool, ] -_ArrayLikeInt_co = _DualArrayLike[ - dtype[Union[np.bool, integer[Any]]], - Union[bool, int], -] -_ArrayLikeFloat_co = _DualArrayLike[ - dtype[Union[np.bool, integer[Any], floating[Any]]], - Union[bool, int, float], -] -_ArrayLikeComplex_co = _DualArrayLike[ - dtype[Union[ - np.bool, - integer[Any], - floating[Any], - complexfloating[Any, Any], - ]], - Union[bool, int, float, complex], -] -_ArrayLikeNumber_co = _DualArrayLike[ - dtype[Union[np.bool, number[Any]]], - Union[bool, int, float, complex], -] -_ArrayLikeTD64_co = _DualArrayLike[ - dtype[Union[np.bool, integer[Any], timedelta64]], - Union[bool, int], -] -_ArrayLikeDT64_co = Union[ - _SupportsArray[dtype[datetime64]], - _NestedSequence[_SupportsArray[dtype[datetime64]]], -] -_ArrayLikeObject_co = Union[ - _SupportsArray[dtype[object_]], - 
_NestedSequence[_SupportsArray[dtype[object_]]], -] +_ArrayLikeInt_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[integer[Any]], + bool | int, +] +_ArrayLikeFloat_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[integer[Any]] | dtype[floating[Any]], + bool | int | float, +] +_ArrayLikeComplex_co: TypeAlias = _DualArrayLike[ + ( + dtype[np.bool] + | dtype[integer[Any]] + | dtype[floating[Any]] + | dtype[complexfloating[Any, Any]] + ), + bool | int | float | complex, +] +_ArrayLikeNumber_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[number[Any]], + bool | int | float | complex, +] +_ArrayLikeTD64_co: TypeAlias = _DualArrayLike[ + dtype[np.bool] | dtype[integer[Any]] | dtype[timedelta64], + bool | int, +] +_ArrayLikeDT64_co: TypeAlias = ( + _SupportsArray[dtype[datetime64]] + | _NestedSequence[_SupportsArray[dtype[datetime64]]] +) +_ArrayLikeObject_co: TypeAlias = ( + _SupportsArray[dtype[object_]] + | _NestedSequence[_SupportsArray[dtype[object_]]] +) -_ArrayLikeVoid_co = Union[ - _SupportsArray[dtype[void]], - _NestedSequence[_SupportsArray[dtype[void]]], -] -_ArrayLikeStr_co = _DualArrayLike[ +_ArrayLikeVoid_co: TypeAlias = ( + _SupportsArray[dtype[void]] + | _NestedSequence[_SupportsArray[dtype[void]]] +) +_ArrayLikeStr_co: TypeAlias = _DualArrayLike[ dtype[str_], str, ] -_ArrayLikeBytes_co = _DualArrayLike[ +_ArrayLikeBytes_co: TypeAlias = _DualArrayLike[ dtype[bytes_], bytes, ] -_ArrayLikeInt = _DualArrayLike[ +# NOTE: This includes `builtins.bool`, but not `numpy.bool`. +_ArrayLikeInt: TypeAlias = _DualArrayLike[ dtype[integer[Any]], int, ] @@ -157,11 +158,13 @@ def __array_function__( # Used as the first overload, should only match NDArray[Any], # not any actual types. # https://github.com/numpy/numpy/pull/22193 -class _UnknownType: - ... 
+if sys.version_info >= (3, 11): + from typing import Never as _UnknownType +else: + from typing import NoReturn as _UnknownType -_ArrayLikeUnknown = _DualArrayLike[ +_ArrayLikeUnknown: TypeAlias = _DualArrayLike[ dtype[_UnknownType], _UnknownType, ] diff --git a/numpy/_typing/_callable.pyi b/numpy/_typing/_callable.pyi index 843a0d07c2fb..2dd2233665fc 100644 --- a/numpy/_typing/_callable.pyi +++ b/numpy/_typing/_callable.pyi @@ -11,7 +11,9 @@ See the `Mypy documentation`_ on protocols for more details. from __future__ import annotations from typing import ( + TypeAlias, TypeVar, + final, overload, Any, NoReturn, @@ -48,7 +50,8 @@ _T1 = TypeVar("_T1") _T2 = TypeVar("_T2") _T1_contra = TypeVar("_T1_contra", contravariant=True) _T2_contra = TypeVar("_T2_contra", contravariant=True) -_2Tuple = tuple[_T1, _T1] + +_2Tuple: TypeAlias = tuple[_T1, _T1] _NBit1 = TypeVar("_NBit1", bound=NBitBase) _NBit2 = TypeVar("_NBit2", bound=NBitBase) @@ -317,20 +320,62 @@ class _ComplexOp(Protocol[_NBit1]): class _NumberOp(Protocol): def __call__(self, other: _NumberLike_co, /) -> Any: ... +@final class _SupportsLT(Protocol): - def __lt__(self, other: Any, /) -> object: ... + def __lt__(self, other: Any, /) -> Any: ... + +@final +class _SupportsLE(Protocol): + def __le__(self, other: Any, /) -> Any: ... +@final class _SupportsGT(Protocol): - def __gt__(self, other: Any, /) -> object: ... + def __gt__(self, other: Any, /) -> Any: ... -class _ComparisonOp(Protocol[_T1_contra, _T2_contra]): +@final +class _SupportsGE(Protocol): + def __ge__(self, other: Any, /) -> Any: ... + +@final +class _ComparisonOpLT(Protocol[_T1_contra, _T2_contra]): @overload def __call__(self, other: _T1_contra, /) -> np.bool: ... @overload def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... @overload - def __call__( - self, - other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT], - /, - ) -> Any: ... 
+ def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... + +@final +class _ComparisonOpLE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGE], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGE, /) -> np.bool: ... + +@final +class _ComparisonOpGT(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsLT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsLT, /) -> np.bool: ... + +@final +class _ComparisonOpGE(Protocol[_T1_contra, _T2_contra]): + @overload + def __call__(self, other: _T1_contra, /) -> np.bool: ... + @overload + def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _NestedSequence[_SupportsGT], /) -> NDArray[np.bool]: ... + @overload + def __call__(self, other: _SupportsGT, /) -> np.bool: ... 
diff --git a/numpy/_typing/_char_codes.py b/numpy/_typing/_char_codes.py index e5c4fa5d1bd2..1d36cc81e018 100644 --- a/numpy/_typing/_char_codes.py +++ b/numpy/_typing/_char_codes.py @@ -1,113 +1,141 @@ from typing import Literal -_BoolCodes = Literal["?", "=?", "?", "bool", "bool_"] +_BoolCodes = Literal["bool", "bool_", "?", "|?", "=?", "?"] -_UInt8Codes = Literal["uint8", "u1", "=u1", "u1"] -_UInt16Codes = Literal["uint16", "u2", "=u2", "u2"] -_UInt32Codes = Literal["uint32", "u4", "=u4", "u4"] -_UInt64Codes = Literal["uint64", "u8", "=u8", "u8"] +_UInt8Codes = Literal["uint8", "u1", "|u1", "=u1", "u1"] +_UInt16Codes = Literal["uint16", "u2", "|u2", "=u2", "u2"] +_UInt32Codes = Literal["uint32", "u4", "|u4", "=u4", "u4"] +_UInt64Codes = Literal["uint64", "u8", "|u8", "=u8", "u8"] -_Int8Codes = Literal["int8", "i1", "=i1", "i1"] -_Int16Codes = Literal["int16", "i2", "=i2", "i2"] -_Int32Codes = Literal["int32", "i4", "=i4", "i4"] -_Int64Codes = Literal["int64", "i8", "=i8", "i8"] +_Int8Codes = Literal["int8", "i1", "|i1", "=i1", "i1"] +_Int16Codes = Literal["int16", "i2", "|i2", "=i2", "i2"] +_Int32Codes = Literal["int32", "i4", "|i4", "=i4", "i4"] +_Int64Codes = Literal["int64", "i8", "|i8", "=i8", "i8"] -_Float16Codes = Literal["float16", "f2", "=f2", "f2"] -_Float32Codes = Literal["float32", "f4", "=f4", "f4"] -_Float64Codes = Literal["float64", "f8", "=f8", "f8"] +_Float16Codes = Literal["float16", "f2", "|f2", "=f2", "f2"] +_Float32Codes = Literal["float32", "f4", "|f4", "=f4", "f4"] +_Float64Codes = Literal["float64", "f8", "|f8", "=f8", "f8"] -_Complex64Codes = Literal["complex64", "c8", "=c8", "c8"] -_Complex128Codes = Literal["complex128", "c16", "=c16", "c16"] +_Complex64Codes = Literal["complex64", "c8", "|c8", "=c8", "c8"] +_Complex128Codes = Literal["complex128", "c16", "|c16", "=c16", "c16"] -_ByteCodes = Literal["byte", "b", "=b", "b"] -_ShortCodes = Literal["short", "h", "=h", "h"] -_IntCCodes = Literal["intc", "i", "=i", "i"] -_IntPCodes = 
Literal["intp", "int", "int_", "n", "=n", "n"] -_LongCodes = Literal["long", "l", "=l", "l"] +_ByteCodes = Literal["byte", "b", "|b", "=b", "b"] +_ShortCodes = Literal["short", "h", "|h", "=h", "h"] +_IntCCodes = Literal["intc", "i", "|i", "=i", "i"] +_IntPCodes = Literal["intp", "int", "int_", "n", "|n", "=n", "n"] +_LongCodes = Literal["long", "l", "|l", "=l", "l"] _IntCodes = _IntPCodes -_LongLongCodes = Literal["longlong", "q", "=q", "q"] +_LongLongCodes = Literal["longlong", "q", "|q", "=q", "q"] -_UByteCodes = Literal["ubyte", "B", "=B", "B"] -_UShortCodes = Literal["ushort", "H", "=H", "H"] -_UIntCCodes = Literal["uintc", "I", "=I", "I"] -_UIntPCodes = Literal["uintp", "uint", "N", "=N", "N"] -_ULongCodes = Literal["ulong", "L", "=L", "L"] +_UByteCodes = Literal["ubyte", "B", "|B", "=B", "B"] +_UShortCodes = Literal["ushort", "H", "|H", "=H", "H"] +_UIntCCodes = Literal["uintc", "I", "|I", "=I", "I"] +_UIntPCodes = Literal["uintp", "uint", "N", "|N", "=N", "N"] +_ULongCodes = Literal["ulong", "L", "|L", "=L", "L"] _UIntCodes = _UIntPCodes -_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "Q"] +_ULongLongCodes = Literal["ulonglong", "Q", "|Q", "=Q", "Q"] -_HalfCodes = Literal["half", "e", "=e", "e"] -_SingleCodes = Literal["single", "f", "=f", "f"] -_DoubleCodes = Literal["double", "float", "d", "=d", "d"] -_LongDoubleCodes = Literal["longdouble", "g", "=g", "g"] +_HalfCodes = Literal["half", "e", "|e", "=e", "e"] +_SingleCodes = Literal["single", "f", "|f", "=f", "f"] +_DoubleCodes = Literal["double", "float", "d", "|d", "=d", "d"] +_LongDoubleCodes = Literal["longdouble", "g", "|g", "=g", "g"] -_CSingleCodes = Literal["csingle", "F", "=F", "F"] -_CDoubleCodes = Literal["cdouble", "complex", "D", "=D", "D"] -_CLongDoubleCodes = Literal["clongdouble", "G", "=G", "G"] +_CSingleCodes = Literal["csingle", "F", "|F", "=F", "F"] +_CDoubleCodes = Literal["cdouble", "complex", "D", "|D", "=D", "D"] +_CLongDoubleCodes = Literal["clongdouble", "G", "|G", "=G", "G"] 
-_StrCodes = Literal["str", "str_", "unicode", "U", "=U", "U"] -_BytesCodes = Literal["bytes", "bytes_", "S", "=S", "S"] -_VoidCodes = Literal["void", "V", "=V", "V"] -_ObjectCodes = Literal["object", "object_", "O", "=O", "O"] +_StrCodes = Literal["str", "str_", "unicode", "U", "|U", "=U", "U"] +_BytesCodes = Literal["bytes", "bytes_", "S", "|S", "=S", "S"] +_VoidCodes = Literal["void", "V", "|V", "=V", "V"] +_ObjectCodes = Literal["object", "object_", "O", "|O", "=O", "O"] _DT64Codes = Literal[ - "datetime64", "=datetime64", "datetime64", - "datetime64[Y]", "=datetime64[Y]", "datetime64[Y]", - "datetime64[M]", "=datetime64[M]", "datetime64[M]", - "datetime64[W]", "=datetime64[W]", "datetime64[W]", - "datetime64[D]", "=datetime64[D]", "datetime64[D]", - "datetime64[h]", "=datetime64[h]", "datetime64[h]", - "datetime64[m]", "=datetime64[m]", "datetime64[m]", - "datetime64[s]", "=datetime64[s]", "datetime64[s]", - "datetime64[ms]", "=datetime64[ms]", "datetime64[ms]", - "datetime64[us]", "=datetime64[us]", "datetime64[us]", - "datetime64[ns]", "=datetime64[ns]", "datetime64[ns]", - "datetime64[ps]", "=datetime64[ps]", "datetime64[ps]", - "datetime64[fs]", "=datetime64[fs]", "datetime64[fs]", - "datetime64[as]", "=datetime64[as]", "datetime64[as]", - "M", "=M", "M", - "M8", "=M8", "M8", - "M8[Y]", "=M8[Y]", "M8[Y]", - "M8[M]", "=M8[M]", "M8[M]", - "M8[W]", "=M8[W]", "M8[W]", - "M8[D]", "=M8[D]", "M8[D]", - "M8[h]", "=M8[h]", "M8[h]", - "M8[m]", "=M8[m]", "M8[m]", - "M8[s]", "=M8[s]", "M8[s]", - "M8[ms]", "=M8[ms]", "M8[ms]", - "M8[us]", "=M8[us]", "M8[us]", - "M8[ns]", "=M8[ns]", "M8[ns]", - "M8[ps]", "=M8[ps]", "M8[ps]", - "M8[fs]", "=M8[fs]", "M8[fs]", - "M8[as]", "=M8[as]", "M8[as]", + "datetime64", "|datetime64", "=datetime64", + "datetime64", + "datetime64[Y]", "|datetime64[Y]", "=datetime64[Y]", + "datetime64[Y]", + "datetime64[M]", "|datetime64[M]", "=datetime64[M]", + "datetime64[M]", + "datetime64[W]", "|datetime64[W]", "=datetime64[W]", + "datetime64[W]", + 
"datetime64[D]", "|datetime64[D]", "=datetime64[D]", + "datetime64[D]", + "datetime64[h]", "|datetime64[h]", "=datetime64[h]", + "datetime64[h]", + "datetime64[m]", "|datetime64[m]", "=datetime64[m]", + "datetime64[m]", + "datetime64[s]", "|datetime64[s]", "=datetime64[s]", + "datetime64[s]", + "datetime64[ms]", "|datetime64[ms]", "=datetime64[ms]", + "datetime64[ms]", + "datetime64[us]", "|datetime64[us]", "=datetime64[us]", + "datetime64[us]", + "datetime64[ns]", "|datetime64[ns]", "=datetime64[ns]", + "datetime64[ns]", + "datetime64[ps]", "|datetime64[ps]", "=datetime64[ps]", + "datetime64[ps]", + "datetime64[fs]", "|datetime64[fs]", "=datetime64[fs]", + "datetime64[fs]", + "datetime64[as]", "|datetime64[as]", "=datetime64[as]", + "datetime64[as]", + "M", "|M", "=M", "M", + "M8", "|M8", "=M8", "M8", + "M8[Y]", "|M8[Y]", "=M8[Y]", "M8[Y]", + "M8[M]", "|M8[M]", "=M8[M]", "M8[M]", + "M8[W]", "|M8[W]", "=M8[W]", "M8[W]", + "M8[D]", "|M8[D]", "=M8[D]", "M8[D]", + "M8[h]", "|M8[h]", "=M8[h]", "M8[h]", + "M8[m]", "|M8[m]", "=M8[m]", "M8[m]", + "M8[s]", "|M8[s]", "=M8[s]", "M8[s]", + "M8[ms]", "|M8[ms]", "=M8[ms]", "M8[ms]", + "M8[us]", "|M8[us]", "=M8[us]", "M8[us]", + "M8[ns]", "|M8[ns]", "=M8[ns]", "M8[ns]", + "M8[ps]", "|M8[ps]", "=M8[ps]", "M8[ps]", + "M8[fs]", "|M8[fs]", "=M8[fs]", "M8[fs]", + "M8[as]", "|M8[as]", "=M8[as]", "M8[as]", ] _TD64Codes = Literal[ - "timedelta64", "=timedelta64", "timedelta64", - "timedelta64[Y]", "=timedelta64[Y]", "timedelta64[Y]", - "timedelta64[M]", "=timedelta64[M]", "timedelta64[M]", - "timedelta64[W]", "=timedelta64[W]", "timedelta64[W]", - "timedelta64[D]", "=timedelta64[D]", "timedelta64[D]", - "timedelta64[h]", "=timedelta64[h]", "timedelta64[h]", - "timedelta64[m]", "=timedelta64[m]", "timedelta64[m]", - "timedelta64[s]", "=timedelta64[s]", "timedelta64[s]", - "timedelta64[ms]", "=timedelta64[ms]", "timedelta64[ms]", - "timedelta64[us]", "=timedelta64[us]", "timedelta64[us]", - "timedelta64[ns]", "=timedelta64[ns]", 
"timedelta64[ns]", - "timedelta64[ps]", "=timedelta64[ps]", "timedelta64[ps]", - "timedelta64[fs]", "=timedelta64[fs]", "timedelta64[fs]", - "timedelta64[as]", "=timedelta64[as]", "timedelta64[as]", - "m", "=m", "m", - "m8", "=m8", "m8", - "m8[Y]", "=m8[Y]", "m8[Y]", - "m8[M]", "=m8[M]", "m8[M]", - "m8[W]", "=m8[W]", "m8[W]", - "m8[D]", "=m8[D]", "m8[D]", - "m8[h]", "=m8[h]", "m8[h]", - "m8[m]", "=m8[m]", "m8[m]", - "m8[s]", "=m8[s]", "m8[s]", - "m8[ms]", "=m8[ms]", "m8[ms]", - "m8[us]", "=m8[us]", "m8[us]", - "m8[ns]", "=m8[ns]", "m8[ns]", - "m8[ps]", "=m8[ps]", "m8[ps]", - "m8[fs]", "=m8[fs]", "m8[fs]", - "m8[as]", "=m8[as]", "m8[as]", + "timedelta64", "|timedelta64", "=timedelta64", + "timedelta64", + "timedelta64[Y]", "|timedelta64[Y]", "=timedelta64[Y]", + "timedelta64[Y]", + "timedelta64[M]", "|timedelta64[M]", "=timedelta64[M]", + "timedelta64[M]", + "timedelta64[W]", "|timedelta64[W]", "=timedelta64[W]", + "timedelta64[W]", + "timedelta64[D]", "|timedelta64[D]", "=timedelta64[D]", + "timedelta64[D]", + "timedelta64[h]", "|timedelta64[h]", "=timedelta64[h]", + "timedelta64[h]", + "timedelta64[m]", "|timedelta64[m]", "=timedelta64[m]", + "timedelta64[m]", + "timedelta64[s]", "|timedelta64[s]", "=timedelta64[s]", + "timedelta64[s]", + "timedelta64[ms]", "|timedelta64[ms]", "=timedelta64[ms]", + "timedelta64[ms]", + "timedelta64[us]", "|timedelta64[us]", "=timedelta64[us]", + "timedelta64[us]", + "timedelta64[ns]", "|timedelta64[ns]", "=timedelta64[ns]", + "timedelta64[ns]", + "timedelta64[ps]", "|timedelta64[ps]", "=timedelta64[ps]", + "timedelta64[ps]", + "timedelta64[fs]", "|timedelta64[fs]", "=timedelta64[fs]", + "timedelta64[fs]", + "timedelta64[as]", "|timedelta64[as]", "=timedelta64[as]", + "timedelta64[as]", + "m", "|m", "=m", "m", + "m8", "|m8", "=m8", "m8", + "m8[Y]", "|m8[Y]", "=m8[Y]", "m8[Y]", + "m8[M]", "|m8[M]", "=m8[M]", "m8[M]", + "m8[W]", "|m8[W]", "=m8[W]", "m8[W]", + "m8[D]", "|m8[D]", "=m8[D]", "m8[D]", + "m8[h]", "|m8[h]", "=m8[h]", 
"m8[h]", + "m8[m]", "|m8[m]", "=m8[m]", "m8[m]", + "m8[s]", "|m8[s]", "=m8[s]", "m8[s]", + "m8[ms]", "|m8[ms]", "=m8[ms]", "m8[ms]", + "m8[us]", "|m8[us]", "=m8[us]", "m8[us]", + "m8[ns]", "|m8[ns]", "=m8[ns]", "m8[ns]", + "m8[ps]", "|m8[ps]", "=m8[ps]", "m8[ps]", + "m8[fs]", "|m8[fs]", "=m8[fs]", "m8[fs]", + "m8[as]", "|m8[as]", "=m8[as]", "m8[as]", ] diff --git a/numpy/_typing/_dtype_like.py b/numpy/_typing/_dtype_like.py index 73a5f7d7b5a7..b68b5337219d 100644 --- a/numpy/_typing/_dtype_like.py +++ b/numpy/_typing/_dtype_like.py @@ -1,8 +1,7 @@ from collections.abc import Sequence from typing import ( Any, - Sequence, - Union, + TypeAlias, TypeVar, Protocol, TypedDict, @@ -60,7 +59,7 @@ _SCT = TypeVar("_SCT", bound=np.generic) _DType_co = TypeVar("_DType_co", covariant=True, bound=np.dtype[Any]) -_DTypeLikeNested = Any # TODO: wait for support for recursive types +_DTypeLikeNested: TypeAlias = Any # TODO: wait for support for recursive types # Mandatory keys @@ -87,164 +86,164 @@ def dtype(self) -> _DType_co: ... # A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic` -_DTypeLike = Union[ - np.dtype[_SCT], - type[_SCT], - _SupportsDType[np.dtype[_SCT]], -] +_DTypeLike: TypeAlias = ( + np.dtype[_SCT] + | type[_SCT] + | _SupportsDType[np.dtype[_SCT]] +) # Would create a dtype[np.void] -_VoidDTypeLike = Union[ +_VoidDTypeLike: TypeAlias = ( # (flexible_dtype, itemsize) - tuple[_DTypeLikeNested, int], + tuple[_DTypeLikeNested, int] # (fixed_dtype, shape) - tuple[_DTypeLikeNested, _ShapeLike], + | tuple[_DTypeLikeNested, _ShapeLike] # [(field_name, field_dtype, field_shape), ...] # # The type here is quite broad because NumPy accepts quite a wide # range of inputs inside the list; see the tests for some # examples. 
- list[Any], + | list[Any] # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ..., # 'itemsize': ...} - _DTypeDict, + | _DTypeDict # (base_dtype, new_dtype) - tuple[_DTypeLikeNested, _DTypeLikeNested], -] + | tuple[_DTypeLikeNested, _DTypeLikeNested] +) # Anything that can be coerced into numpy.dtype. # Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html -DTypeLike = Union[ - np.dtype[Any], +DTypeLike: TypeAlias = ( + np.dtype[Any] # default data type (float64) - None, + | None # array-scalar types and generic types - type[Any], # NOTE: We're stuck with `type[Any]` due to object dtypes + | type[Any] # NOTE: We're stuck with `type[Any]` due to object dtypes # anything with a dtype attribute - _SupportsDType[np.dtype[Any]], + | _SupportsDType[np.dtype[Any]] # character codes, type strings or comma-separated fields, e.g., 'float64' - str, - _VoidDTypeLike, -] + | str + | _VoidDTypeLike +) # NOTE: while it is possible to provide the dtype as a dict of # dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`), # this syntax is officially discourged and -# therefore not included in the Union defining `DTypeLike`. +# therefore not included in the type-union defining `DTypeLike`. # # See https://github.com/numpy/numpy/issues/16891 for more details. # Aliases for commonly used dtype-like objects. # Note that the precision of `np.number` subclasses is ignored herein. 
-_DTypeLikeBool = Union[ - type[bool], - type[np.bool], - np.dtype[np.bool], - _SupportsDType[np.dtype[np.bool]], - _BoolCodes, -] -_DTypeLikeUInt = Union[ - type[np.unsignedinteger], - np.dtype[np.unsignedinteger], - _SupportsDType[np.dtype[np.unsignedinteger]], - _UInt8Codes, - _UInt16Codes, - _UInt32Codes, - _UInt64Codes, - _UByteCodes, - _UShortCodes, - _UIntCCodes, - _LongCodes, - _ULongLongCodes, - _UIntPCodes, - _UIntCodes, -] -_DTypeLikeInt = Union[ - type[int], - type[np.signedinteger], - np.dtype[np.signedinteger], - _SupportsDType[np.dtype[np.signedinteger]], - _Int8Codes, - _Int16Codes, - _Int32Codes, - _Int64Codes, - _ByteCodes, - _ShortCodes, - _IntCCodes, - _LongCodes, - _LongLongCodes, - _IntPCodes, - _IntCodes, -] -_DTypeLikeFloat = Union[ - type[float], - type[np.floating], - np.dtype[np.floating], - _SupportsDType[np.dtype[np.floating]], - _Float16Codes, - _Float32Codes, - _Float64Codes, - _HalfCodes, - _SingleCodes, - _DoubleCodes, - _LongDoubleCodes, -] -_DTypeLikeComplex = Union[ - type[complex], - type[np.complexfloating], - np.dtype[np.complexfloating], - _SupportsDType[np.dtype[np.complexfloating]], - _Complex64Codes, - _Complex128Codes, - _CSingleCodes, - _CDoubleCodes, - _CLongDoubleCodes, -] -_DTypeLikeDT64 = Union[ - type[np.timedelta64], - np.dtype[np.timedelta64], - _SupportsDType[np.dtype[np.timedelta64]], - _TD64Codes, -] -_DTypeLikeTD64 = Union[ - type[np.datetime64], - np.dtype[np.datetime64], - _SupportsDType[np.dtype[np.datetime64]], - _DT64Codes, -] -_DTypeLikeStr = Union[ - type[str], - type[np.str_], - np.dtype[np.str_], - _SupportsDType[np.dtype[np.str_]], - _StrCodes, -] -_DTypeLikeBytes = Union[ - type[bytes], - type[np.bytes_], - np.dtype[np.bytes_], - _SupportsDType[np.dtype[np.bytes_]], - _BytesCodes, -] -_DTypeLikeVoid = Union[ - type[np.void], - np.dtype[np.void], - _SupportsDType[np.dtype[np.void]], - _VoidCodes, - _VoidDTypeLike, -] -_DTypeLikeObject = Union[ - type, - np.dtype[np.object_], - 
_SupportsDType[np.dtype[np.object_]], - _ObjectCodes, -] - -_DTypeLikeComplex_co = Union[ - _DTypeLikeBool, - _DTypeLikeUInt, - _DTypeLikeInt, - _DTypeLikeFloat, - _DTypeLikeComplex, -] +_DTypeLikeBool: TypeAlias = ( + type[bool] + | type[np.bool] + | np.dtype[np.bool] + | _SupportsDType[np.dtype[np.bool]] + | _BoolCodes +) +_DTypeLikeUInt: TypeAlias = ( + type[np.unsignedinteger] + | np.dtype[np.unsignedinteger] + | _SupportsDType[np.dtype[np.unsignedinteger]] + | _UInt8Codes + | _UInt16Codes + | _UInt32Codes + | _UInt64Codes + | _UByteCodes + | _UShortCodes + | _UIntCCodes + | _LongCodes + | _ULongLongCodes + | _UIntPCodes + | _UIntCodes +) +_DTypeLikeInt: TypeAlias = ( + type[int] + | type[np.signedinteger] + | np.dtype[np.signedinteger] + | _SupportsDType[np.dtype[np.signedinteger]] + | _Int8Codes + | _Int16Codes + | _Int32Codes + | _Int64Codes + | _ByteCodes + | _ShortCodes + | _IntCCodes + | _LongCodes + | _LongLongCodes + | _IntPCodes + | _IntCodes +) +_DTypeLikeFloat: TypeAlias = ( + type[float] + | type[np.floating] + | np.dtype[np.floating] + | _SupportsDType[np.dtype[np.floating]] + | _Float16Codes + | _Float32Codes + | _Float64Codes + | _HalfCodes + | _SingleCodes + | _DoubleCodes + | _LongDoubleCodes +) +_DTypeLikeComplex: TypeAlias = ( + type[complex] + | type[np.complexfloating] + | np.dtype[np.complexfloating] + | _SupportsDType[np.dtype[np.complexfloating]] + | _Complex64Codes + | _Complex128Codes + | _CSingleCodes + | _CDoubleCodes + | _CLongDoubleCodes +) +_DTypeLikeDT64: TypeAlias = ( + type[np.timedelta64] + | np.dtype[np.timedelta64] + | _SupportsDType[np.dtype[np.timedelta64]] + | _TD64Codes +) +_DTypeLikeTD64: TypeAlias = ( + type[np.datetime64] + | np.dtype[np.datetime64] + | _SupportsDType[np.dtype[np.datetime64]] + | _DT64Codes +) +_DTypeLikeStr: TypeAlias = ( + type[str] + | type[np.str_] + | np.dtype[np.str_] + | _SupportsDType[np.dtype[np.str_]] + | _StrCodes +) +_DTypeLikeBytes: TypeAlias = ( + type[bytes] + | type[np.bytes_] + | 
np.dtype[np.bytes_] + | _SupportsDType[np.dtype[np.bytes_]] + | _BytesCodes +) +_DTypeLikeVoid: TypeAlias = ( + type[np.void] + | np.dtype[np.void] + | _SupportsDType[np.dtype[np.void]] + | _VoidCodes + | _VoidDTypeLike +) +_DTypeLikeObject: TypeAlias = ( + type + | np.dtype[np.object_] + | _SupportsDType[np.dtype[np.object_]] + | _ObjectCodes +) + +_DTypeLikeComplex_co: TypeAlias = ( + _DTypeLikeBool + | _DTypeLikeUInt + | _DTypeLikeInt + | _DTypeLikeFloat + | _DTypeLikeComplex +) diff --git a/numpy/_typing/_scalars.py b/numpy/_typing/_scalars.py index b9274e867c83..97316d0209ba 100644 --- a/numpy/_typing/_scalars.py +++ b/numpy/_typing/_scalars.py @@ -1,30 +1,27 @@ -from typing import Union, Any +from typing import Any, TypeAlias import numpy as np # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and # `np.bytes_` are already subclasses of their builtin counterpart -_CharLike_co = Union[str, bytes] +_CharLike_co: TypeAlias = str | bytes # The 6 `Like_co` type-aliases below represent all scalars that can be # coerced into `` (with the casting rule `same_kind`) -_BoolLike_co = Union[bool, np.bool] -_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger[Any]] -_IntLike_co = Union[_BoolLike_co, int, np.integer[Any]] -_FloatLike_co = Union[_IntLike_co, float, np.floating[Any]] -_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating[Any, Any]] -_TD64Like_co = Union[_IntLike_co, np.timedelta64] +_BoolLike_co: TypeAlias = bool | np.bool +_UIntLike_co: TypeAlias = np.unsignedinteger[Any] | _BoolLike_co +_IntLike_co: TypeAlias = int | np.integer[Any] | _BoolLike_co +_FloatLike_co: TypeAlias = float | np.floating[Any] | _IntLike_co +_ComplexLike_co: TypeAlias = ( + complex + | np.complexfloating[Any, Any] + | _FloatLike_co +) +_TD64Like_co: TypeAlias = np.timedelta64 | _IntLike_co -_NumberLike_co = Union[int, float, complex, np.number[Any], np.bool] -_ScalarLike_co = Union[ - int, - float, - complex, - str, - bytes, - np.generic, -] 
+_NumberLike_co: TypeAlias = int | float | complex | np.number[Any] | np.bool +_ScalarLike_co: TypeAlias = int | float | complex | str | bytes | np.generic # `_VoidLike_co` is technically not a scalar, but it's close enough -_VoidLike_co = Union[tuple[Any, ...], np.void] +_VoidLike_co: TypeAlias = tuple[Any, ...] | np.void diff --git a/numpy/_typing/_shape.py b/numpy/_typing/_shape.py index 4f1204e47c6a..2b854d65153a 100644 --- a/numpy/_typing/_shape.py +++ b/numpy/_typing/_shape.py @@ -1,7 +1,7 @@ from collections.abc import Sequence -from typing import Union, SupportsIndex +from typing import SupportsIndex, TypeAlias -_Shape = tuple[int, ...] +_Shape: TypeAlias = tuple[int, ...] # Anything that can be coerced to a shape tuple -_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]] +_ShapeLike: TypeAlias = SupportsIndex | Sequence[SupportsIndex] diff --git a/numpy/_typing/_ufunc.pyi b/numpy/_typing/_ufunc.pyi index f693341b521c..9495321e2c20 100644 --- a/numpy/_typing/_ufunc.pyi +++ b/numpy/_typing/_ufunc.pyi @@ -15,7 +15,9 @@ from typing import ( Literal, SupportsIndex, Protocol, + NoReturn, ) +from typing_extensions import LiteralString from numpy import ufunc, _CastingKind, _OrderKACF from numpy.typing import NDArray @@ -30,9 +32,10 @@ _2Tuple = tuple[_T, _T] _3Tuple = tuple[_T, _T, _T] _4Tuple = tuple[_T, _T, _T, _T] -_NTypes = TypeVar("_NTypes", bound=int) -_IDType = TypeVar("_IDType", bound=Any) -_NameType = TypeVar("_NameType", bound=str) +_NTypes = TypeVar("_NTypes", bound=int, covariant=True) +_IDType = TypeVar("_IDType", covariant=True) +_NameType = TypeVar("_NameType", bound=LiteralString, covariant=True) +_Signature = TypeVar("_Signature", bound=LiteralString, covariant=True) class _SupportsArrayUFunc(Protocol): @@ -47,10 +50,10 @@ class _SupportsArrayUFunc(Protocol): # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for # ufuncs that don't accept two input arguments and return one output argument. 
-# In such cases the respective methods are simply typed as `None`. +# In such cases the respective methods return `NoReturn` # NOTE: Similarly, `at` won't be defined for ufuncs that return -# multiple outputs; in such cases `at` is typed as `None` +# multiple outputs; in such cases `at` is typed to return `NoReturn` # NOTE: If 2 output types are returned then `out` must be a # 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable @@ -70,14 +73,6 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[2]: ... @property def signature(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( @@ -126,6 +121,12 @@ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i /, ) -> None: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + + class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @@ -252,16 +253,6 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[3]: ... @property def signature(self) -> None: ... - @property - def at(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( @@ -308,6 +299,12 @@ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _3Tuple[None | str] = ..., ) -> _2Tuple[Any]: ... + def at(self, *args, **kwargs) -> NoReturn: ... 
+ def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @@ -323,16 +320,6 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i def nargs(self) -> Literal[4]: ... @property def signature(self) -> None: ... - @property - def at(self) -> None: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... - @property - def outer(self) -> None: ... @overload def __call__( @@ -366,7 +353,13 @@ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: i signature: str | _4Tuple[None | str] = ..., ) -> _2Tuple[NDArray[Any]]: ... -class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc] + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... + +class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType, _Signature]): # type: ignore[misc] @property def __name__(self) -> _NameType: ... @property @@ -379,21 +372,8 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: def nout(self) -> Literal[1]: ... @property def nargs(self) -> Literal[3]: ... - - # NOTE: In practice the only gufunc in the main namespace is `matmul`, - # so we can use its signature here @property - def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ... - @property - def reduce(self) -> None: ... - @property - def accumulate(self) -> None: ... - @property - def reduceat(self) -> None: ... 
- @property - def outer(self) -> None: ... - @property - def at(self) -> None: ... + def signature(self) -> _Signature: ... # Scalar for 1D array-likes; ndarray otherwise @overload @@ -424,3 +404,9 @@ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: signature: str | _3Tuple[None | str] = ..., axes: list[_2Tuple[SupportsIndex]] = ..., ) -> NDArray[Any]: ... + + def at(self, *args, **kwargs) -> NoReturn: ... + def reduce(self, *args, **kwargs) -> NoReturn: ... + def accumulate(self, *args, **kwargs) -> NoReturn: ... + def reduceat(self, *args, **kwargs) -> NoReturn: ... + def outer(self, *args, **kwargs) -> NoReturn: ... diff --git a/numpy/conftest.py b/numpy/conftest.py index a6c329790e16..677537e206f0 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -2,13 +2,23 @@ Pytest configuration and fixtures for the Numpy test suite. """ import os +import sys import tempfile +from contextlib import contextmanager +import warnings import hypothesis import pytest import numpy from numpy._core._multiarray_tests import get_fpu_mode +from numpy.testing._private.utils import NOGIL_BUILD + +try: + from scipy_doctest.conftest import dt_config + HAVE_SCPDT = True +except ModuleNotFoundError: + HAVE_SCPDT = False _old_fpu_mode = None @@ -64,12 +74,31 @@ def pytest_addoption(parser): "automatically.")) +gil_enabled_at_start = True +if NOGIL_BUILD: + gil_enabled_at_start = sys._is_gil_enabled() + + def pytest_sessionstart(session): available_mem = session.config.getoption('available_memory') if available_mem is not None: os.environ['NPY_AVAILABLE_MEM'] = available_mem +def pytest_terminal_summary(terminalreporter, exitstatus, config): + if NOGIL_BUILD and not gil_enabled_at_start and sys._is_gil_enabled(): + tr = terminalreporter + tr.ensure_newline() + tr.section("GIL re-enabled", sep="=", red=True, bold=True) + tr.line("The GIL was re-enabled at runtime during the tests.") + tr.line("This can happen with no test failures if the RuntimeWarning") + 
tr.line("raised by Python when this happens is filtered by a test.") + tr.line("") + tr.line("Please ensure all new C modules declare support for running") + tr.line("without the GIL. Any new tests that intentionally imports ") + tr.line("code that re-enables the GIL should do so in a subprocess.") + pytest.exit("GIL re-enabled during tests", returncode=1) + #FIXME when yield tests are gone. @pytest.hookimpl() def pytest_itemcollected(item): @@ -136,3 +165,83 @@ def weak_promotion(request): yield request.param numpy._set_promotion_state(state) + + +if HAVE_SCPDT: + + @contextmanager + def warnings_errors_and_rng(test=None): + """Filter out the wall of DeprecationWarnings. + """ + msgs = ["The numpy.linalg.linalg", + "The numpy.fft.helper", + "dep_util", + "pkg_resources", + "numpy.core.umath", + "msvccompiler", + "Deprecated call", + "numpy.core", + "`np.compat`", + "Importing from numpy.matlib", + "This function is deprecated.", # random_integers + "Data type alias 'a'", # numpy.rec.fromfile + "Arrays of 2-dimensional vectors", # matlib.cross + "`in1d` is deprecated", ] + msg = "|".join(msgs) + + msgs_r = [ + "invalid value encountered", + "divide by zero encountered" + ] + msg_r = "|".join(msgs_r) + + with warnings.catch_warnings(): + warnings.filterwarnings( + 'ignore', category=DeprecationWarning, message=msg + ) + warnings.filterwarnings( + 'ignore', category=RuntimeWarning, message=msg_r + ) + yield + + # find and check doctests under this context manager + dt_config.user_context_mgr = warnings_errors_and_rng + + # numpy specific tweaks from refguide-check + dt_config.rndm_markers.add('#uninitialized') + dt_config.rndm_markers.add('# uninitialized') + + import doctest + dt_config.optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS + + # recognize the StringDType repr + dt_config.check_namespace['StringDType'] = numpy.dtypes.StringDType + + # temporary skips + dt_config.skiplist = set([ + 'numpy.savez', # unclosed file + 'numpy.matlib.savez', + 
'numpy.__array_namespace_info__', + 'numpy.matlib.__array_namespace_info__', + ]) + + # xfail problematic tutorials + dt_config.pytest_extra_xfail = { + 'how-to-verify-bug.rst': '', + 'c-info.ufunc-tutorial.rst': '', + 'basics.interoperability.rst': 'needs pandas', + 'basics.dispatch.rst': 'errors out in /testing/overrides.py', + 'basics.subclassing.rst': '.. testcode:: admonitions not understood', + 'misc.rst': 'manipulates warnings', + } + + # ignores are for things fail doctest collection (optionals etc) + dt_config.pytest_extra_ignore = [ + 'numpy/distutils', + 'numpy/_core/cversions.py', + 'numpy/_pyinstaller', + 'numpy/random/_examples', + 'numpy/compat', + 'numpy/f2py/_backends/_distutils.py', + ] + diff --git a/numpy/core/_multiarray_umath.py b/numpy/core/_multiarray_umath.py index a77e1557ba62..04cc88229aac 100644 --- a/numpy/core/_multiarray_umath.py +++ b/numpy/core/_multiarray_umath.py @@ -14,7 +14,7 @@ def __getattr__(attr_name): from ._utils import _raise_warning if attr_name in {"_ARRAY_API", "_UFUNC_API"}: - from numpy.version import short_version, release + from numpy.version import short_version import textwrap import traceback import sys @@ -22,42 +22,26 @@ def __getattr__(attr_name): msg = textwrap.dedent(f""" A module that was compiled using NumPy 1.x cannot be run in NumPy {short_version} as it may crash. To support both 1.x and 2.x - versions of NumPy, modules must be compiled against NumPy 2.0. + versions of NumPy, modules must be compiled with NumPy 2.0. + Some module may need to rebuild instead e.g. with 'pybind11>=2.12'. If you are a user of the module, the easiest solution will be to - either downgrade NumPy or update the failing module (if available). + downgrade to 'numpy<2' or try to upgrade the affected module. + We expect that some modules will need time to support NumPy 2. """) - if not release and short_version.startswith("2.0.0"): - # TODO: Can remove this after the release. 
- msg += textwrap.dedent("""\ - NOTE: When testing against pre-release versions of NumPy 2.0 - or building nightly wheels for it, it is necessary to ensure - the NumPy pre-release is used at build time. - The main way to ensure this is using no build isolation - and installing dependencies manually with NumPy. - - If your dependencies have the issue, check whether they - build nightly wheels build against NumPy 2.0. - - pybind11 note: If you see this message and do not see - any errors raised, it's possible this is due to a - package using an old version of pybind11 that should be - updated. - - """) - msg += "Traceback (most recent call last):" + tb_msg = "Traceback (most recent call last):" for line in traceback.format_stack()[:-1]: if "frozen importlib" in line: continue - msg += line - # Only print the message. This has two reasons (for now!): - # 1. Old NumPy replaced the error here making it never actually show - # in practice, thus raising alone would not be helpful. - # 2. pybind11 simply reaches into NumPy internals and requires a - # new release that includes the fix. That is missing as of 2023-11. - # But, it "conveniently" ignores the ABI version. - sys.stderr.write(msg) + tb_msg += line + + # Also print the message (with traceback). This is because old versions + # of NumPy unfortunately set up the import to replace (and hide) the + # error. The traceback shouldn't be needed, but e.g. pytest plugins + # seem to swallow it and we should be failing anyway... 
+ sys.stderr.write(msg + tb_msg) + raise ImportError(msg) ret = getattr(_multiarray_umath, attr_name, None) if ret is None: diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 8faf9415375b..ea94ad30852e 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -499,6 +499,22 @@ def as_ctypes_type(dtype): `ctypes.Structure`\ s - insert padding fields + Examples + -------- + Converting a simple dtype: + + >>> dt = np.dtype('int8') + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + + Converting a structured dtype: + + >>> dt = np.dtype([('x', 'i4'), ('y', 'f4')]) + >>> ctype = np.ctypeslib.as_ctypes_type(dt) + >>> ctype + + """ return _ctype_from_dtype(_dtype(dtype)) diff --git a/numpy/distutils/checks/cpu_rvv.c b/numpy/distutils/checks/cpu_rvv.c new file mode 100644 index 000000000000..45545d88dcd1 --- /dev/null +++ b/numpy/distutils/checks/cpu_rvv.c @@ -0,0 +1,13 @@ +#ifndef __riscv_vector + #error RVV not supported +#endif + +#include + +int main(void) +{ + size_t vlmax = __riscv_vsetvlmax_e32m1(); + vuint32m1_t a = __riscv_vmv_v_x_u32m1(0, vlmax); + vuint32m1_t b = __riscv_vadd_vv_u32m1(a, a, vlmax); + return __riscv_vmv_x_s_u32m1_u32(b); +} diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py index 3ede013e0f3c..06e6441e65df 100644 --- a/numpy/distutils/extension.py +++ b/numpy/distutils/extension.py @@ -93,15 +93,9 @@ def __init__( return def has_cxx_sources(self): - for source in self.sources: - if cxx_ext_re(str(source)): - return True - return False + return any(cxx_ext_re(str(source)) for source in self.sources) def has_f2py_sources(self): - for source in self.sources: - if fortran_pyf_ext_re(source): - return True - return False + return any(fortran_pyf_ext_re(source) for source in self.sources) # class Extension diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 4763f41ad326..ac0c206f96cf 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ 
b/numpy/distutils/mingw32ccompiler.py @@ -24,7 +24,13 @@ import distutils.cygwinccompiler from distutils.unixccompiler import UnixCCompiler -from distutils.msvccompiler import get_build_version as get_build_msvc_version + +try: + from distutils.msvccompiler import get_build_version as get_build_msvc_version +except ImportError: + def get_build_msvc_version(): + return None + from distutils.errors import UnknownFileError from numpy.distutils.misc_util import (msvc_runtime_library, msvc_runtime_version, diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 776eb8d3928b..09145e1ddf52 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -489,10 +489,7 @@ def is_string(s): def all_strings(lst): """Return True if all items in lst are string objects. """ - for item in lst: - if not is_string(item): - return False - return True + return all(is_string(item) for item in lst) def is_sequence(seq): if is_string(seq): @@ -527,17 +524,11 @@ def get_language(sources): def has_f_sources(sources): """Return True if sources contains Fortran files """ - for source in sources: - if fortran_ext_match(source): - return True - return False + return any(fortran_ext_match(source) for source in sources) def has_cxx_sources(sources): """Return True if sources contains C++ files """ - for source in sources: - if cxx_ext_match(source): - return True - return False + return any(cxx_ext_match(source) for source in sources) def filter_sources(sources): """Return four lists of filenames containing @@ -1420,7 +1411,7 @@ def paths(self,*paths,**kws): """Apply glob to paths and prepend local_path if needed. Applies glob.glob(...) to each path in the sequence (if needed) and - pre-pends the local_path if needed. Because this is called on all + prepends the local_path if needed. 
Because this is called on all source lists, this allows wildcard characters to be specified in lists of sources for extension modules and libraries and scripts and allows path-names be relative to the source directory. diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py index 605c80483b77..40e7606eeb76 100644 --- a/numpy/distutils/tests/test_misc_util.py +++ b/numpy/distutils/tests/test_misc_util.py @@ -1,10 +1,12 @@ from os.path import join, sep, dirname +import pytest + from numpy.distutils.misc_util import ( appendpath, minrelpath, gpaths, get_shared_lib_extension, get_info ) from numpy.testing import ( - assert_, assert_equal + assert_, assert_equal, IS_EDITABLE ) ajoin = lambda *paths: join(*((sep,)+paths)) @@ -73,6 +75,10 @@ def test_get_shared_lib_extension(self): assert_(get_shared_lib_extension(is_python_ext=True)) +@pytest.mark.skipif( + IS_EDITABLE, + reason="`get_info` .ini lookup method incompatible with editable install" +) def test_installed_npymath_ini(): # Regression test for gh-7707. If npymath.ini wasn't installed, then this # will give an error. diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py index c99e9abc99a5..7324168e1dc8 100644 --- a/numpy/doc/ufuncs.py +++ b/numpy/doc/ufuncs.py @@ -113,7 +113,8 @@ output argument is used, the ufunc still returns a reference to the result. >>> x = np.arange(2) - >>> np.add(np.arange(2),np.arange(2.),x) + >>> np.add(np.arange(2, dtype=float), np.arange(2, dtype=float), x, + ... 
casting='unsafe') array([0, 2]) >>> x array([0, 2]) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index cea64282252b..706e538c8bea 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,43 +1,600 @@ +from typing import ( + Any, + Final, + Generic, + Literal as L, + NoReturn, + TypeAlias, + TypeVar, + final, +) +from typing_extensions import LiteralString + import numpy as np +__all__ = [ + 'BoolDType', + 'Int8DType', + 'ByteDType', + 'UInt8DType', + 'UByteDType', + 'Int16DType', + 'ShortDType', + 'UInt16DType', + 'UShortDType', + 'Int32DType', + 'IntDType', + 'UInt32DType', + 'UIntDType', + 'Int64DType', + 'LongDType', + 'UInt64DType', + 'ULongDType', + 'LongLongDType', + 'ULongLongDType', + 'Float16DType', + 'Float32DType', + 'Float64DType', + 'LongDoubleDType', + 'Complex64DType', + 'Complex128DType', + 'CLongDoubleDType', + 'ObjectDType', + 'BytesDType', + 'StrDType', + 'VoidDType', + 'DateTime64DType', + 'TimeDelta64DType', + 'StringDType', +] + +# Helper base classes (typing-only) + +_SelfT = TypeVar("_SelfT", bound=np.dtype[Any]) +_SCT_co = TypeVar("_SCT_co", bound=np.generic, covariant=True) + +class _SimpleDType(Generic[_SCT_co], np.dtype[_SCT_co]): # type: ignore[misc] + names: None # pyright: ignore[reportIncompatibleVariableOverride] + def __new__(cls: type[_SelfT], /) -> _SelfT: ... + def __getitem__(self, key: Any, /) -> NoReturn: ... + @property + def base(self) -> np.dtype[_SCT_co]: ... + @property + def fields(self) -> None: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def subdtype(self) -> None: ... + +class _LiteralDType(Generic[_SCT_co], _SimpleDType[_SCT_co]): + @property + def flags(self) -> L[0]: ... + @property + def hasobject(self) -> L[False]: ... 
+ +# Helper mixins (typing-only): + +_KindT_co = TypeVar("_KindT_co", bound=LiteralString, covariant=True) +_CharT_co = TypeVar("_CharT_co", bound=LiteralString, covariant=True) +_NumT_co = TypeVar("_NumT_co", bound=int, covariant=True) -__all__: list[str] +class _TypeCodes(Generic[_KindT_co, _CharT_co, _NumT_co]): + @final + @property + def kind(self) -> _KindT_co: ... + @final + @property + def char(self) -> _CharT_co: ... + @final + @property + def num(self) -> _NumT_co: ... + +class _NoOrder: + @final + @property + def byteorder(self) -> L["|"]: ... + +class _NativeOrder: + @final + @property + def byteorder(self) -> L["="]: ... + +_DataSize_co = TypeVar("_DataSize_co", bound=int, covariant=True) +_ItemSize_co = TypeVar("_ItemSize_co", bound=int, covariant=True) + +class _NBit(Generic[_DataSize_co, _ItemSize_co]): + @final + @property + def alignment(self) -> _DataSize_co: ... + @final + @property + def itemsize(self) -> _ItemSize_co: ... + +class _8Bit(_NoOrder, _NBit[L[1], L[1]]): ... # Boolean: -BoolDType = np.dtype[np.bool] + +@final +class BoolDType( + _TypeCodes[L["b"], L["?"], L[0]], + _8Bit, + _LiteralDType[np.bool], +): + @property + def name(self) -> L["bool"]: ... + @property + def str(self) -> L["|b1"]: ... + # Sized integers: -Int8DType = np.dtype[np.int8] -UInt8DType = np.dtype[np.uint8] -Int16DType = np.dtype[np.int16] -UInt16DType = np.dtype[np.uint16] -Int32DType = np.dtype[np.int32] -UInt32DType = np.dtype[np.uint32] -Int64DType = np.dtype[np.int64] -UInt64DType = np.dtype[np.uint64] + +@final +class Int8DType( + _TypeCodes[L["i"], L["b"], L[1]], + _8Bit, + _LiteralDType[np.int8], +): + @property + def name(self) -> L["int8"]: ... + @property + def str(self) -> L["|i1"]: ... + +@final +class UInt8DType( + _TypeCodes[L["u"], L["B"], L[2]], + _8Bit, + _LiteralDType[np.uint8], +): + @property + def name(self) -> L["uint8"]: ... + @property + def str(self) -> L["|u1"]: ... 
+ +@final +class Int16DType( + _TypeCodes[L["i"], L["h"], L[3]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.int16], +): + @property + def name(self) -> L["int16"]: ... + @property + def str(self) -> L["i2"]: ... + +@final +class UInt16DType( + _TypeCodes[L["u"], L["H"], L[4]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.uint16], +): + @property + def name(self) -> L["uint16"]: ... + @property + def str(self) -> L["u2"]: ... + +@final +class Int32DType( + _TypeCodes[L["i"], L["i", "l"], L[5, 7]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.int32], +): + @property + def name(self) -> L["int32"]: ... + @property + def str(self) -> L["i4"]: ... + +@final +class UInt32DType( + _TypeCodes[L["u"], L["I", "L"], L[6, 8]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.uint32], +): + @property + def name(self) -> L["uint32"]: ... + @property + def str(self) -> L["u4"]: ... + +@final +class Int64DType( + _TypeCodes[L["i"], L["l", "q"], L[7, 9]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.int64], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... + +@final +class UInt64DType( + _TypeCodes[L["u"], L["L", "Q"], L[8, 10]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.uint64], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... 
+ # Standard C-named version/alias: -ByteDType = np.dtype[np.byte] -UByteDType = np.dtype[np.ubyte] -ShortDType = np.dtype[np.short] -UShortDType = np.dtype[np.ushort] -IntDType = np.dtype[np.intc] -UIntDType = np.dtype[np.uintc] -LongDType = np.dtype[np.long] -ULongDType = np.dtype[np.ulong] -LongLongDType = np.dtype[np.longlong] -ULongLongDType = np.dtype[np.ulonglong] -# Floats -Float16DType = np.dtype[np.float16] -Float32DType = np.dtype[np.float32] -Float64DType = np.dtype[np.float64] -LongDoubleDType = np.dtype[np.longdouble] +ByteDType: Final = Int8DType +UByteDType: Final = UInt8DType +ShortDType: Final = Int16DType +UShortDType: Final = UInt16DType + +@final +class IntDType( + _TypeCodes[L["i"], L["i"], L[5]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.intc], +): + @property + def name(self) -> L["int32"]: ... + @property + def str(self) -> L["i4"]: ... + +@final +class UIntDType( + _TypeCodes[L["u"], L["I"], L[6]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.uintc], +): + @property + def name(self) -> L["uint32"]: ... + @property + def str(self) -> L["u4"]: ... + +@final +class LongDType( + _TypeCodes[L["i"], L["l"], L[7]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _LiteralDType[np.long], +): + @property + def name(self) -> L["int32", "int64"]: ... + @property + def str(self) -> L["i4", "i8"]: ... + +@final +class ULongDType( + _TypeCodes[L["u"], L["L"], L[8]], + _NativeOrder, + _NBit[L[4, 8], L[4, 8]], + _LiteralDType[np.ulong], +): + @property + def name(self) -> L["uint32", "uint64"]: ... + @property + def str(self) -> L["u4", "u8"]: ... + +@final +class LongLongDType( + _TypeCodes[L["i"], L["q"], L[9]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.longlong], +): + @property + def name(self) -> L["int64"]: ... + @property + def str(self) -> L["i8"]: ... 
+ +@final +class ULongLongDType( + _TypeCodes[L["u"], L["Q"], L[10]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.ulonglong], +): + @property + def name(self) -> L["uint64"]: ... + @property + def str(self) -> L["u8"]: ... + +# Floats: + +@final +class Float16DType( + _TypeCodes[L["f"], L["e"], L[23]], + _NativeOrder, + _NBit[L[2], L[2]], + _LiteralDType[np.float16], +): + @property + def name(self) -> L["float16"]: ... + @property + def str(self) -> L["f2"]: ... + +@final +class Float32DType( + _TypeCodes[L["f"], L["f"], L[11]], + _NativeOrder, + _NBit[L[4], L[4]], + _LiteralDType[np.float32], +): + @property + def name(self) -> L["float32"]: ... + @property + def str(self) -> L["f4"]: ... + +@final +class Float64DType( + _TypeCodes[L["f"], L["d"], L[12]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.float64], +): + @property + def name(self) -> L["float64"]: ... + @property + def str(self) -> L["f8"]: ... + +@final +class LongDoubleDType( + _TypeCodes[L["f"], L["g"], L[13]], + _NativeOrder, + _NBit[L[8, 12, 16], L[8, 12, 16]], + _LiteralDType[np.longdouble], +): + @property + def name(self) -> L["float64", "float96", "float128"]: ... + @property + def str(self) -> L["f8", "f12", "f16"]: ... + # Complex: -Complex64DType = np.dtype[np.complex64] -Complex128DType = np.dtype[np.complex128] -CLongDoubleDType = np.dtype[np.clongdouble] -# Others: -ObjectDType = np.dtype[np.object_] -BytesDType = np.dtype[np.bytes_] -StrDType = np.dtype[np.str_] -VoidDType = np.dtype[np.void] -DateTime64DType = np.dtype[np.datetime64] -TimeDelta64DType = np.dtype[np.timedelta64] + +@final +class Complex64DType( + _TypeCodes[L["c"], L["F"], L[14]], + _NativeOrder, + _NBit[L[4], L[8]], + _LiteralDType[np.complex64], +): + @property + def name(self) -> L["complex64"]: ... + @property + def str(self) -> L["c8"]: ... 
+ +@final +class Complex128DType( + _TypeCodes[L["c"], L["D"], L[15]], + _NativeOrder, + _NBit[L[8], L[16]], + _LiteralDType[np.complex128], +): + @property + def name(self) -> L["complex128"]: ... + @property + def str(self) -> L["c16"]: ... + +@final +class CLongDoubleDType( + _TypeCodes[L["c"], L["G"], L[16]], + _NativeOrder, + _NBit[L[8, 12, 16], L[16, 24, 32]], + _LiteralDType[np.clongdouble], +): + @property + def name(self) -> L["complex128", "complex192", "complex256"]: ... + @property + def str(self) -> L["c16", "c24", "c32"]: ... + +# Python objects: + +@final +class ObjectDType( + _TypeCodes[L["O"], L["O"], L[17]], + _NoOrder, + _NBit[L[8], L[8]], + _SimpleDType[np.object_], +): + @property + def hasobject(self) -> L[True]: ... + @property + def name(self) -> L["object"]: ... + @property + def str(self) -> L["|O"]: ... + +# Flexible: + +@final +class BytesDType( + Generic[_ItemSize_co], + _TypeCodes[L["S"], L["S"], L[18]], + _NoOrder, + _NBit[L[1],_ItemSize_co], + _SimpleDType[np.bytes_], +): + def __new__(cls, size: _ItemSize_co, /) -> BytesDType[_ItemSize_co]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +@final +class StrDType( + Generic[_ItemSize_co], + _TypeCodes[L["U"], L["U"], L[19]], + _NativeOrder, + _NBit[L[4],_ItemSize_co], + _SimpleDType[np.str_], +): + def __new__(cls, size: _ItemSize_co, /) -> StrDType[_ItemSize_co]: ... + @property + def hasobject(self) -> L[False]: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +@final +class VoidDType( + Generic[_ItemSize_co], + _TypeCodes[L["V"], L["V"], L[20]], + _NoOrder, + _NBit[L[1], _ItemSize_co], + np.dtype[np.void], # type: ignore[misc] +): + # NOTE: `VoidDType(...)` raises a `TypeError` at the moment + def __new__(cls, length: _ItemSize_co, /) -> NoReturn: ... + @property + def base(self: _SelfT) -> _SelfT: ... 
+ @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def subdtype(self) -> None: ... + @property + def name(self) -> LiteralString: ... + @property + def str(self) -> LiteralString: ... + +# Other: + +_DateUnit: TypeAlias = L["Y", "M", "W", "D"] +_TimeUnit: TypeAlias = L["h", "m", "s", "ms", "us", "ns", "ps", "fs", "as"] +_DateTimeUnit: TypeAlias = _DateUnit | _TimeUnit + +@final +class DateTime64DType( + _TypeCodes[L["M"], L["M"], L[21]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.datetime64], +): + # NOTE: `DateTime64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget the`unit: L["μs"]` overload. + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... + @property + def name(self) -> L[ + "datetime64", + "datetime64[Y]", + "datetime64[M]", + "datetime64[W]", + "datetime64[D]", + "datetime64[h]", + "datetime64[m]", + "datetime64[s]", + "datetime64[ms]", + "datetime64[us]", + "datetime64[ns]", + "datetime64[ps]", + "datetime64[fs]", + "datetime64[as]", + ]: ... + @property + def str(self) -> L[ + "M8", + "M8[Y]", + "M8[M]", + "M8[W]", + "M8[D]", + "M8[h]", + "M8[m]", + "M8[s]", + "M8[ms]", + "M8[us]", + "M8[ns]", + "M8[ps]", + "M8[fs]", + "M8[as]", + ]: ... + +@final +class TimeDelta64DType( + _TypeCodes[L["m"], L["m"], L[22]], + _NativeOrder, + _NBit[L[8], L[8]], + _LiteralDType[np.timedelta64], +): + # NOTE: `TimeDelta64DType(...)` raises a `TypeError` at the moment + # TODO: Once implemented, don't forget to overload on `unit: L["μs"]`. + def __new__(cls, unit: _DateTimeUnit, /) -> NoReturn: ... 
+ @property + def name(self) -> L[ + "timedelta64", + "timedelta64[Y]", + "timedelta64[M]", + "timedelta64[W]", + "timedelta64[D]", + "timedelta64[h]", + "timedelta64[m]", + "timedelta64[s]", + "timedelta64[ms]", + "timedelta64[us]", + "timedelta64[ns]", + "timedelta64[ps]", + "timedelta64[fs]", + "timedelta64[as]", + ]: ... + @property + def str(self) -> L[ + "m8", + "m8[Y]", + "m8[M]", + "m8[W]", + "m8[D]", + "m8[h]", + "m8[m]", + "m8[s]", + "m8[ms]", + "m8[us]", + "m8[ns]", + "m8[ps]", + "m8[fs]", + "m8[as]", + ]: ... + +@final +class StringDType( + _TypeCodes[L["T"], L["T"], L[2056]], + _NativeOrder, + _NBit[L[8], L[16]], + # TODO: Replace the (invalid) `str` with the scalar type, once implemented + np.dtype[str], # type: ignore[misc] +): + def __new__(cls, /) -> StringDType: ... + def __getitem__(self, key: Any, /) -> NoReturn: ... + @property + def base(self) -> StringDType: ... + @property + def fields(self) -> None: ... + @property + def hasobject(self) -> L[True]: ... + @property + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... + @property + def name(self) -> L["StringDType64", "StringDType128"]: ... + @property + def ndim(self) -> L[0]: ... + @property + def shape(self) -> tuple[()]: ... + @property + def str(self) -> L["|T8", "|T16"]: ... + @property + def subdtype(self) -> None: ... + @property + def type(self) -> type[str]: ... 
diff --git a/numpy/exceptions.py b/numpy/exceptions.py index b7df57c69fbd..adf88c754b66 100644 --- a/numpy/exceptions.py +++ b/numpy/exceptions.py @@ -146,6 +146,7 @@ class AxisError(ValueError, IndexError): Examples -------- + >>> import numpy as np >>> array_1d = np.arange(10) >>> np.cumsum(array_1d, axis=1) Traceback (most recent call last): @@ -222,7 +223,10 @@ class DTypePromotionError(TypeError): Datetimes and complex numbers are incompatible classes and cannot be promoted: - >>> np.result_type(np.dtype("M8[s]"), np.complex128) + >>> import numpy as np + >>> np.result_type(np.dtype("M8[s]"), np.complex128) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... DTypePromotionError: The DType could not be promoted by . This means that no common DType exists for the given inputs. For example they cannot be stored in a @@ -235,9 +239,11 @@ class DTypePromotionError(TypeError): >>> dtype1 = np.dtype([("field1", np.float64), ("field2", np.int64)]) >>> dtype2 = np.dtype([("field1", np.float64)]) - >>> np.promote_types(dtype1, dtype2) + >>> np.promote_types(dtype1, dtype2) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... DTypePromotionError: field names `('field1', 'field2')` and `('field1',)` mismatch. 
- """ + """ # NOQA pass diff --git a/numpy/f2py/_backends/_distutils.py b/numpy/f2py/_backends/_distutils.py index e9b22a3921a5..f2436f86a7e6 100644 --- a/numpy/f2py/_backends/_distutils.py +++ b/numpy/f2py/_backends/_distutils.py @@ -13,9 +13,9 @@ class DistutilsBackend(Backend): def __init__(sef, *args, **kwargs): warnings.warn( - "distutils has been deprecated since NumPy 1.26.x" + "\ndistutils has been deprecated since NumPy 1.26.x\n" "Use the Meson backend instead, or generate wrappers" - "without -c and use a custom build script", + " without -c and use a custom build script", VisibleDeprecationWarning, stacklevel=2, ) diff --git a/numpy/f2py/_backends/_meson.py b/numpy/f2py/_backends/_meson.py index 20df79a1c71d..b438ed223433 100644 --- a/numpy/f2py/_backends/_meson.py +++ b/numpy/f2py/_backends/_meson.py @@ -28,7 +28,7 @@ def __init__( include_dirs: list[Path], object_files: list[Path], linker_args: list[str], - c_args: list[str], + fortran_args: list[str], build_type: str, python_exe: str, ): @@ -46,12 +46,18 @@ def __init__( self.include_dirs = [] self.substitutions = {} self.objects = object_files + # Convert args to '' wrapped variant for meson + self.fortran_args = [ + f"'{x}'" if not (x.startswith("'") and x.endswith("'")) else x + for x in fortran_args + ] self.pipeline = [ self.initialize_template, self.sources_substitution, self.deps_substitution, self.include_substitution, self.libraries_substitution, + self.fortran_args_substitution, ] self.build_type = build_type self.python_exe = python_exe @@ -73,8 +79,8 @@ def initialize_template(self) -> None: self.substitutions["python"] = self.python_exe def sources_substitution(self) -> None: - self.substitutions["source_list"] = f",\n{self.indent}".join( - [f"{self.indent}'{source}'," for source in self.sources] + self.substitutions["source_list"] = ",\n".join( + [f"{self.indent}'''{source}'''," for source in self.sources] ) def deps_substitution(self) -> None: @@ -85,20 +91,20 @@ def 
deps_substitution(self) -> None: def libraries_substitution(self) -> None: self.substitutions["lib_dir_declarations"] = "\n".join( [ - f"lib_dir_{i} = declare_dependency(link_args : ['-L{lib_dir}'])" + f"lib_dir_{i} = declare_dependency(link_args : ['''-L{lib_dir}'''])" for i, lib_dir in enumerate(self.library_dirs) ] ) self.substitutions["lib_declarations"] = "\n".join( [ - f"{lib} = declare_dependency(link_args : ['-l{lib}'])" + f"{lib.replace('.','_')} = declare_dependency(link_args : ['-l{lib}'])" for lib in self.libraries ] ) self.substitutions["lib_list"] = f"\n{self.indent}".join( - [f"{self.indent}{lib}," for lib in self.libraries] + [f"{self.indent}{lib.replace('.','_')}," for lib in self.libraries] ) self.substitutions["lib_dir_list"] = f"\n{self.indent}".join( [f"{self.indent}lib_dir_{i}," for i in range(len(self.library_dirs))] @@ -106,15 +112,23 @@ def libraries_substitution(self) -> None: def include_substitution(self) -> None: self.substitutions["inc_list"] = f",\n{self.indent}".join( - [f"{self.indent}'{inc}'," for inc in self.include_dirs] + [f"{self.indent}'''{inc}'''," for inc in self.include_dirs] ) + def fortran_args_substitution(self) -> None: + if self.fortran_args: + self.substitutions["fortran_args"] = ( + f"{self.indent}fortran_args: [{', '.join([arg for arg in self.fortran_args])}]," + ) + else: + self.substitutions["fortran_args"] = "" + def generate_meson_build(self): for node in self.pipeline: node() template = Template(self.meson_build_template()) meson_build = template.substitute(self.substitutions) - meson_build = re.sub(r',,', ',', meson_build) + meson_build = re.sub(r",,", ",", meson_build) return meson_build @@ -126,6 +140,7 @@ def __init__(self, *args, **kwargs): self.build_type = ( "debug" if any("debug" in flag for flag in self.fc_flags) else "release" ) + self.fc_flags = _get_flags(self.fc_flags) def _move_exec_to_root(self, build_dir: Path): walk_dir = Path(build_dir) / self.meson_build_dir @@ -203,3 +218,17 @@ def 
_prepare_sources(mname, sources, bdir): if not Path(source).suffix == ".pyf" ] return extended_sources + + +def _get_flags(fc_flags): + flag_values = [] + flag_pattern = re.compile(r"--f(77|90)flags=(.*)") + for flag in fc_flags: + match_result = flag_pattern.match(flag) + if match_result: + values = match_result.group(2).strip().split() + values = [val.strip("'\"") for val in values] + flag_values.extend(values) + # Hacky way to preserve order of flags + unique_flags = list(dict.fromkeys(flag_values)) + return unique_flags diff --git a/numpy/f2py/_backends/meson.build.template b/numpy/f2py/_backends/meson.build.template index 8e34fdc8d4d6..fdcc1b17ce21 100644 --- a/numpy/f2py/_backends/meson.build.template +++ b/numpy/f2py/_backends/meson.build.template @@ -8,7 +8,7 @@ project('${modulename}', ]) fc = meson.get_compiler('fortran') -py = import('python').find_installation('${python}', pure: false) +py = import('python').find_installation('''${python}''', pure: false) py_dep = py.dependency() incdir_numpy = run_command(py, @@ -51,4 +51,5 @@ ${dep_list} ${lib_list} ${lib_dir_list} ], +${fortran_args} install : true) diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 13a1074b447e..88a9ff552343 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -17,6 +17,7 @@ from . import __version__ from . 
import cfuncs +from .cfuncs import errmess __all__ = [ 'applyrules', 'debugcapi', 'dictappend', 'errmess', 'gentitle', @@ -35,23 +36,21 @@ 'isintent_nothide', 'isintent_out', 'isintent_overwrite', 'islogical', 'islogicalfunction', 'islong_complex', 'islong_double', 'islong_doublefunction', 'islong_long', 'islong_longfunction', - 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isrequired', - 'isroutine', 'isscalar', 'issigned_long_longarray', 'isstring', - 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', - 'issubroutine', 'get_f2py_modulename', - 'issubroutine_wrap', 'isthreadsafe', 'isunsigned', 'isunsigned_char', - 'isunsigned_chararray', 'isunsigned_long_long', - 'isunsigned_long_longarray', 'isunsigned_short', - 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', - 'replace', 'show', 'stripcomma', 'throw_error', 'isattr_value', - 'getuseblocks', 'process_f2cmap_dict' + 'ismodule', 'ismoduleroutine', 'isoptional', 'isprivate', 'isvariable', + 'isrequired', 'isroutine', 'isscalar', 'issigned_long_longarray', + 'isstring', 'isstringarray', 'isstring_or_stringarray', 'isstringfunction', + 'issubroutine', 'get_f2py_modulename', 'issubroutine_wrap', 'isthreadsafe', + 'isunsigned', 'isunsigned_char', 'isunsigned_chararray', + 'isunsigned_long_long', 'isunsigned_long_longarray', 'isunsigned_short', + 'isunsigned_shortarray', 'l_and', 'l_not', 'l_or', 'outmess', 'replace', + 'show', 'stripcomma', 'throw_error', 'isattr_value', 'getuseblocks', + 'process_f2cmap_dict' ] f2py_version = __version__.version -errmess = sys.stderr.write show = pprint.pprint options = {} @@ -518,6 +517,15 @@ def isprivate(var): return 'attrspec' in var and 'private' in var['attrspec'] +def isvariable(var): + # heuristic to find public/private declarations of filtered subroutines + if len(var) == 1 and 'attrspec' in var and \ + var['attrspec'][0] in ('public', 'private'): + is_var = False + else: + is_var = True + return is_var + def hasinitvalue(var): return 
'=' in var @@ -701,9 +709,9 @@ def getcallprotoargument(rout, cb_map={}): else: if not isattr_value(var): ctype = ctype + '*' - if ((isstring(var) + if (isstring(var) or isarrayofstrings(var) # obsolete? - or isstringarray(var))): + or isstringarray(var)): arg_types2.append('size_t') arg_types.append(ctype) diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py index fa477a5b9aca..8a8939d7260a 100644 --- a/numpy/f2py/capi_maps.py +++ b/numpy/f2py/capi_maps.py @@ -689,6 +689,8 @@ def modsign2map(m): else: ret['interface_usercode'] = '' ret['pymethoddef'] = getpymethoddef(m) or '' + if 'gil_used' in m: + ret['gil_used'] = m['gil_used'] if 'coutput' in m: ret['coutput'] = m['coutput'] if 'f2py_wrapper_output' in m: diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py index 721e075b6c73..faf8dd401301 100644 --- a/numpy/f2py/cb_rules.py +++ b/numpy/f2py/cb_rules.py @@ -122,7 +122,7 @@ #setdims# #ifdef PYPY_VERSION #define CAPI_ARGLIST_SETITEM(idx, value) PyList_SetItem((PyObject *)capi_arglist_list, idx, value) - capi_arglist_list = PySequence_List(capi_arglist); + capi_arglist_list = PySequence_List((PyObject *)capi_arglist); if (capi_arglist_list == NULL) goto capi_fail; #else #define CAPI_ARGLIST_SETITEM(idx, value) PyTuple_SetItem((PyObject *)capi_arglist, idx, value) diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py index 4328a6e5004c..1dc3247323d5 100644 --- a/numpy/f2py/cfuncs.py +++ b/numpy/f2py/cfuncs.py @@ -16,7 +16,16 @@ from . import __version__ f2py_version = __version__.version -errmess = sys.stderr.write + + +def errmess(s: str) -> None: + """ + Write an error message to stderr. + + This indirection is needed because sys.stderr might not always be available (see #26862). 
+ """ + if sys.stderr is not None: + sys.stderr.write(s) ##################### Definitions ################## diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index 8d3fc27608bd..68ef46c05fc0 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -425,11 +425,11 @@ def readfortrancode(ffile, dowithline=show, istop=1): if l[-1] not in "\n\r\f": break l = l[:-1] - if not strictf77: - (l, rl) = split_by_unquoted(l, '!') - l += ' ' - if rl[:5].lower() == '!f2py': # f2py directive - l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') + # Unconditionally remove comments + (l, rl) = split_by_unquoted(l, '!') + l += ' ' + if rl[:5].lower() == '!f2py': # f2py directive + l, _ = split_by_unquoted(l + 4 * ' ' + rl[5:], '!') if l.strip() == '': # Skip empty line if sourcecodeform == 'free': # In free form, a statement continues in the next line @@ -466,25 +466,13 @@ def readfortrancode(ffile, dowithline=show, istop=1): finalline = '' origfinalline = '' else: - if not strictf77: - # F90 continuation - r = cont1.match(l) - if r: - l = r.group('line') # Continuation follows .. - if cont: - ll = ll + cont2.match(l).group('line') - finalline = '' - origfinalline = '' - else: - # clean up line beginning from possible digits. - l = ' ' + l[5:] - if localdolowercase: - finalline = ll.lower() - else: - finalline = ll - origfinalline = ll - ll = l - cont = (r is not None) + r = cont1.match(l) + if r: + l = r.group('line') # Continuation follows .. + if cont: + ll = ll + cont2.match(l).group('line') + finalline = '' + origfinalline = '' else: # clean up line beginning from possible digits. l = ' ' + l[5:] @@ -818,7 +806,7 @@ def crackline(line, reset=0): raise Exception('crackline: groupcounter(=%s) is nonpositive. ' 'Check the blocks.' 
% (groupcounter)) - m1 = beginpattern[0].match((line)) + m1 = beginpattern[0].match(line) if (m1) and (not m1.group('this') == groupname[groupcounter]): raise Exception('crackline: End group %s does not match with ' 'previous Begin group %s\n\t%s' % @@ -2551,7 +2539,7 @@ def get_parameters(vars, global_params={}): outmess(f'get_parameters[TODO]: ' f'implement evaluation of complex expression {v}\n') - dimspec = ([s.lstrip('dimension').strip() + dimspec = ([s.removeprefix('dimension').strip() for s in vars[n]['attrspec'] if s.startswith('dimension')] or [None])[0] @@ -2747,8 +2735,8 @@ def analyzevars(block): d = param_parse(d, params) except (ValueError, IndexError, KeyError): outmess( - ('analyzevars: could not parse dimension for ' - f'variable {d!r}\n') + 'analyzevars: could not parse dimension for ' + f'variable {d!r}\n' ) dim_char = ':' if d == ':' else '*' @@ -2828,9 +2816,9 @@ def compute_deps(v, deps): compute_deps(v1, deps) all_deps = set() compute_deps(v, all_deps) - if ((v in n_deps + if (v in n_deps or '=' in vars[v] - or 'depend' in vars[v])): + or 'depend' in vars[v]): # Skip a variable that # - n depends on # - has user-defined initialization expression diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py index f5fab23ab867..f9fa29806e3e 100755 --- a/numpy/f2py/f2py2e.py +++ b/numpy/f2py/f2py2e.py @@ -28,11 +28,12 @@ from . import f90mod_rules from . import __version__ from . import capi_maps +from .cfuncs import errmess from numpy.f2py._backends import f2py_build_generator f2py_version = __version__.version numpy_version = __version__.version -errmess = sys.stderr.write + # outmess=sys.stdout.write show = pprint.pprint outmess = auxfuncs.outmess @@ -106,6 +107,14 @@ functions. --wrap-functions is default because it ensures maximum portability/compiler independence. + --[no-]freethreading-compatible Create a module that declares it does or + doesn't require the GIL. The default is + --freethreading-compatible for backward + compatibility. 
Inspect the Fortran code you are wrapping for + thread safety issues before passing + --no-freethreading-compatible, as f2py does not analyze + fortran code for thread safety issues. + --include-paths ::... Search include files from the given directories. @@ -199,7 +208,7 @@ def scaninputline(inputline): dorestdoc = 0 wrapfuncs = 1 buildpath = '.' - include_paths, inputline = get_includes(inputline) + include_paths, freethreading_compatible, inputline = get_newer_options(inputline) signsfile, modulename = None, None options = {'buildpath': buildpath, 'coutput': None, @@ -327,6 +336,7 @@ def scaninputline(inputline): options['wrapfuncs'] = wrapfuncs options['buildpath'] = buildpath options['include_paths'] = include_paths + options['requires_gil'] = not freethreading_compatible options.setdefault('f2cmap_file', None) return files, options @@ -364,6 +374,11 @@ def callcrackfortran(files, options): else: for mod in postlist: mod["f2py_wrapper_output"] = options["f2py_wrapper_output"] + for mod in postlist: + if options["requires_gil"]: + mod['gil_used'] = 'Py_MOD_GIL_USED' + else: + mod['gil_used'] = 'Py_MOD_GIL_NOT_USED' return postlist @@ -534,21 +549,22 @@ def __call__(self, parser, namespace, values, option_string=None): include_paths_set.add(values) setattr(namespace, 'include_paths', list(include_paths_set)) -def include_parser(): +def f2py_parser(): parser = argparse.ArgumentParser(add_help=False) parser.add_argument("-I", dest="include_paths", action=CombineIncludePaths) parser.add_argument("--include-paths", dest="include_paths", action=CombineIncludePaths) parser.add_argument("--include_paths", dest="include_paths", action=CombineIncludePaths) + parser.add_argument("--freethreading-compatible", dest="ftcompat", action=argparse.BooleanOptionalAction) return parser -def get_includes(iline): +def get_newer_options(iline): iline = (' '.join(iline)).split() - parser = include_parser() + parser = f2py_parser() args, remain = parser.parse_known_args(iline) ipaths = 
args.include_paths if args.include_paths is None: ipaths = [] - return ipaths, remain + return ipaths, args.ftcompat, remain def make_f2py_compile_parser(): parser = argparse.ArgumentParser(add_help=False) @@ -615,7 +631,7 @@ def run_compile(): sysinfo_flags = [f[7:] for f in sysinfo_flags] _reg2 = re.compile( - r'--((no-|)(wrap-functions|lower)|debug-capi|quiet|skip-empty-wrappers)|-include') + r'--((no-|)(wrap-functions|lower|freethreading-compatible)|debug-capi|quiet|skip-empty-wrappers)|-include') f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in f2py_flags] f2py_flags2 = [] @@ -635,10 +651,14 @@ def run_compile(): r'--((f(90)?compiler(-exec|)|compiler)=|help-compiler)') flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)] sys.argv = [_m for _m in sys.argv if _m not in flib_flags] - _reg4 = re.compile( - r'--((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') - fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)] - sys.argv = [_m for _m in sys.argv if _m not in fc_flags] + # TODO: Once distutils is dropped completely, i.e. 
min_ver >= 3.12, unify into --fflags + reg_f77_f90_flags = re.compile(r'--f(77|90)flags=') + reg_distutils_flags = re.compile(r'--((f(77|90)exec|opt|arch)=|(debug|noopt|noarch|help-fcompiler))') + fc_flags = [_m for _m in sys.argv[1:] if reg_f77_f90_flags.match(_m)] + distutils_flags = [_m for _m in sys.argv[1:] if reg_distutils_flags.match(_m)] + if not (MESON_ONLY_VER or backend_key == 'meson'): + fc_flags.extend(distutils_flags) + sys.argv = [_m for _m in sys.argv if _m not in (fc_flags + distutils_flags)] del_list = [] for s in flib_flags: @@ -713,7 +733,7 @@ def run_compile(): run_main(f" {' '.join(f2py_flags)} {' '.join(pyf_files)}".split()) # Order matters here, includes are needed for run_main above - include_dirs, sources = get_includes(sources) + include_dirs, _, sources = get_newer_options(sources) # Now use the builder builder = build_backend( modulename, diff --git a/numpy/f2py/f90mod_rules.py b/numpy/f2py/f90mod_rules.py index db53beaf616b..9c52938f08da 100644 --- a/numpy/f2py/f90mod_rules.py +++ b/numpy/f2py/f90mod_rules.py @@ -110,11 +110,16 @@ def dadd(line, s=doc): notvars.append(b['name']) for n in m['vars'].keys(): var = m['vars'][n] - if (n not in notvars) and (not l_or(isintent_hide, isprivate)(var)): + + if (n not in notvars and isvariable(var)) and (not l_or(isintent_hide, isprivate)(var)): onlyvars.append(n) mfargs.append(n) outmess('\t\tConstructing F90 module support for "%s"...\n' % (m['name'])) + if len(onlyvars) == 0 and len(notvars) == 1 and m['name'] in notvars: + outmess(f"\t\t\tSkipping {m['name']} since there are no public vars/func in this module...\n") + continue + if m['name'] in usenames and not contains_functions_or_subroutines: outmess(f"\t\t\tSkipping {m['name']} since it is in 'use'...\n") continue diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py index 009365e04761..7566e1ececeb 100755 --- a/numpy/f2py/rules.py +++ b/numpy/f2py/rules.py @@ -236,6 +236,11 @@ #initcommonhooks# #interface_usercode# +#if Py_GIL_DISABLED 
+ // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m , #gil_used#); +#endif + #ifdef F2PY_REPORT_ATEXIT if (! PyErr_Occurred()) on_exit(f2py_report_on_exit,(void*)\"#modulename#\"); diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py index 67120d79a51e..6884a473b43b 100644 --- a/numpy/f2py/symbolic.py +++ b/numpy/f2py/symbolic.py @@ -1084,9 +1084,9 @@ def as_factors(obj): if coeff == 1: return Expr(Op.FACTORS, {term: 1}) return Expr(Op.FACTORS, {term: 1, Expr.number(coeff): 1}) - if ((obj.op is Op.APPLY + if (obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV - and not obj.data[2])): + and not obj.data[2]): return Expr(Op.FACTORS, {obj.data[1][0]: 1, obj.data[1][1]: -1}) return Expr(Op.FACTORS, {obj: 1}) raise OpError(f'cannot convert {type(obj)} to terms Expr') diff --git a/numpy/f2py/tests/__init__.py b/numpy/f2py/tests/__init__.py index b07a4e724282..5ecb68077b94 100644 --- a/numpy/f2py/tests/__init__.py +++ b/numpy/f2py/tests/__init__.py @@ -1,4 +1,4 @@ -from numpy.testing import IS_WASM +from numpy.testing import IS_WASM, IS_EDITABLE import pytest if IS_WASM: @@ -6,3 +6,10 @@ "WASM/Pyodide does not use or support Fortran", allow_module_level=True ) + + +if IS_EDITABLE: + pytest.skip( + "Editable install doesn't support tests with a compile step", + allow_module_level=True + ) diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c index f3bffdc1c220..b66672a43e21 100644 --- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c +++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c @@ -115,7 +115,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self, PyArray_DESCR(arr)->type, PyArray_TYPE(arr), PyArray_ITEMSIZE(arr), - PyArray_DESCR(arr)->alignment, + PyDataType_ALIGNMENT(PyArray_DESCR(arr)), PyArray_FLAGS(arr), PyArray_ITEMSIZE(arr)); } @@ -214,7 +214,7 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { ADDCONST("DEFAULT", 
NPY_ARRAY_DEFAULT); ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL); -#undef ADDCONST( +#undef ADDCONST if (PyErr_Occurred()) Py_FatalError("can't initialize module wrap"); @@ -223,6 +223,11 @@ PyMODINIT_FUNC PyInit_test_array_from_pyobj_ext(void) { on_exit(f2py_report_on_exit,(void*)"array_from_pyobj.wrap.call"); #endif +#if Py_GIL_DISABLED + // signal whether this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } #ifdef __cplusplus diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 new file mode 100644 index 000000000000..07adce591f35 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_no_public_entities.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + private mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + private :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 new file mode 100644 index 000000000000..b7fb95b010a6 --- /dev/null +++ b/numpy/f2py/tests/src/modules/gh26920/two_mods_with_one_public_routine.f90 @@ -0,0 +1,21 @@ + module mod2 + implicit none + PUBLIC :: mod2_func1 + contains + + subroutine mod2_func1() + print*, "mod2_func1" + end subroutine mod2_func1 + + end module mod2 + + module mod1 + implicit none + PUBLIC :: mod1_func1 + contains + + subroutine mod1_func1() + print*, "mod1_func1" + end subroutine mod1_func1 + + end module mod1 diff --git a/numpy/f2py/tests/src/regression/f77comments.f b/numpy/f2py/tests/src/regression/f77comments.f new file mode 100644 index 000000000000..452a01a14439 --- /dev/null +++ 
b/numpy/f2py/tests/src/regression/f77comments.f @@ -0,0 +1,26 @@ + SUBROUTINE TESTSUB( + & INPUT1, INPUT2, !Input + & OUTPUT1, OUTPUT2) !Output + + IMPLICIT NONE + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + + OUTPUT1 = INPUT1 + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 + + RETURN + END SUBROUTINE TESTSUB + + SUBROUTINE TESTSUB2(OUTPUT) + IMPLICIT NONE + INTEGER, PARAMETER :: N = 10 ! Array dimension + REAL, INTENT(OUT) :: OUTPUT(N) + INTEGER :: I + + DO I = 1, N + OUTPUT(I) = I * 2.0 + END DO + + RETURN + END diff --git a/numpy/f2py/tests/src/regression/f77fixedform.f95 b/numpy/f2py/tests/src/regression/f77fixedform.f95 new file mode 100644 index 000000000000..e47a13f7e851 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f77fixedform.f95 @@ -0,0 +1,5 @@ +C This is an invalid file, but it does compile with -ffixed-form + subroutine mwe( + & x) + real x + end subroutine mwe diff --git a/numpy/f2py/tests/src/regression/f90continuation.f90 b/numpy/f2py/tests/src/regression/f90continuation.f90 new file mode 100644 index 000000000000..879e716bbec6 --- /dev/null +++ b/numpy/f2py/tests/src/regression/f90continuation.f90 @@ -0,0 +1,9 @@ +SUBROUTINE TESTSUB(INPUT1, & ! Hello +! commenty +INPUT2, OUTPUT1, OUTPUT2) ! 
more comments + INTEGER, INTENT(IN) :: INPUT1, INPUT2 + INTEGER, INTENT(OUT) :: OUTPUT1, OUTPUT2 + OUTPUT1 = INPUT1 + & + INPUT2 + OUTPUT2 = INPUT1 * INPUT2 +END SUBROUTINE TESTSUB diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index d5ae235e7d82..c10fe75a04cf 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -90,10 +90,7 @@ def __repr__(self): return "Intent(%r)" % (self.intent_list) def is_intent(self, *names): - for name in names: - if name not in self.intent_list: - return False - return True + return all(name in self.intent_list for name in names) def is_intent_exact(self, *names): return len(self.intent_list) == len(names) and self.is_intent(*names) diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py index 1caa4147c2d7..4986cfbdc4c7 100644 --- a/numpy/f2py/tests/test_crackfortran.py +++ b/numpy/f2py/tests/test_crackfortran.py @@ -66,7 +66,7 @@ def test_nowrap_private_proceedures(self, tmp_path): pyf = crackfortran.crack2fortran(mod) assert 'bar' not in pyf -class TestModuleProcedure(): +class TestModuleProcedure: def test_moduleOperators(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "operators.f90") mod = crackfortran.crackfortran([str(fpath)]) @@ -347,14 +347,14 @@ def test_end_if_comment(self): assert False, f"'crackfortran.crackfortran' raised an exception {exc}" -class TestF77CommonBlockReader(): +class TestF77CommonBlockReader: def test_gh22648(self, tmp_path): fpath = util.getpath("tests", "src", "crackfortran", "gh22648.pyf") with contextlib.redirect_stdout(io.StringIO()) as stdout_f2py: mod = crackfortran.crackfortran([str(fpath)]) assert "Mismatch" not in stdout_f2py.getvalue() -class TestParamEval(): +class TestParamEval: # issue gh-11612, array parameter parsing def test_param_eval_nested(self): v = '(/3.14, 4./)' diff --git a/numpy/f2py/tests/test_f2py2e.py 
b/numpy/f2py/tests/test_f2py2e.py index 744049a2422d..ce0046eb1b4b 100644 --- a/numpy/f2py/tests/test_f2py2e.py +++ b/numpy/f2py/tests/test_f2py2e.py @@ -7,6 +7,19 @@ from . import util from numpy.f2py.f2py2e import main as f2pycli +from numpy.testing._private.utils import NOGIL_BUILD + +####################### +# F2PY Test utilities # +###################### + +# Tests for CLI commands which call meson will fail if no compilers are present, these are to be skipped + +def compiler_check_f2pycli(): + if not util.has_fortran_compiler(): + pytest.skip("CLI command needs a Fortran compiler") + else: + f2pycli() ######################### # CLI utils and classes # @@ -49,9 +62,9 @@ def get_io_paths(fname_inp, mname="untitled"): ) -############## -# CLI Fixtures and Tests # -############# +################ +# CLI Fixtures # +################ @pytest.fixture(scope="session") @@ -109,6 +122,9 @@ def f2cmap_f90(tmpdir_factory): fmap.write_text(f2cmap, encoding="ascii") return fn +######### +# Tests # +######### def test_gh22819_cli(capfd, gh22819_cli, monkeypatch): """Check that module names are handled correctly @@ -198,8 +214,7 @@ def test_gen_pyf_no_overwrite(capfd, hello_world_f90, monkeypatch): assert "Use --overwrite-signature to overwrite" in err -@pytest.mark.skipif((platform.system() != 'Linux') or (sys.version_info <= (3, 12)), - reason='Compiler and 3.12 required') +@pytest.mark.skipif(sys.version_info <= (3, 12), reason="Python 3.12 required") def test_untitled_cli(capfd, hello_world_f90, monkeypatch): """Check that modules are named correctly @@ -208,7 +223,7 @@ def test_untitled_cli(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) monkeypatch.setattr(sys, "argv", f"f2py --backend meson -c {ipath}".split()) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() out, _ = capfd.readouterr() assert "untitledmodule.c" in out @@ -225,7 +240,7 @@ def test_no_py312_distutils_fcompiler(capfd, hello_world_f90, monkeypatch): sys, 
"argv", f"f2py {ipath} -c --fcompiler=gfortran -m {MNAME}".split() ) with util.switchdir(ipath.parent): - f2pycli() + compiler_check_f2pycli() out, _ = capfd.readouterr() assert "--fcompiler cannot be used with meson" in out monkeypatch.setattr( @@ -573,7 +588,7 @@ def test_debugcapi_bld(hello_world_f90, monkeypatch): with util.switchdir(ipath.parent): f2pycli() - cmd_run = shlex.split("python3 -c \"import blah; blah.hi()\"") + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' eerr = textwrap.dedent("""\ @@ -742,16 +757,64 @@ def test_npdistop(hello_world_f90, monkeypatch): with util.switchdir(ipath.parent): f2pycli() - cmd_run = shlex.split("python -c \"import blah; blah.hi()\"") + cmd_run = shlex.split(f"{sys.executable} -c \"import blah; blah.hi()\"") rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') eout = ' Hello World\n' assert rout.stdout == eout +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info <= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_no_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --no-freethreading-compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --no-freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is True\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + if NOGIL_BUILD: + assert "The global interpreter lock (GIL) has been enabled to load module 'blah'" in rout.stderr + assert rout.returncode == 0 + + +@pytest.mark.skipif((platform.system() != 'Linux') or sys.version_info 
<= (3, 12), + reason='Compiler and Python 3.12 or newer required') +def test_freethreading_compatible(hello_world_f90, monkeypatch): + """ + CLI :: --freethreading_compatible + """ + ipath = Path(hello_world_f90) + monkeypatch.setattr(sys, "argv", f'f2py -m blah {ipath} -c --freethreading-compatible'.split()) + + with util.switchdir(ipath.parent): + compiler_check_f2pycli() + cmd = f"{sys.executable} -c \"import blah; blah.hi();" + if NOGIL_BUILD: + cmd += "import sys; assert sys._is_gil_enabled() is False\"" + else: + cmd += "\"" + cmd_run = shlex.split(cmd) + rout = subprocess.run(cmd_run, capture_output=True, encoding='UTF-8') + eout = ' Hello World\n' + assert rout.stdout == eout + assert rout.stderr == "" + assert rout.returncode == 0 + + # Numpy distutils flags # TODO: These should be tested separately - def test_npd_fcompiler(): """ CLI :: -c --fcompiler diff --git a/numpy/f2py/tests/test_modules.py b/numpy/f2py/tests/test_modules.py index 009ae3365cd5..436e0c700017 100644 --- a/numpy/f2py/tests/test_modules.py +++ b/numpy/f2py/tests/test_modules.py @@ -5,6 +5,37 @@ from numpy.testing import IS_PYPY +@pytest.mark.slow +class TestModuleFilterPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_one_public_routine.f90" + ) + ] + # we filter the only public function mod2 + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + +@pytest.mark.slow +class TestModuleWithoutPublicEntities(util.F2PyTest): + sources = [ + util.getpath( + "tests", "src", "modules", "gh26920", + "two_mods_with_no_public_entities.f90" + ) + ] + only = ["mod1_func1", ] + + def test_gh26920(self): + # if it compiles and can be loaded, things are fine + pass + + @pytest.mark.slow class TestModuleDocString(util.F2PyTest): sources = [util.getpath("tests", "src", "modules", "module_data_docstring.f90")] diff --git a/numpy/f2py/tests/test_regression.py 
b/numpy/f2py/tests/test_regression.py index c0a8045d91b9..e11ed1a0efa3 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -1,7 +1,9 @@ import os import pytest +import platform import numpy as np +import numpy.testing as npt from . import util @@ -76,3 +78,64 @@ def test_gh25344(self): exp = 7.0 res = self.module.add(3.0, 4.0) assert exp == res + +class TestF77Comments(util.F2PyTest): + # Check that comments are stripped from F77 continuation lines + sources = [util.getpath("tests", "src", "regression", "f77comments.f")] + + @pytest.mark.slow + def test_gh26148(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res=self.module.testsub(x1, x2) + assert(res[0] == 8) + assert(res[1] == 15) + + @pytest.mark.slow + def test_gh26466(self): + # Check that comments after PARAMETER directions are stripped + expected = np.arange(1, 11, dtype=np.float32)*2 + res=self.module.testsub2() + npt.assert_allclose(expected, res) + +class TestF90Contiuation(util.F2PyTest): + # Check that comments are stripped from F90 continuation lines + sources = [util.getpath("tests", "src", "regression", "f90continuation.f90")] + + @pytest.mark.slow + def test_gh26148b(self): + x1 = np.array(3, dtype=np.int32) + x2 = np.array(5, dtype=np.int32) + res=self.module.testsub(x1, x2) + assert(res[0] == 8) + assert(res[1] == 15) + +@pytest.mark.slow +def test_gh26623(): + # Including libraries with . 
should not generate an incorrect meson.build + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f90continuation.f90")], + ["-lfoo.bar"], + module_name="Blah", + ) + except RuntimeError as rerr: + assert "lparen got assign" not in str(rerr) + + +@pytest.mark.slow +@pytest.mark.skipif(platform.system() not in ['Linux', 'Darwin'], reason='Unsupported on this platform for now') +def test_gh25784(): + # Compile dubious file using passed flags + try: + aa = util.build_module( + [util.getpath("tests", "src", "regression", "f77fixedform.f95")], + options=[ + # Meson will collect and dedup these to pass to fortran_args: + "--f77flags='-ffixed-form -O2'", + "--f90flags=\"-ffixed-form -Og\"", + ], + module_name="Blah", + ) + except ImportError as rerr: + assert "unknown_subroutine_" in str(rerr) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index faedd4cc1597..9cad71a9cf5c 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -26,6 +26,102 @@ from importlib import import_module from numpy.f2py._backends._meson import MesonBackend +# +# Check if compilers are available at all... 
+# + +def check_language(lang, code_snippet=None): + if sys.platform == "win32": + pytest.skip("No Fortran tests on Windows (Issue #25134)", allow_module_level=True) + tmpdir = tempfile.mkdtemp() + try: + meson_file = os.path.join(tmpdir, "meson.build") + with open(meson_file, "w") as f: + f.write("project('check_compilers')\n") + f.write(f"add_languages('{lang}')\n") + if code_snippet: + f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") + f.write(f"{lang}_code = '''{code_snippet}'''\n") + f.write( + f"_have_{lang}_feature =" + f"{lang}_compiler.compiles({lang}_code," + f" name: '{lang} feature check')\n" + ) + try: + runmeson = subprocess.run( + ["meson", "setup", "btmp"], + check=False, + cwd=tmpdir, + capture_output=True, + ) + except subprocess.CalledProcessError: + pytest.skip("meson not present, skipping compiler dependent test", allow_module_level=True) + return runmeson.returncode == 0 + finally: + shutil.rmtree(tmpdir) + return False + + +fortran77_code = ''' +C Example Fortran 77 code + PROGRAM HELLO + PRINT *, 'Hello, Fortran 77!' + END +''' + +fortran90_code = ''' +! Example Fortran 90 code +program hello90 + type :: greeting + character(len=20) :: text + end type greeting + + type(greeting) :: greet + greet%text = 'hello, fortran 90!' 
+ print *, greet%text +end program hello90 +''' + +# Dummy class for caching relevant checks +class CompilerChecker: + def __init__(self): + self.compilers_checked = False + self.has_c = False + self.has_f77 = False + self.has_f90 = False + + def check_compilers(self): + if (not self.compilers_checked) and (not sys.platform == "cygwin"): + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = [ + executor.submit(check_language, "c"), + executor.submit(check_language, "fortran", fortran77_code), + executor.submit(check_language, "fortran", fortran90_code) + ] + + self.has_c = futures[0].result() + self.has_f77 = futures[1].result() + self.has_f90 = futures[2].result() + + self.compilers_checked = True + +if not IS_WASM: + checker = CompilerChecker() + checker.check_compilers() + +def has_c_compiler(): + return checker.has_c + +def has_f77_compiler(): + return checker.has_f77 + +def has_f90_compiler(): + return checker.has_f90 + +def has_fortran_compiler(): + return (checker.has_f90 and checker.has_f77) + + # # Maintaining a temporary module directory # @@ -109,6 +205,9 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): code = f"import sys; sys.path = {sys.path!r}; import numpy.f2py; numpy.f2py.main()" d = get_module_dir() + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") # Copy files dst_sources = [] @@ -121,7 +220,7 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): dst_sources.append(dst) base, ext = os.path.splitext(dst) - if ext in (".f90", ".f", ".c", ".pyf"): + if ext in (".f90", ".f95", ".f", ".c", ".pyf"): f2py_sources.append(dst) assert f2py_sources @@ -129,7 +228,11 @@ def build_module(source_files, options=[], skip=[], only=[], module_name=None): # Prepare options if module_name is None: module_name = get_temp_module_name() - f2py_opts = ["-c", "-m", module_name] + options + f2py_sources + gil_options 
= [] + if '--freethreading-compatible' not in options and '--no-freethreading-compatible' not in options: + # default to disabling the GIL if unset in options + gil_options = ['--freethreading-compatible'] + f2py_opts = ["-c", "-m", module_name] + options + gil_options + f2py_sources f2py_opts += ["--backend", "meson"] if skip: f2py_opts += ["skip:"] + skip @@ -195,96 +298,6 @@ def build_code(source_code, module_name=module_name) -# -# Check if compilers are available at all... -# - -def check_language(lang, code_snippet=None): - tmpdir = tempfile.mkdtemp() - try: - meson_file = os.path.join(tmpdir, "meson.build") - with open(meson_file, "w") as f: - f.write("project('check_compilers')\n") - f.write(f"add_languages('{lang}')\n") - if code_snippet: - f.write(f"{lang}_compiler = meson.get_compiler('{lang}')\n") - f.write(f"{lang}_code = '''{code_snippet}'''\n") - f.write( - f"_have_{lang}_feature =" - f"{lang}_compiler.compiles({lang}_code," - f" name: '{lang} feature check')\n" - ) - runmeson = subprocess.run( - ["meson", "setup", "btmp"], - check=False, - cwd=tmpdir, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - if runmeson.returncode == 0: - return True - else: - return False - finally: - shutil.rmtree(tmpdir) - return False - -fortran77_code = ''' -C Example Fortran 77 code - PROGRAM HELLO - PRINT *, 'Hello, Fortran 77!' - END -''' - -fortran90_code = ''' -! Example Fortran 90 code -program hello90 - type :: greeting - character(len=20) :: text - end type greeting - - type(greeting) :: greet - greet%text = 'hello, fortran 90!' 
- print *, greet%text -end program hello90 -''' - -# Dummy class for caching relevant checks -class CompilerChecker: - def __init__(self): - self.compilers_checked = False - self.has_c = False - self.has_f77 = False - self.has_f90 = False - - def check_compilers(self): - if (not self.compilers_checked) and (not sys.platform == "cygwin"): - with concurrent.futures.ThreadPoolExecutor() as executor: - futures = [ - executor.submit(check_language, "c"), - executor.submit(check_language, "fortran", fortran77_code), - executor.submit(check_language, "fortran", fortran90_code) - ] - - self.has_c = futures[0].result() - self.has_f77 = futures[1].result() - self.has_f90 = futures[2].result() - - self.compilers_checked = True - -if not IS_WASM: - checker = CompilerChecker() - checker.check_compilers() - -def has_c_compiler(): - return checker.has_c - -def has_f77_compiler(): - return checker.has_f77 - -def has_f90_compiler(): - return checker.has_f90 - # # Building with meson # @@ -303,6 +316,11 @@ def build_meson(source_files, module_name=None, **kwargs): """ Build a module via Meson and import it. 
""" + + # gh-27045 : Skip if no compilers are found + if not has_fortran_compiler(): + pytest.skip("No Fortran compiler available") + build_dir = get_module_dir() if module_name is None: module_name = get_temp_module_name() @@ -327,13 +345,7 @@ def build_meson(source_files, module_name=None, **kwargs): extra_dat=kwargs.get("extra_dat", {}), ) - # Compile the module - # NOTE: Catch-all since without distutils it is hard to determine which - # compiler stack is on the CI - try: - backend.compile() - except: - pytest.skip("Failed to compile module") + backend.compile() # Import the compiled module sys.path.insert(0, f"{build_dir}/{backend.meson_build_dir}") @@ -369,6 +381,7 @@ def setup_class(cls): F2PyTest._has_c_compiler = has_c_compiler() F2PyTest._has_f77_compiler = has_f77_compiler() F2PyTest._has_f90_compiler = has_f90_compiler() + F2PyTest._has_fortran_compiler = has_fortran_compiler() def setup_method(self): if self.module is not None: @@ -386,7 +399,7 @@ def setup_method(self): pytest.skip("No Fortran 77 compiler available") if needs_f90 and not self._has_f90_compiler: pytest.skip("No Fortran 90 compiler available") - if needs_pyf and not (self._has_f90_compiler or self._has_f77_compiler): + if needs_pyf and not self._has_fortran_compiler: pytest.skip("No Fortran compiler available") # Build the module diff --git a/numpy/fft/_helper.py b/numpy/fft/_helper.py index 9f4512f90715..f6c114bab18d 100644 --- a/numpy/fft/_helper.py +++ b/numpy/fft/_helper.py @@ -42,6 +42,7 @@ def fftshift(x, axes=None): Examples -------- + >>> import numpy as np >>> freqs = np.fft.fftfreq(10, 0.1) >>> freqs array([ 0., 1., 2., ..., -3., -2., -1.]) @@ -97,6 +98,7 @@ def ifftshift(x, axes=None): Examples -------- + >>> import numpy as np >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3) >>> freqs array([[ 0., 1., 2.], @@ -153,6 +155,7 @@ def fftfreq(n, d=1.0, device=None): Examples -------- + >>> import numpy as np >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float) >>> 
fourier = np.fft.fft(signal) >>> n = signal.size @@ -211,6 +214,7 @@ def rfftfreq(n, d=1.0, device=None): Examples -------- + >>> import numpy as np >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) >>> fourier = np.fft.rfft(signal) >>> n = signal.size diff --git a/numpy/fft/_pocketfft.py b/numpy/fft/_pocketfft.py index 5972a346de20..4edeecc075ad 100644 --- a/numpy/fft/_pocketfft.py +++ b/numpy/fft/_pocketfft.py @@ -34,7 +34,7 @@ import warnings from numpy.lib.array_utils import normalize_axis_index -from numpy._core import (asarray, empty, zeros, swapaxes, result_type, +from numpy._core import (asarray, empty_like, result_type, conjugate, take, sqrt, reciprocal) from . import _pocketfft_umath as pfu from numpy._core import overrides @@ -85,8 +85,8 @@ def _raw_fft(a, n, axis, is_real, is_forward, norm, out=None): out_dtype = real_dtype else: # Others, complex output. out_dtype = result_type(a.dtype, 1j) - out = empty(a.shape[:axis] + (n_out,) + a.shape[axis+1:], - dtype=out_dtype) + out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis+1:], + dtype=out_dtype) elif ((shape := getattr(out, "shape", None)) is not None and (len(shape) != a.ndim or shape[axis] != n_out)): raise ValueError("output array has wrong shape.") @@ -185,6 +185,7 @@ def fft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, @@ -291,6 +292,7 @@ def ifft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.ifft([0, 4, 0, 0]) array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary @@ -398,6 +400,7 @@ def rfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.fft([0, 1, 0, 0]) array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary >>> np.fft.rfft([0, 1, 0, 0]) @@ 
-506,6 +509,7 @@ def irfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> np.fft.ifft([1, -1j, -1, 1j]) array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary >>> np.fft.irfft([1, -1j, -1]) @@ -601,6 +605,7 @@ def hfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> signal = np.array([1, 2, 3, 4, 3, 2]) >>> np.fft.fft(signal) array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary @@ -686,6 +691,7 @@ def ihfft(a, n=None, axis=-1, norm=None, out=None): Examples -------- + >>> import numpy as np >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> np.fft.ifft(spectrum) array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary @@ -855,6 +861,7 @@ def fftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:3, :3, :3][0] >>> np.fft.fftn(a, axes=(1, 2)) array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -996,6 +1003,7 @@ def ifftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -1127,6 +1135,7 @@ def fft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> np.fft.fft2(a) array([[ 50. +0.j , 0. +0.j , 0. 
+0.j , # may vary @@ -1256,6 +1265,7 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = 4 * np.eye(4) >>> np.fft.ifft2(a) array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary @@ -1373,6 +1383,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.ones((2, 2, 2)) >>> np.fft.rfftn(a) array([[[8.+0.j, 0.+0.j], # may vary @@ -1390,7 +1401,7 @@ def rfftn(a, s=None, axes=None, norm=None, out=None): a = asarray(a) s, axes = _cook_nd_args(a, s, axes) a = rfft(a, s[-1], axes[-1], norm, out=out) - for ii in range(len(axes)-1): + for ii in range(len(axes)-2, -1, -1): a = fft(a, s[ii], axes[ii], norm, out=out) return a @@ -1465,6 +1476,7 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> np.fft.rfft2(a) array([[ 50. +0.j , 0. +0.j , 0. +0.j ], @@ -1597,6 +1609,7 @@ def irfftn(a, s=None, axes=None, norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 2, 2)) >>> a[0, 0, 0] = 3 * 2 * 2 >>> np.fft.irfftn(a) @@ -1689,6 +1702,7 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None, out=None): Examples -------- + >>> import numpy as np >>> a = np.mgrid[:5, :5][0] >>> A = np.fft.rfft2(a) >>> np.fft.irfft2(A, s=a.shape) diff --git a/numpy/fft/_pocketfft_umath.cpp b/numpy/fft/_pocketfft_umath.cpp index 127ebfdb6149..848888710d6c 100644 --- a/numpy/fft/_pocketfft_umath.cpp +++ b/numpy/fft/_pocketfft_umath.cpp @@ -172,6 +172,7 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, auto plan = pocketfft::detail::get_plan>(npts); auto buffered = (step_out != sizeof(std::complex)); pocketfft::detail::arr> buff(buffered ? nout : 0); + auto nin_used = nin <= npts ? nin : npts; for (size_t i = 0; i < n_outer; i++, ip += si, fp += sf, op += so) { std::complex *op_or_buff = buffered ? 
buff.data() : (std::complex *)op; /* @@ -183,10 +184,10 @@ rfft_impl(char **args, npy_intp const *dimensions, npy_intp const *steps, * Pocketfft uses FFTpack order, R0,R1,I1,...Rn-1,In-1,Rn[,In] (last * for npts odd only). To make unpacking easy, we place the real data * offset by one in the buffer, so that we just have to move R0 and - * create I0=0. Note that copy_data will zero the In component for + * create I0=0. Note that copy_input will zero the In component for * even number of points. */ - copy_input(ip, step_in, nin, &((T *)op_or_buff)[1], nout*2 - 1); + copy_input(ip, step_in, nin_used, &((T *)op_or_buff)[1], nout*2 - 1); plan->exec(&((T *)op_or_buff)[1], *(T *)fp, pocketfft::FORWARD); op_or_buff[0] = op_or_buff[0].imag(); // I0->R0, I0=0 if (buffered) { @@ -297,17 +298,17 @@ static PyUFuncGenericFunction fft_functions[] = { wrap_legacy_cpp_ufunc>, wrap_legacy_cpp_ufunc> }; -static char fft_types[] = { +static const char fft_types[] = { NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_CLONGDOUBLE, NPY_LONGDOUBLE, NPY_CLONGDOUBLE }; -static void *fft_data[] = { +static void *const fft_data[] = { (void*)&pocketfft::FORWARD, (void*)&pocketfft::FORWARD, (void*)&pocketfft::FORWARD }; -static void *ifft_data[] = { +static void *const ifft_data[] = { (void*)&pocketfft::BACKWARD, (void*)&pocketfft::BACKWARD, (void*)&pocketfft::BACKWARD @@ -323,7 +324,7 @@ static PyUFuncGenericFunction rfft_n_odd_functions[] = { wrap_legacy_cpp_ufunc>, wrap_legacy_cpp_ufunc> }; -static char rfft_types[] = { +static const char rfft_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_FLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_LONGDOUBLE, NPY_LONGDOUBLE, NPY_CLONGDOUBLE @@ -334,7 +335,7 @@ static PyUFuncGenericFunction irfft_functions[] = { wrap_legacy_cpp_ufunc>, wrap_legacy_cpp_ufunc> }; -static char irfft_types[] = { +static const char irfft_types[] = { NPY_CDOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_FLOAT, NPY_CLONGDOUBLE, NPY_LONGDOUBLE, 
NPY_LONGDOUBLE @@ -418,5 +419,10 @@ PyMODINIT_FUNC PyInit__pocketfft_umath(void) return NULL; } +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/fft/meson.build b/numpy/fft/meson.build index 854a9a0a4d6f..751b5dc74d30 100644 --- a/numpy/fft/meson.build +++ b/numpy/fft/meson.build @@ -34,5 +34,6 @@ py.install_sources( 'tests/test_helper.py', 'tests/test_pocketfft.py', ], - subdir: 'numpy/fft/tests' + subdir: 'numpy/fft/tests', + install_tag: 'tests' ) diff --git a/numpy/fft/pocketfft b/numpy/fft/pocketfft index 0f7aa1225b06..33ae5dc94c9c 160000 --- a/numpy/fft/pocketfft +++ b/numpy/fft/pocketfft @@ -1 +1 @@ -Subproject commit 0f7aa1225b065938fc263b7914df16b8c1cbc9d7 +Subproject commit 33ae5dc94c9cdc7f1c78346504a85de87cadaa12 diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py index 500d97282cde..fc6592e4f4f6 100644 --- a/numpy/fft/tests/test_pocketfft.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -38,7 +38,7 @@ def test_identity_long_short(self, dtype): # Test with explicitly given number of points, both for n # smaller and for n larger than the input size. 
maxlen = 16 - atol = 4 * np.spacing(np.array(1., dtype=dtype)) + atol = 5 * np.spacing(np.array(1., dtype=dtype)) x = random(maxlen).astype(dtype) + 1j*random(maxlen).astype(dtype) xx = np.concatenate([x, np.zeros_like(x)]) xr = random(maxlen).astype(dtype) @@ -183,7 +183,6 @@ def test_fft_bad_out(self): with pytest.raises(TypeError, match="Cannot cast"): np.fft.fft(x, out=np.zeros_like(x, dtype=float)) - @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward')) def test_ifft(self, norm): x = random(30) + 1j*random(30) @@ -258,6 +257,17 @@ def test_rfft(self): np.fft.rfft(x, n=n) / n, np.fft.rfft(x, n=n, norm="forward"), atol=1e-6) + def test_rfft_even(self): + x = np.arange(8) + n = 4 + y = np.fft.rfft(x, n) + assert_allclose(y, np.fft.fft(x[:n])[:n//2 + 1], rtol=1e-14) + + def test_rfft_odd(self): + x = np.array([1, 0, 2, 3, -3]) + y = np.fft.rfft(x) + assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14) + def test_irfft(self): x = random(30) assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6) @@ -297,6 +307,14 @@ def test_rfftn(self): np.fft.rfftn(x, norm="ortho"), atol=1e-6) assert_allclose(np.fft.rfftn(x) / (30. * 20. 
* 10.), np.fft.rfftn(x, norm="forward"), atol=1e-6) + # Regression test for gh-27159 + x = np.ones((2, 3)) + result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40)) + assert result.shape == (10, 21) + expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40), + axis=0, n=20), axis=0, n=10) + assert expected.shape == (10, 21) + assert_allclose(result, expected, atol=1e-6) def test_irfftn(self): x = random((30, 20, 10)) @@ -487,6 +505,16 @@ def test_fft_with_order(dtype, order, fft): raise ValueError() +@pytest.mark.parametrize("order", ["F", "C"]) +@pytest.mark.parametrize("n", [None, 7, 12]) +def test_fft_output_order(order, n): + rng = np.random.RandomState(42) + x = rng.rand(10) + x = np.asarray(x, dtype=np.complex64, order=order) + res = np.fft.fft(x, n=n) + assert res.flags.c_contiguous == x.flags.c_contiguous + assert res.flags.f_contiguous == x.flags.f_contiguous + @pytest.mark.skipif(IS_WASM, reason="Cannot start thread") class TestFFTThreadSafe: threads = 16 diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py index 22ad35e93c35..f048b9e2818f 100644 --- a/numpy/lib/__init__.py +++ b/numpy/lib/__init__.py @@ -67,7 +67,8 @@ def __getattr__(attr): raise AttributeError( "numpy.lib.emath was an alias for emath module that was removed " "in NumPy 2.0. Replace usages of numpy.lib.emath with " - "numpy.emath." + "numpy.emath.", + name=None ) elif attr in ( "histograms", "type_check", "nanfunctions", "function_base", @@ -77,12 +78,14 @@ def __getattr__(attr): raise AttributeError( f"numpy.lib.{attr} is now private. If you are using a public " "function, it should be available in the main numpy namespace, " - "otherwise check the NumPy 2.0 migration guide." + "otherwise check the NumPy 2.0 migration guide.", + name=None ) elif attr == "arrayterator": raise AttributeError( "numpy.lib.arrayterator submodule is now private. To access " - "Arrayterator class use numpy.lib.Arrayterator." 
+ "Arrayterator class use numpy.lib.Arrayterator.", + name=None ) else: raise AttributeError("module {!r} has no attribute " diff --git a/numpy/lib/_array_utils_impl.py b/numpy/lib/_array_utils_impl.py index 3e9d96e93dd9..d5f778160358 100644 --- a/numpy/lib/_array_utils_impl.py +++ b/numpy/lib/_array_utils_impl.py @@ -29,6 +29,7 @@ def byte_bounds(a): Examples -------- + >>> import numpy as np >>> I = np.eye(2, dtype='f'); I.dtype dtype('float32') >>> low, high = np.lib.array_utils.byte_bounds(I) diff --git a/numpy/lib/_arraypad_impl.py b/numpy/lib/_arraypad_impl.py index 7ec52167f1c0..8bdb1b992195 100644 --- a/numpy/lib/_arraypad_impl.py +++ b/numpy/lib/_arraypad_impl.py @@ -49,7 +49,7 @@ def _slice_at_axis(sl, axis): Examples -------- - >>> _slice_at_axis(slice(None, 3, -1), 1) + >>> np._slice_at_axis(slice(None, 3, -1), 1) (slice(None, None, None), slice(None, 3, -1), (...,)) """ return (slice(None),) * axis + (sl,) + (...,) @@ -293,7 +293,8 @@ def _get_stats(padded, axis, width_pair, length_pair, stat_func): return left_stat, right_stat -def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): +def _set_reflect_both(padded, axis, width_pair, method, + original_period, include_edge=False): """ Pad `axis` of `arr` with reflection. @@ -308,6 +309,8 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): dimension. method : str Controls method of reflection; options are 'even' or 'odd'. + original_period : int + Original length of data on `axis` of `arr`. include_edge : bool If true, edge value is included in reflection, otherwise the edge value forms the symmetric axis to the reflection. 
@@ -320,11 +323,20 @@ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): """ left_pad, right_pad = width_pair old_length = padded.shape[axis] - right_pad - left_pad - + if include_edge: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = old_length // original_period * original_period # Edge is included, we need to offset the pad amount by 1 edge_offset = 1 else: + # Avoid wrapping with only a subset of the original area + # by ensuring period can only be a multiple of the original + # area's length. + old_length = ((old_length - 1) // (original_period - 1) + * (original_period - 1) + 1) edge_offset = 0 # Edge is not included, no need to offset pad amount old_length -= 1 # but must be omitted from the chunk @@ -672,6 +684,7 @@ def pad(array, pad_width, mode='constant', **kwargs): Examples -------- + >>> import numpy as np >>> a = [1, 2, 3, 4, 5] >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) array([4, 4, 1, ..., 6, 6, 6]) @@ -848,7 +861,7 @@ def pad(array, pad_width, mode='constant', **kwargs): elif mode in {"reflect", "symmetric"}: method = kwargs.get("reflect_type", "even") - include_edge = True if mode == "symmetric" else False + include_edge = mode == "symmetric" for axis, (left_index, right_index) in zip(axes, pad_width): if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): # Extending singleton dimension for 'reflect' is legacy @@ -865,7 +878,7 @@ def pad(array, pad_width, mode='constant', **kwargs): # the length of the original values in the current dimension. 
left_index, right_index = _set_reflect_both( roi, axis, (left_index, right_index), - method, include_edge + method, array.shape[axis], include_edge ) elif mode == "wrap": diff --git a/numpy/lib/_arraysetops_impl.py b/numpy/lib/_arraysetops_impl.py index c8e1fa888295..3de2128c1d5c 100644 --- a/numpy/lib/_arraysetops_impl.py +++ b/numpy/lib/_arraysetops_impl.py @@ -68,6 +68,7 @@ def ediff1d(ary, to_end=None, to_begin=None): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 4, 7, 0]) >>> np.ediff1d(x) array([ 1, 2, 3, -7]) @@ -228,11 +229,17 @@ def unique(ar, return_index=False, return_inverse=False, .. versionchanged: 2.0 For multi-dimensional inputs, ``unique_inverse`` is reshaped such that the input can be reconstructed using - ``np.take(unique, unique_inverse)`` when ``axis = None``, and - ``np.take_along_axis(unique, unique_inverse, axis=axis)`` otherwise. + ``np.take(unique, unique_inverse, axis=axis)``. The result is + now not 1-dimensional when ``axis=None``. + + Note that in NumPy 2.0.0 a higher dimensional array was returned also + when ``axis`` was not ``None``. This was reverted, but + ``inverse.reshape(-1)`` can be used to ensure compatibility with both + versions. 
Examples -------- + >>> import numpy as np >>> np.unique([1, 1, 2, 2, 3, 3]) array([1, 2, 3]) >>> a = np.array([[1, 1], [2, 3]]) @@ -282,7 +289,7 @@ def unique(ar, return_index=False, return_inverse=False, ar = np.asanyarray(ar) if axis is None: ret = _unique1d(ar, return_index, return_inverse, return_counts, - equal_nan=equal_nan, inverse_shape=ar.shape) + equal_nan=equal_nan, inverse_shape=ar.shape, axis=None) return _unpack_tuple(ret) # axis was specified and not None @@ -328,13 +335,15 @@ def reshape_uniq(uniq): output = _unique1d(consolidated, return_index, return_inverse, return_counts, - equal_nan=equal_nan, inverse_shape=inverse_shape) + equal_nan=equal_nan, inverse_shape=inverse_shape, + axis=axis) output = (reshape_uniq(output[0]),) + output[1:] return _unpack_tuple(output) def _unique1d(ar, return_index=False, return_inverse=False, - return_counts=False, *, equal_nan=True, inverse_shape=None): + return_counts=False, *, equal_nan=True, inverse_shape=None, + axis=None): """ Find the unique elements of an array, ignoring shape. """ @@ -371,7 +380,7 @@ def _unique1d(ar, return_index=False, return_inverse=False, imask = np.cumsum(mask) - 1 inv_idx = np.empty(mask.shape, dtype=np.intp) inv_idx[perm] = imask - ret += (inv_idx.reshape(inverse_shape),) + ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,) if return_counts: idx = np.concatenate(np.nonzero(mask) + ([mask.size],)) ret += (np.diff(idx),) @@ -433,6 +442,15 @@ def unique_all(x): -------- unique : Find the unique elements of an array. + Examples + -------- + >>> import numpy as np + >>> np.unique_all([1, 1, 2]) + UniqueAllResult(values=array([1, 2]), + indices=array([0, 2]), + inverse_indices=array([0, 0, 1]), + counts=array([2, 1])) + """ result = unique( x, @@ -476,6 +494,12 @@ def unique_counts(x): -------- unique : Find the unique elements of an array. 
+ Examples + -------- + >>> import numpy as np + >>> np.unique_counts([1, 1, 2]) + UniqueCountsResult(values=array([1, 2]), counts=array([2, 1])) + """ result = unique( x, @@ -520,6 +544,12 @@ def unique_inverse(x): -------- unique : Find the unique elements of an array. + Examples + -------- + >>> import numpy as np + >>> np.unique_inverse([1, 1, 2]) + UniqueInverseResult(values=array([1, 2]), inverse_indices=array([0, 0, 1])) + """ result = unique( x, @@ -560,6 +590,12 @@ def unique_values(x): -------- unique : Find the unique elements of an array. + Examples + -------- + >>> import numpy as np + >>> np.unique_values([1, 1, 2]) + array([1, 2]) + """ return unique( x, @@ -611,6 +647,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): Examples -------- + >>> import numpy as np >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) array([1, 3]) @@ -686,7 +723,7 @@ def setxor1d(ar1, ar2, assume_unique=False): Input arrays. assume_unique : bool If True, the input arrays are both assumed to be unique, which - can speed up the calculation. Default is False. + can speed up the calculation. Default is False. 
Returns ------- @@ -696,6 +733,7 @@ def setxor1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4]) >>> b = np.array([2, 3, 5, 7, 5]) >>> np.setxor1d(a,b) @@ -706,7 +744,7 @@ def setxor1d(ar1, ar2, assume_unique=False): ar1 = unique(ar1) ar2 = unique(ar2) - aux = np.concatenate((ar1, ar2)) + aux = np.concatenate((ar1, ar2), axis=None) if aux.size == 0: return aux @@ -799,6 +837,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): Examples -------- + >>> import numpy as np >>> test = np.array([0, 1, 2, 5, 0]) >>> states = [0, 2] >>> mask = np.in1d(test, states) @@ -853,30 +892,16 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): if ar2.dtype == bool: ar2 = ar2.astype(np.uint8) - ar2_min = np.min(ar2) - ar2_max = np.max(ar2) + ar2_min = int(np.min(ar2)) + ar2_max = int(np.max(ar2)) - ar2_range = int(ar2_max) - int(ar2_min) + ar2_range = ar2_max - ar2_min # Constraints on whether we can actually use the table method: # 1. Assert memory usage is not too large below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size) # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max - # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype - if ar1.size > 0: - ar1_min = np.min(ar1) - ar1_max = np.max(ar1) - - # After masking, the range of ar1 is guaranteed to be - # within the range of ar2: - ar1_upper = min(int(ar1_max), int(ar2_max)) - ar1_lower = max(int(ar1_min), int(ar2_min)) - - range_safe_from_overflow &= all(( - ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max, - ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min - )) # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. 
@@ -906,8 +931,25 @@ def _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None): # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) - outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - - ar2_min] + in_range_ar1 = ar1[basic_mask] + if in_range_ar1.size == 0: + # Nothing more to do, since all values are out of range. + return outgoing_array + + # Unfortunately, ar2_min can be out of range for `intp` even + # if the calculation result must fit in range (and be positive). + # In that case, use ar2.dtype which must work for all unmasked + # values. + try: + ar2_min = np.array(ar2_min, dtype=np.intp) + dtype = np.intp + except OverflowError: + dtype = ar2.dtype + + out = np.empty_like(in_range_ar1, dtype=np.intp) + outgoing_array[basic_mask] = isin_helper_ar[ + np.subtract(in_range_ar1, ar2_min, dtype=dtype, + out=out, casting="unsafe")] return outgoing_array elif kind == 'table': # not range_safe_from_overflow @@ -1051,6 +1093,7 @@ def isin(element, test_elements, assume_unique=False, invert=False, *, Examples -------- + >>> import numpy as np >>> element = 2*np.arange(4).reshape((2, 2)) >>> element array([[0, 2], @@ -1120,6 +1163,7 @@ def union1d(ar1, ar2): Examples -------- + >>> import numpy as np >>> np.union1d([-1, 0, 1], [-2, 0, 2]) array([-2, -1, 0, 1, 2]) @@ -1162,6 +1206,7 @@ def setdiff1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 2, 4, 1]) >>> b = np.array([3, 4, 5, 6]) >>> np.setdiff1d(a, b) diff --git a/numpy/lib/_arrayterator_impl.py b/numpy/lib/_arrayterator_impl.py index 8b21a6086638..146161d0236d 100644 --- a/numpy/lib/_arrayterator_impl.py +++ b/numpy/lib/_arrayterator_impl.py @@ -66,6 +66,7 @@ class Arrayterator: Examples -------- + >>> import numpy as np >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6) >>> a_itor = np.lib.Arrayterator(a, 2) >>> a_itor.shape diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 
9b455513ac89..e3d85b854941 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -271,10 +271,7 @@ def _iswritemode(self, mode): # Currently only used to test the bz2 files. _writemodes = ("w", "+") - for c in mode: - if c in _writemodes: - return True - return False + return any(c in _writemodes for c in mode) def _splitzipext(self, filename): """Split zip extension from filename and return filename. @@ -423,7 +420,7 @@ def _sanitize_relative_path(self, path): last = path # Note: os.path.join treats '/' as os.sep on Windows path = path.lstrip(os.sep).lstrip('/') - path = path.lstrip(os.pardir).lstrip('..') + path = path.lstrip(os.pardir).removeprefix('..') drive, path = os.path.splitdrive(path) # for Windows return path diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 65bc7c592b29..840b501bacae 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -192,6 +192,7 @@ def rot90(m, k=1, axes=(0, 1)): Examples -------- + >>> import numpy as np >>> m = np.array([[1,2],[3,4]], int) >>> m array([[1, 2], @@ -297,6 +298,7 @@ def flip(m, axis=None): Examples -------- + >>> import numpy as np >>> A = np.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], @@ -323,7 +325,8 @@ def flip(m, axis=None): [7, 6]], [[1, 0], [3, 2]]]) - >>> A = np.random.randn(3,4,5) + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(3,4,5)) >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) True """ @@ -359,6 +362,7 @@ def iterable(y): Examples -------- + >>> import numpy as np >>> np.iterable([1, 2, 3]) True >>> np.iterable(2) @@ -501,6 +505,7 @@ def average(a, axis=None, weights=None, returned=False, *, Examples -------- + >>> import numpy as np >>> data = np.arange(1, 5) >>> data array([1, 2, 3, 4]) @@ -626,7 +631,9 @@ class ndarray is returned. Examples -------- - Convert a list into an array. If all elements are finite + >>> import numpy as np + + Convert a list into an array. 
If all elements are finite, then ``asarray_chkfinite`` is identical to ``asarray``. >>> a = [1, 2] @@ -727,6 +734,8 @@ def piecewise(x, condlist, funclist, *args, **kw): Examples -------- + >>> import numpy as np + Define the signum function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. >>> x = np.linspace(-2.5, 2.5, 6) @@ -815,6 +824,8 @@ def select(condlist, choicelist, default=0): Examples -------- + >>> import numpy as np + Beginning with an array of integers from 0 to 5 (inclusive), elements less than ``3`` are negated, elements greater than ``3`` are squared, and elements not meeting either of these conditions @@ -937,6 +948,8 @@ def copy(a, order='K', subok=False): Examples -------- + >>> import numpy as np + Create an array x, with a reference y and a copy z: >>> x = np.array([1, 2, 3]) @@ -999,7 +1012,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): 4. Any combination of N scalars/arrays with the meaning of 2. and 3. If `axis` is given, the number of varargs must equal the number of axes. - Default: 1. + Default: 1. (see Examples below). edge_order : {1, 2}, optional Gradient is calculated using N-th order accurate differences @@ -1017,14 +1030,15 @@ def gradient(f, *varargs, axis=None, edge_order=1): Returns ------- - gradient : ndarray or list of ndarray - A list of ndarrays (or a single ndarray if there is only one dimension) - corresponding to the derivatives of f with respect to each dimension. - Each derivative has the same shape as f. + gradient : ndarray or tuple of ndarray + A tuple of ndarrays (or a single ndarray if there is only one + dimension) corresponding to the derivatives of f with respect + to each dimension. Each derivative has the same shape as f. Examples -------- - >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) + >>> import numpy as np + >>> f = np.array([1, 2, 4, 7, 11, 16]) >>> np.gradient(f) array([1. , 1.5, 2.5, 3.5, 4.5, 5. 
]) >>> np.gradient(f, 2) @@ -1040,7 +1054,7 @@ def gradient(f, *varargs, axis=None, edge_order=1): Or a non uniform one: - >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) + >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.]) >>> np.gradient(f, x) array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) @@ -1048,20 +1062,22 @@ def gradient(f, *varargs, axis=None, edge_order=1): axis. In this example the first array stands for the gradient in rows and the second one in columns direction: - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) - [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], - [1. , 1. , 1. ]])] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]])) + (array([[ 2., 2., -1.], + [ 2., 2., -1.]]), + array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])) In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 >>> dx = 2. >>> y = [1., 1.5, 3.5] - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) - [array([[ 1. , 1. , -0.5], - [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], - [2. , 1.7, 0.5]])] + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), dx, y) + (array([[ 1. , 1. , -0.5], + [ 1. , 1. , -0.5]]), + array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])) It is possible to specify how boundaries are treated using `edge_order` @@ -1075,10 +1091,56 @@ def gradient(f, *varargs, axis=None, edge_order=1): The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated - >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) + >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]), axis=0) array([[ 2., 2., -1.], [ 2., 2., -1.]]) + The `varargs` argument defines the spacing between sample points in the + input array. It can take two forms: + + 1. An array, specifying coordinates, which may be unevenly spaced: + + >>> x = np.array([0., 2., 3., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, x, edge_order=2) + array([ 0., 4., 6., 12., 16.]) + + 2. 
A scalar, representing the fixed sample distance: + + >>> dx = 2 + >>> x = np.array([0., 2., 4., 6., 8.]) + >>> y = x ** 2 + >>> np.gradient(y, dx, edge_order=2) + array([ 0., 4., 8., 12., 16.]) + + It's possible to provide different data for spacing along each dimension. + The number of arguments must match the number of dimensions in the input + data. + + >>> dx = 2 + >>> dy = 3 + >>> x = np.arange(0, 6, dx) + >>> y = np.arange(0, 9, dy) + >>> xs, ys = np.meshgrid(x, y) + >>> zs = xs + 2 * ys + >>> np.gradient(zs, dy, dx) # Passing two scalars + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + + Mixing scalars and arrays is also allowed: + + >>> np.gradient(zs, y, dx) # Passing one array and one scalar + (array([[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]), + array([[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]])) + Notes ----- Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous @@ -1388,6 +1450,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) @@ -1522,6 +1585,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): Examples -------- + >>> import numpy as np >>> xp = [1, 2, 3] >>> fp = [3, 2, 0] >>> np.interp(2.5, xp, fp) @@ -1637,6 +1701,7 @@ def angle(z, deg=False): Examples -------- + >>> import numpy as np >>> np.angle([1.0, 1.0j, 1+1j]) # in radians array([ 0. 
, 1.57079633, 0.78539816]) # may vary >>> np.angle(1+1j, deg=True) # in degrees @@ -1711,6 +1776,7 @@ def unwrap(p, discont=None, axis=-1, *, period=2*pi): Examples -------- + >>> import numpy as np >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase @@ -1780,6 +1846,7 @@ def sort_complex(a): Examples -------- + >>> import numpy as np >>> np.sort_complex([5, 3, 6, 2, 1]) array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) @@ -1825,6 +1892,7 @@ def trim_zeros(filt, trim='fb'): Examples -------- + >>> import numpy as np >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) >>> np.trim_zeros(a) array([1, 2, 3, 0, 2, 1]) @@ -1890,6 +1958,7 @@ def extract(condition, arr): Examples -------- + >>> import numpy as np >>> arr = np.arange(12).reshape((3, 4)) >>> arr array([[ 0, 1, 2, 3], @@ -1947,6 +2016,7 @@ def place(arr, mask, vals): Examples -------- + >>> import numpy as np >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr @@ -1982,6 +2052,8 @@ def disp(mesg, device=None, linefeed=True): Examples -------- + >>> import numpy as np + Besides ``sys.stdout``, a file-like object can also be used as it has both required methods: @@ -2132,6 +2204,12 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, return arrays +def _get_vectorize_dtype(dtype): + if dtype.char in "SU": + return dtype.char + return dtype + + @set_module('numpy') class vectorize: """ @@ -2215,6 +2293,7 @@ class vectorize: Examples -------- + >>> import numpy as np >>> def myfunc(a, b): ... "Return a-b if a>b, otherwise return a+b" ... 
if a > b: @@ -2330,7 +2409,7 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, if char not in typecodes['All']: raise ValueError("Invalid otype specified: %s" % (char,)) elif iterable(otypes): - otypes = ''.join([_nx.dtype(x).char for x in otypes]) + otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes] elif otypes is not None: raise ValueError("Invalid otype specification") self.otypes = otypes @@ -2636,6 +2715,8 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, Examples -------- + >>> import numpy as np + Consider two variables, :math:`x_0` and :math:`x_1`, which correlate perfectly, but in opposite directions: @@ -2839,6 +2920,8 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, Examples -------- + >>> import numpy as np + In this example we generate two random arrays, ``xarr`` and ``yarr``, and compute the row-wise and column-wise Pearson correlation coefficients, ``R``. Since ``rowvar`` is true by default, we first find the row-wise @@ -2976,6 +3059,7 @@ def blackman(M): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.blackman(12) array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary @@ -3084,6 +3168,7 @@ def bartlett(M): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.bartlett(12) array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary @@ -3185,6 +3270,7 @@ def hanning(M): Examples -------- + >>> import numpy as np >>> np.hanning(12) array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, @@ -3284,6 +3370,7 @@ def hamming(M): Examples -------- + >>> import numpy as np >>> np.hamming(12) array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, @@ -3463,6 +3550,7 @@ def i0(x): Examples -------- + >>> import numpy as np >>> np.i0(0.) 
array(1.0) >>> np.i0([0, 1, 2, 3]) @@ -3559,6 +3647,7 @@ def kaiser(M, beta): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> np.kaiser(12, 14) array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary @@ -3661,6 +3750,7 @@ def sinc(x): Examples -------- + >>> import numpy as np >>> import matplotlib.pyplot as plt >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) @@ -3839,6 +3929,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4044,147 +4135,13 @@ def percentile(a, Notes ----- - In general, the percentile at percentage level :math:`q` of a cumulative - distribution function :math:`F(y)=P(Y \\leq y)` with probability measure - :math:`P` is defined as any number :math:`x` that fulfills the - *coverage conditions* - - .. math:: P(Y < x) \\leq q/100 \\quad\\text{and} - \\quad P(Y \\leq x) \\geq q/100 - - with random variable :math:`Y\\sim P`. - Sample percentiles, the result of ``percentile``, provide nonparametric - estimation of the underlying population counterparts, represented by the - unknown :math:`F`, given a data vector ``a`` of length ``n``. - - One type of estimators arises when one considers :math:`F` as the empirical - distribution function of the data, i.e. - :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. - Then, different methods correspond to different choices of :math:`x` that - fulfill the above inequalities. Methods that follow this approach are - ``inverted_cdf`` and ``averaged_inverted_cdf``. - - A more general way to define sample percentile estimators is as follows. - The empirical q-percentile of ``a`` is the ``n * q/100``-th value of the - way from the minimum to the maximum in a sorted copy of ``a``. 
The values - and distances of the two nearest neighbors as well as the `method` - parameter will determine the percentile if the normalized ranking does not - match the location of ``n * q/100`` exactly. This function is the same as - the median if ``q=50``, the same as the minimum if ``q=0`` and the same - as the maximum if ``q=100``. - - The optional `method` parameter specifies the method to use when the - desired percentile lies between two indexes ``i`` and ``j = i + 1``. - In that case, we first determine ``i + g``, a virtual index that lies - between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the - fractional part of the index. The final result is, then, an interpolation - of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, - ``i`` and ``j`` are modified using correction constants ``alpha`` and - ``beta`` whose choices depend on the ``method`` used. Finally, note that - since Python uses 0-based indexing, the code subtracts another 1 from the - index internally. - - The following formula determines the virtual index ``i + g``, the location - of the percentile in the sorted sample: - - .. math:: - i + g = (q / 100) * ( n - alpha - beta + 1 ) + alpha - - The different methods then work as follows - - inverted_cdf: - method 1 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then take i - - averaged_inverted_cdf: - method 2 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then average between bounds - - closest_observation: - method 3 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 and index is odd ; then take j - * if g = 0 and index is even ; then take i - - interpolated_inverted_cdf: - method 4 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 1 - - hazen: - method 5 of H&F [1]_. 
- This method gives continuous results using: - - * alpha = 1/2 - * beta = 1/2 - - weibull: - method 6 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 0 - - linear: - method 7 of H&F [1]_. - This method gives continuous results using: - - * alpha = 1 - * beta = 1 - - median_unbiased: - method 8 of H&F [1]_. - This method is probably the best method if the sample - distribution function is unknown (see reference). - This method gives continuous results using: - - * alpha = 1/3 - * beta = 1/3 - - normal_unbiased: - method 9 of H&F [1]_. - This method is probably the best method if the sample - distribution function is known to be normal. - This method gives continuous results using: - - * alpha = 3/8 - * beta = 3/8 - - lower: - NumPy method kept for backwards compatibility. - Takes ``i`` as the interpolation point. - - higher: - NumPy method kept for backwards compatibility. - Takes ``j`` as the interpolation point. - - nearest: - NumPy method kept for backwards compatibility. - Takes ``i`` or ``j``, whichever is nearest. - - midpoint: - NumPy method kept for backwards compatibility. - Uses ``(i + j) / 2``. - - For weighted percentiles, the above coverage conditions still hold. The - empirical cumulative distribution is simply replaced by its weighted - version, i.e. - :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. - Only ``method="inverted_cdf"`` supports weights. - + The behavior of `numpy.percentile` with percentage `q` is + that of `numpy.quantile` with argument ``q/100``. + For more information, please see `numpy.quantile`. Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4309,7 +4266,7 @@ def quantile(a, a : array_like of real numbers Input array or object that can be converted to an array. q : array_like of float - Probability or sequence of probabilities for the quantiles to compute. 
+ Probability or sequence of probabilities of the quantiles to compute. Values must be between 0 and 1 inclusive. axis : {int, tuple of int, None}, optional Axis or axes along which the quantiles are computed. The default is @@ -4326,8 +4283,7 @@ def quantile(a, method : str, optional This parameter specifies the method to use for estimating the quantile. There are many different methods, some unique to NumPy. - See the notes for explanation. The options sorted by their R type - as summarized in the H&F paper [1]_ are: + The recommended options, numbered as they appear in [1]_, are: 1. 'inverted_cdf' 2. 'averaged_inverted_cdf' @@ -4339,14 +4295,17 @@ def quantile(a, 8. 'median_unbiased' 9. 'normal_unbiased' - The first three methods are discontinuous. NumPy further defines the - following discontinuous variations of the default 'linear' (7.) option: + The first three methods are discontinuous. For backward compatibility + with previous versions of NumPy, the following discontinuous variations + of the default 'linear' (7.) option are available: * 'lower' * 'higher', * 'midpoint' * 'nearest' + See Notes for details. + .. versionchanged:: 1.22.0 This argument was previously called "interpolation" and only offered the "linear" default and last four options. @@ -4394,7 +4353,66 @@ def quantile(a, Notes ----- - In general, the quantile at probability level :math:`q` of a cumulative + Given a sample `a` from an underlying distribution, `quantile` provides a + nonparametric estimate of the inverse cumulative distribution function. + + By default, this is done by interpolating between adjacent elements in + ``y``, a sorted copy of `a`:: + + (1-g)*y[j] + g*y[j+1] + + where the index ``j`` and coefficient ``g`` are the integral and + fractional components of ``q * (n-1)``, and ``n`` is the number of + elements in the sample. + + This is a special case of Equation 1 of H&F [1]_. 
More generally, + + - ``j = (q*n + m - 1) // 1``, and + - ``g = (q*n + m - 1) % 1``, + + where ``m`` may be defined according to several different conventions. + The preferred convention may be selected using the ``method`` parameter: + + =============================== =============== =============== + ``method`` number in H&F ``m`` + =============================== =============== =============== + ``interpolated_inverted_cdf`` 4 ``0`` + ``hazen`` 5 ``1/2`` + ``weibull`` 6 ``q`` + ``linear`` (default) 7 ``1 - q`` + ``median_unbiased`` 8 ``q/3 + 1/3`` + ``normal_unbiased`` 9 ``q/4 + 3/8`` + =============================== =============== =============== + + Note that indices ``j`` and ``j + 1`` are clipped to the range ``0`` to + ``n - 1`` when the results of the formula would be outside the allowed + range of non-negative indices. The ``- 1`` in the formulas for ``j`` and + ``g`` accounts for Python's 0-based indexing. + + The table above includes only the estimators from H&F that are continuous + functions of probability `q` (estimators 4-9). NumPy also provides the + three discontinuous estimators from H&F (estimators 1-3), where ``j`` is + defined as above, ``m`` is defined as follows, and ``g`` is a function + of the real-valued ``index = q*n + m - 1`` and ``j``. + + 1. ``inverted_cdf``: ``m = 0`` and ``g = int(index - j > 0)`` + 2. ``averaged_inverted_cdf``: ``m = 0`` and + ``g = (1 + int(index - j > 0)) / 2`` + 3. ``closest_observation``: ``m = -1/2`` and + ``g = 1 - int((index == j) & (j%2 == 1))`` + + For backward compatibility with previous versions of NumPy, `quantile` + provides four additional discontinuous estimators. Like + ``method='linear'``, all have ``m = 1 - q`` so that ``j = q*(n-1) // 1``, + but ``g`` is defined as follows. 
+ + - ``lower``: ``g = 0`` + - ``midpoint``: ``g = 0.5`` + - ``higher``: ``g = 1`` + - ``nearest``: ``g = (q*(n-1) % 1) > 0.5`` + + **Weighted quantiles:** + More formally, the quantile at probability level :math:`q` of a cumulative distribution function :math:`F(y)=P(Y \\leq y)` with probability measure :math:`P` is defined as any number :math:`x` that fulfills the *coverage conditions* @@ -4402,131 +4420,18 @@ def quantile(a, .. math:: P(Y < x) \\leq q \\quad\\text{and}\\quad P(Y \\leq x) \\geq q with random variable :math:`Y\\sim P`. - Sample quantiles, the result of ``quantile``, provide nonparametric + Sample quantiles, the result of `quantile`, provide nonparametric estimation of the underlying population counterparts, represented by the - unknown :math:`F`, given a data vector ``a`` of length ``n``. + unknown :math:`F`, given a data vector `a` of length ``n``. - One type of estimators arises when one considers :math:`F` as the empirical - distribution function of the data, i.e. + Some of the estimators above arise when one considers :math:`F` as the + empirical distribution function of the data, i.e. :math:`F(y) = \\frac{1}{n} \\sum_i 1_{a_i \\leq y}`. Then, different methods correspond to different choices of :math:`x` that - fulfill the above inequalities. Methods that follow this approach are - ``inverted_cdf`` and ``averaged_inverted_cdf``. - - A more general way to define sample quantile estimators is as follows. - The empirical q-quantile of ``a`` is the ``n * q``-th value of the - way from the minimum to the maximum in a sorted copy of ``a``. The values - and distances of the two nearest neighbors as well as the `method` - parameter will determine the quantile if the normalized ranking does not - match the location of ``n * q`` exactly. This function is the same as - the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and the same - as the maximum if ``q=1.0``. 
- - The optional `method` parameter specifies the method to use when the - desired quantile lies between two indexes ``i`` and ``j = i + 1``. - In that case, we first determine ``i + g``, a virtual index that lies - between ``i`` and ``j``, where ``i`` is the floor and ``g`` is the - fractional part of the index. The final result is, then, an interpolation - of ``a[i]`` and ``a[j]`` based on ``g``. During the computation of ``g``, - ``i`` and ``j`` are modified using correction constants ``alpha`` and - ``beta`` whose choices depend on the ``method`` used. Finally, note that - since Python uses 0-based indexing, the code subtracts another 1 from the - index internally. - - The following formula determines the virtual index ``i + g``, the location - of the quantile in the sorted sample: - - .. math:: - i + g = q * ( n - alpha - beta + 1 ) + alpha - - The different methods then work as follows - - inverted_cdf: - method 1 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then take i - - averaged_inverted_cdf: - method 2 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 ; then average between bounds - - closest_observation: - method 3 of H&F [1]_. - This method gives discontinuous results: - - * if g > 0 ; then take j - * if g = 0 and index is odd ; then take j - * if g = 0 and index is even ; then take i + fulfill the above coverage conditions. Methods that follow this approach + are ``inverted_cdf`` and ``averaged_inverted_cdf``. - interpolated_inverted_cdf: - method 4 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 1 - - hazen: - method 5 of H&F [1]_. - This method gives continuous results using: - - * alpha = 1/2 - * beta = 1/2 - - weibull: - method 6 of H&F [1]_. - This method gives continuous results using: - - * alpha = 0 - * beta = 0 - - linear: - method 7 of H&F [1]_. 
- This method gives continuous results using: - - * alpha = 1 - * beta = 1 - - median_unbiased: - method 8 of H&F [1]_. - This method is probably the best method if the sample - distribution function is unknown (see reference). - This method gives continuous results using: - - * alpha = 1/3 - * beta = 1/3 - - normal_unbiased: - method 9 of H&F [1]_. - This method is probably the best method if the sample - distribution function is known to be normal. - This method gives continuous results using: - - * alpha = 3/8 - * beta = 3/8 - - lower: - NumPy method kept for backwards compatibility. - Takes ``i`` as the interpolation point. - - higher: - NumPy method kept for backwards compatibility. - Takes ``j`` as the interpolation point. - - nearest: - NumPy method kept for backwards compatibility. - Takes ``i`` or ``j``, whichever is nearest. - - midpoint: - NumPy method kept for backwards compatibility. - Uses ``(i + j) / 2``. - - **Weighted quantiles:** - For weighted quantiles, the above coverage conditions still hold. The + For weighted quantiles, the coverage conditions still hold. The empirical cumulative distribution is simply replaced by its weighted version, i.e. :math:`P(Y \\leq t) = \\frac{1}{\\sum_i w_i} \\sum_i w_i 1_{x_i \\leq t}`. @@ -4534,6 +4439,7 @@ def quantile(a, Examples -------- + >>> import numpy as np >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], @@ -4742,7 +4648,9 @@ def _discret_interpolation_to_boundaries(index, gamma_condition_fun): def _closest_observation(n, quantiles): - gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0) + # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362). + # Order is 1-based so for zero-based indexing round to nearest odd index. 
+ gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 1) return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, gamma_fun) @@ -4962,6 +4870,13 @@ def _quantile( # returns 2 instead of 1 because 0.4 is not binary representable. if quantiles.dtype.kind == "f": cdf = cdf.astype(quantiles.dtype) + # Weights must be non-negative, so we might have zero weights at the + # beginning leading to some leading zeros in cdf. The call to + # np.searchsorted for quantiles=0 will then pick the first element, + # but should pick the first one larger than zero. We + # therefore simply set 0 values in cdf to -1. + if np.any(cdf[0, ...] == 0): + cdf[cdf == 0] = -1 def find_cdf_1d(arr, cdf): indices = np.searchsorted(cdf, quantiles, side="left") @@ -5067,6 +4982,8 @@ def trapezoid(y, x=None, dx=1.0, axis=-1): Examples -------- + >>> import numpy as np + Use the trapezoidal rule on evenly spaced points: >>> np.trapezoid([1, 2, 3]) @@ -5247,6 +5164,7 @@ def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): Examples -------- + >>> import numpy as np >>> nx, ny = (3, 2) >>> x = np.linspace(0, 1, nx) >>> y = np.linspace(0, 1, ny) @@ -5374,6 +5292,7 @@ def delete(arr, obj, axis=None): Examples -------- + >>> import numpy as np >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1, 2, 3, 4], @@ -5553,43 +5472,50 @@ def insert(arr, obj, values, axis=None): ----- Note that for higher dimensional inserts ``obj=0`` behaves very different from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from - ``arr[:,[0],:] = values``. + ``arr[:,[0],:] = values``. This is because of the difference between basic + and advanced :ref:`indexing `. 
Examples -------- - >>> a = np.array([[1, 1], [2, 2], [3, 3]]) + >>> import numpy as np + >>> a = np.arange(6).reshape(3, 2) >>> a - array([[1, 1], - [2, 2], - [3, 3]]) - >>> np.insert(a, 1, 5) - array([1, 5, 1, ..., 2, 3, 3]) - >>> np.insert(a, 1, 5, axis=1) - array([[1, 5, 1], - [2, 5, 2], - [3, 5, 3]]) - - Difference between sequence and scalars: - - >>> np.insert(a, [1], [[1],[2],[3]], axis=1) - array([[1, 1, 1], - [2, 2, 2], - [3, 3, 3]]) - >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), - ... np.insert(a, [1], [[1],[2],[3]], axis=1)) + array([[0, 1], + [2, 3], + [4, 5]]) + >>> np.insert(a, 1, 6) + array([0, 6, 1, 2, 3, 4, 5]) + >>> np.insert(a, 1, 6, axis=1) + array([[0, 6, 1], + [2, 6, 3], + [4, 6, 5]]) + + Difference between sequence and scalars, + showing how ``obj=[1]`` behaves different from ``obj=1``: + + >>> np.insert(a, [1], [[7],[8],[9]], axis=1) + array([[0, 7, 1], + [2, 8, 3], + [4, 9, 5]]) + >>> np.insert(a, 1, [[7],[8],[9]], axis=1) + array([[0, 7, 8, 9, 1], + [2, 7, 8, 9, 3], + [4, 7, 8, 9, 5]]) + >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1), + ... 
np.insert(a, [1], [[7],[8],[9]], axis=1)) True >>> b = a.flatten() >>> b - array([1, 1, 2, 2, 3, 3]) - >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) + array([0, 1, 2, 3, 4, 5]) + >>> np.insert(b, [2, 2], [6, 7]) + array([0, 1, 6, 7, 2, 3, 4, 5]) - >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, ..., 2, 3, 3]) + >>> np.insert(b, slice(2, 4), [7, 8]) + array([0, 1, 7, 2, 8, 3, 4, 5]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, ..., 2, 3, 3]) + array([0, 1, 7, 0, 2, 3, 4, 5]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) @@ -5728,6 +5654,7 @@ def append(arr, values, axis=None): Examples -------- + >>> import numpy as np >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) array([1, 2, 3, ..., 7, 8, 9]) @@ -5737,6 +5664,7 @@ def append(arr, values, axis=None): array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): ... @@ -5744,6 +5672,16 @@ def append(arr, values, axis=None): the array at index 0 has 2 dimension(s) and the array at index 1 has 1 dimension(s) + >>> a = np.array([1, 2], dtype=int) + >>> c = np.append(a, []) + >>> c + array([1., 2.]) + >>> c.dtype + float64 + + Default dtype for empty ndarrays is `float64` thus making the output of dtype + `float64` when appended with dtype `int64` + """ arr = asanyarray(arr) if axis is None: @@ -5829,6 +5767,7 @@ def digitize(x, bins, right=False): Examples -------- + >>> import numpy as np >>> x = np.array([0.2, 6.4, 3.0, 1.6]) >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) >>> inds = np.digitize(x, bins) diff --git a/numpy/lib/_function_base_impl.pyi b/numpy/lib/_function_base_impl.pyi index 2650568d3923..5dee76e172e5 100644 --- a/numpy/lib/_function_base_impl.pyi +++ b/numpy/lib/_function_base_impl.pyi @@ -1,4 +1,3 @@ -import sys from collections.abc import Sequence, Iterator, Callable, Iterable from typing import ( Literal as L, @@ -8,17 +7,13 @@ from typing import ( Protocol, 
SupportsIndex, SupportsInt, + TypeGuard ) -if sys.version_info >= (3, 10): - from typing import TypeGuard -else: - from typing_extensions import TypeGuard - from numpy import ( vectorize as vectorize, - ufunc, generic, + integer, floating, complexfloating, intp, @@ -27,6 +22,7 @@ from numpy import ( timedelta64, datetime64, object_, + bool as bool_, _OrderKACF, ) @@ -318,12 +314,6 @@ def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ... def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ... -def disp( - mesg: object, - device: None | _SupportsWriteFlush = ..., - linefeed: bool = ..., -) -> None: ... - @overload def cov( m: _ArrayLikeFloat_co, @@ -472,10 +462,20 @@ def median( keepdims: bool = ..., ) -> Any: ... @overload +def median( + a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + axis: None | _ShapeLike, + out: _ArrayType, + /, + overwrite_input: bool = ..., + keepdims: bool = ..., +) -> _ArrayType: ... +@overload def median( a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + *, + out: _ArrayType, overwrite_input: bool = ..., keepdims: bool = ..., ) -> _ArrayType: ... @@ -632,19 +632,94 @@ def percentile( def percentile( a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, q: _ArrayLikeFloat_co, - axis: None | _ShapeLike = ..., - out: _ArrayType = ..., + axis: None | _ShapeLike, + out: _ArrayType, + /, overwrite_input: bool = ..., method: _MethodKind = ..., keepdims: bool = ..., *, weights: None | _ArrayLikeFloat_co = ..., ) -> _ArrayType: ... 
+@overload +def percentile( + a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + q: _ArrayLikeFloat_co, + axis: None | _ShapeLike = ..., + *, + out: _ArrayType, + overwrite_input: bool = ..., + method: _MethodKind = ..., + keepdims: bool = ..., + weights: None | _ArrayLikeFloat_co = ..., +) -> _ArrayType: ... # NOTE: Not an alias, but they do have identical signatures # (that we can reuse) quantile = percentile + +_SCT_fm = TypeVar( + "_SCT_fm", + bound=floating[Any] | complexfloating[Any, Any] | timedelta64, +) + +class _SupportsRMulFloat(Protocol[_T_co]): + def __rmul__(self, other: float, /) -> _T_co: ... + +@overload +def trapezoid( # type: ignore[overload-overlap] + y: Sequence[_FloatLike_co], + x: Sequence[_FloatLike_co] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float64: ... +@overload +def trapezoid( + y: Sequence[_ComplexLike_co], + x: Sequence[_ComplexLike_co] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> complex128: ... +@overload +def trapezoid( + y: _ArrayLike[bool_ | integer[Any]], + x: _ArrayLike[bool_ | integer[Any]] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float64 | NDArray[float64]: ... +@overload +def trapezoid( # type: ignore[overload-overlap] + y: _ArrayLikeObject_co, + x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> float | NDArray[object_]: ... +@overload +def trapezoid( + y: _ArrayLike[_SCT_fm], + x: _ArrayLike[_SCT_fm] | _ArrayLikeInt_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> _SCT_fm | NDArray[_SCT_fm]: ... +@overload +def trapezoid( + y: Sequence[_SupportsRMulFloat[_T]], + x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> _T: ... 
+@overload +def trapezoid( + y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co, + x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ..., + dx: float = ..., + axis: SupportsIndex = ..., +) -> ( + floating[Any] | complexfloating[Any, Any] | timedelta64 + | NDArray[floating[Any] | complexfloating[Any, Any] | timedelta64 | object_] +): ... + def meshgrid( *xi: ArrayLike, copy: bool = ..., diff --git a/numpy/lib/_histograms_impl.py b/numpy/lib/_histograms_impl.py index a091d41a84c8..45b6500e892d 100644 --- a/numpy/lib/_histograms_impl.py +++ b/numpy/lib/_histograms_impl.py @@ -410,6 +410,8 @@ def _get_bin_edges(a, bins, range, weights): # Do not call selectors on empty arrays width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge)) if width: + if np.issubdtype(a.dtype, np.integer) and width < 1: + width = 1 n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width)) else: # Width can be zero for some estimators, e.g. FD when @@ -625,8 +627,12 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): The simplest and fastest estimator. Only takes into account the data size. + Additionally, if the data is of integer dtype, then the binwidth will never + be less than 1. + Examples -------- + >>> import numpy as np >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5]) >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1)) array([0. , 0.25, 0.5 , 0.75, 1. 
]) @@ -750,6 +756,7 @@ def histogram(a, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) @@ -967,7 +974,9 @@ def histogramdd(sample, bins=10, range=None, density=None, weights=None): Examples -------- - >>> r = np.random.randn(100,3) + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> r = rng.normal(size=(100,3)) >>> H, edges = np.histogramdd(r, bins = (5, 8, 4)) >>> H.shape, edges[0].size, edges[1].size, edges[2].size ((5, 8, 4), 6, 9, 5) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 62f1d213b29f..3014e46130e8 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -65,6 +65,7 @@ def ix_(*args): Examples -------- + >>> import numpy as np >>> a = np.arange(10).reshape(2, 5) >>> a array([[0, 1, 2, 3, 4], @@ -140,6 +141,7 @@ class nd_grid: Users should use these pre-defined instances instead of using `nd_grid` directly. """ + __slots__ = ('sparse',) def __init__(self, sparse=False): self.sparse = sparse @@ -239,6 +241,7 @@ class MGridClass(nd_grid): Examples -------- + >>> import numpy as np >>> np.mgrid[0:5, 0:5] array([[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], @@ -261,6 +264,7 @@ class MGridClass(nd_grid): (3, 4, 5, 6) """ + __slots__ = () def __init__(self): super().__init__(sparse=False) @@ -312,6 +316,7 @@ class OGridClass(nd_grid): array([[0, 1, 2, 3, 4]])) """ + __slots__ = () def __init__(self): super().__init__(sparse=True) @@ -326,6 +331,8 @@ class AxisConcatenator: For detailed documentation on usage, see `r_`. 
""" + __slots__ = ('axis', 'matrix', 'trans1d', 'ndmin') + # allow ma.mr_ to override this concatenate = staticmethod(_nx.concatenate) makemat = staticmethod(matrixlib.matrix) @@ -505,6 +512,7 @@ class RClass(AxisConcatenator): Examples -------- + >>> import numpy as np >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] array([1, 2, 3, ..., 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] @@ -539,6 +547,7 @@ class RClass(AxisConcatenator): matrix([[1, 2, 3, 4, 5, 6]]) """ + __slots__ = () def __init__(self): AxisConcatenator.__init__(self, 0) @@ -563,6 +572,7 @@ class CClass(AxisConcatenator): Examples -------- + >>> import numpy as np >>> np.c_[np.array([1,2,3]), np.array([4,5,6])] array([[1, 4], [2, 5], @@ -571,6 +581,7 @@ class CClass(AxisConcatenator): array([[1, 2, 3, ..., 4, 5, 6]]) """ + __slots__ = () def __init__(self): AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0) @@ -597,6 +608,7 @@ class ndenumerate: Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> for index, x in np.ndenumerate(a): ... print(index, x) @@ -649,6 +661,8 @@ class ndindex: Examples -------- + >>> import numpy as np + Dimensions as individual arguments >>> for index in np.ndindex(3, 2, 1): @@ -762,6 +776,7 @@ class IndexExpression: Examples -------- + >>> import numpy as np >>> np.s_[2::2] slice(2, None, 2) >>> np.index_exp[2::2] @@ -771,6 +786,7 @@ class IndexExpression: array([2, 4]) """ + __slots__ = ('maketuple',) def __init__(self, maketuple): self.maketuple = maketuple @@ -833,6 +849,7 @@ def fill_diagonal(a, val, wrap=False): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), int) >>> np.fill_diagonal(a, 5) >>> a @@ -959,6 +976,8 @@ def diag_indices(n, ndim=2): Examples -------- + >>> import numpy as np + Create a set of indices to access the diagonal of a (4, 4) array: >>> di = np.diag_indices(4) @@ -1023,7 +1042,8 @@ def diag_indices_from(arr): Examples -------- - + >>> import numpy as np + Create a 4 by 4 array. 
>>> a = np.arange(16).reshape(4, 4) @@ -1032,7 +1052,7 @@ def diag_indices_from(arr): [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) - + Get the indices of the diagonal elements. >>> di = np.diag_indices_from(a) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index a38b0017ee5d..908ca7762fdd 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -72,15 +72,13 @@ def has_nested_fields(ndtype): Examples -------- + >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)]) >>> np.lib._iotools.has_nested_fields(dt) False """ - for name in ndtype.names or (): - if ndtype[name].names is not None: - return True - return False + return any(ndtype[name].names is not None for name in ndtype.names or ()) def flatten_dtype(ndtype, flatten_base=False): @@ -100,6 +98,7 @@ def flatten_dtype(ndtype, flatten_base=False): Examples -------- + >>> import numpy as np >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ... ('block', int, (2, 3))]) >>> np.lib._iotools.flatten_dtype(dt) @@ -266,6 +265,7 @@ class NameValidator: Examples -------- + >>> import numpy as np >>> validator = np.lib._iotools.NameValidator() >>> validator(['file', 'field2', 'with space', 'CaSe']) ('file_', 'field2', 'with_space', 'CaSe') @@ -403,6 +403,7 @@ def str2bool(value): Examples -------- + >>> import numpy as np >>> np.lib._iotools.str2bool('TRUE') True >>> np.lib._iotools.str2bool('false') @@ -564,7 +565,7 @@ def upgrade_mapper(cls, func, default=None): >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) """ # Func is a single functions - if hasattr(func, '__call__'): + if callable(func): cls._mapper.insert(-1, (cls._getsubdtype(default), func, default)) return elif hasattr(func, '__iter__'): @@ -611,7 +612,7 @@ def __init__(self, dtype_or_func=None, default=None, missing_values=None, dtype = np.dtype(dtype_or_func) except TypeError: # dtype_or_func must be a function, then - if not hasattr(dtype_or_func, '__call__'): + if 
not callable(dtype_or_func): errmsg = ("The input argument `dtype` is neither a" " function nor a dtype (got '%s' instead)") raise TypeError(errmsg % type(dtype_or_func)) @@ -844,6 +845,7 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs): Examples -------- + >>> import numpy as np >>> np.lib._iotools.easy_dtype(float) dtype('float64') >>> np.lib._iotools.easy_dtype("i4, f8") diff --git a/numpy/lib/_nanfunctions_impl.py b/numpy/lib/_nanfunctions_impl.py index 54788a738c7e..958ebc3cbe82 100644 --- a/numpy/lib/_nanfunctions_impl.py +++ b/numpy/lib/_nanfunctions_impl.py @@ -141,7 +141,7 @@ def _copyto(a, val, mask): return a -def _remove_nan_1d(arr1d, overwrite_input=False): +def _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False): """ Equivalent to arr1d[~arr1d.isnan()], but in a different order @@ -151,6 +151,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ---------- arr1d : ndarray Array to remove nans from + second_arr1d : ndarray or None + A second array which will have the same positions removed as arr1d. overwrite_input : bool True if `arr1d` can be modified in place @@ -158,6 +160,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False): ------- res : ndarray Array with nan elements removed + second_res : ndarray or None + Second array with nan element positions of first array removed. 
overwrite_input : bool True if `res` can be modified in place, given the constraint on the input @@ -172,9 +176,12 @@ def _remove_nan_1d(arr1d, overwrite_input=False): if s.size == arr1d.size: warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=6) - return arr1d[:0], True + if second_arr1d is None: + return arr1d[:0], None, True + else: + return arr1d[:0], second_arr1d[:0], True elif s.size == 0: - return arr1d, overwrite_input + return arr1d, second_arr1d, overwrite_input else: if not overwrite_input: arr1d = arr1d.copy() @@ -183,7 +190,15 @@ def _remove_nan_1d(arr1d, overwrite_input=False): # fill nans in beginning of array with non-nans of end arr1d[s[:enonan.size]] = enonan - return arr1d[:-s.size], True + if second_arr1d is None: + return arr1d[:-s.size], None, True + else: + if not overwrite_input: + second_arr1d = second_arr1d.copy() + enonan = second_arr1d[-s.size:][~c[-s.size:]] + second_arr1d[s[:enonan.size]] = enonan + + return arr1d[:-s.size], second_arr1d[:-s.size], True def _divide_by_count(a, b, out=None): @@ -315,6 +330,7 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmin(a) 1.0 @@ -448,6 +464,7 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nanmax(a) 3.0 @@ -536,6 +553,7 @@ def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmin(a) 0 @@ -597,6 +615,7 @@ def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([[np.nan, 4], [2, 3]]) >>> np.argmax(a) 0 @@ -699,6 +718,7 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.nansum(1) 1 >>> 
np.nansum([1]) @@ -790,6 +810,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> np.nanprod(1) 1 >>> np.nanprod([1]) @@ -857,6 +878,7 @@ def nancumsum(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.nancumsum(1) array([1]) >>> np.nancumsum([1]) @@ -924,6 +946,7 @@ def nancumprod(a, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> np.nancumprod(1) array([1]) >>> np.nancumprod([1]) @@ -1021,6 +1044,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanmean(a) 2.6666666666666665 @@ -1061,7 +1085,7 @@ def _nanmedian1d(arr1d, overwrite_input=False): Private function for rank 1 arrays. Compute the median ignoring NaNs. See nanmedian for parameter usage """ - arr1d_parsed, overwrite_input = _remove_nan_1d( + arr1d_parsed, _, overwrite_input = _remove_nan_1d( arr1d, overwrite_input=overwrite_input, ) @@ -1186,6 +1210,7 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu Examples -------- + >>> import numpy as np >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) >>> a[0, 1] = np.nan >>> a @@ -1344,10 +1369,13 @@ def nanpercentile( Notes ----- - For more information please see `numpy.percentile` + The behavior of `numpy.nanpercentile` with percentage `q` is that of + `numpy.quantile` with argument ``q/100`` (ignoring nan values). + For more information, please see `numpy.quantile`. Examples -------- + >>> import numpy as np >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a @@ -1532,10 +1560,13 @@ def nanquantile( Notes ----- - For more information please see `numpy.quantile` + The behavior of `numpy.nanquantile` is the same as that of + `numpy.quantile` (ignoring nan values). + For more information, please see `numpy.quantile`. 
Examples -------- + >>> import numpy as np >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a @@ -1646,13 +1677,36 @@ def _nanquantile_ureduce_func( wgt = None if weights is None else weights.ravel() result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt) else: - result = np.apply_along_axis(_nanquantile_1d, axis, a, q, - overwrite_input, method, weights) - # apply_along_axis fills in collapsed axis with results. - # Move that axis to the beginning to match percentile's - # convention. - if q.ndim != 0: - result = np.moveaxis(result, axis, 0) + # Note that this code could try to fill in `out` right away + if weights is None: + result = np.apply_along_axis(_nanquantile_1d, axis, a, q, + overwrite_input, method, weights) + # apply_along_axis fills in collapsed axis with results. + # Move those axes to the beginning to match percentile's + # convention. + if q.ndim != 0: + from_ax = [axis + i for i in range(q.ndim)] + result = np.moveaxis(result, from_ax, list(range(q.ndim))) + else: + # We need to apply along axis over 2 arrays, a and weights. + # move operation axes to end for simplicity: + a = np.moveaxis(a, axis, -1) + if weights is not None: + weights = np.moveaxis(weights, axis, -1) + if out is not None: + result = out + else: + # weights are limited to `inverted_cdf` so the result dtype + # is known to be identical to that of `a` here: + result = np.empty_like(a, shape=q.shape + a.shape[:-1]) + + for ii in np.ndindex(a.shape[:-1]): + result[(...,) + ii] = _nanquantile_1d( + a[ii], q, weights=weights[ii], + overwrite_input=overwrite_input, method=method, + ) + # This path dealt with `out` already... + return result if out is not None: out[...] = result @@ -1666,8 +1720,9 @@ def _nanquantile_1d( Private function for rank 1 arrays. Compute quantile ignoring NaNs. 
See nanpercentile for parameter usage """ - arr1d, overwrite_input = _remove_nan_1d(arr1d, - overwrite_input=overwrite_input) + # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]? + arr1d, weights, overwrite_input = _remove_nan_1d(arr1d, + second_arr1d=weights, overwrite_input=overwrite_input) if arr1d.size == 0: # convert to scalar return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()] @@ -1788,6 +1843,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanvar(a) 1.5555555555555554 @@ -1984,6 +2040,7 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue, Examples -------- + >>> import numpy as np >>> a = np.array([[1, np.nan], [3, 4]]) >>> np.nanstd(a) 1.247219128924647 diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py index 8fef65e7f6ab..a83c46b0e654 100644 --- a/numpy/lib/_npyio_impl.py +++ b/numpy/lib/_npyio_impl.py @@ -51,6 +51,7 @@ class BagObj: Examples -------- + >>> import numpy as np >>> from numpy.lib._npyio_impl import BagObj as BO >>> class BagDemo: ... def __getitem__(self, key): # An instance of BagObj(BagDemo) @@ -157,6 +158,7 @@ class NpzFile(Mapping): Examples -------- + >>> import numpy as np >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) @@ -278,6 +280,34 @@ def __repr__(self): array_names += "..." return f"NpzFile {filename!r} with keys: {array_names}" + # Work around problems with the docstrings in the Mapping methods + # They contain a `->`, which confuses the type annotation interpretations + # of sphinx-docs. See gh-25964 + + def get(self, key, default=None, /): + """ + D.get(k[,d]) returns D[k] if k in D, else d. d defaults to None. 
+ """ + return Mapping.get(self, key, default) + + def items(self): + """ + D.items() returns a set-like object providing a view on the items + """ + return Mapping.items(self) + + def keys(self): + """ + D.keys() returns a set-like object providing a view on the keys + """ + return Mapping.keys(self) + + def values(self): + """ + D.values() returns a set-like object providing a view on the values + """ + return Mapping.values(self) + @set_module('numpy') def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, @@ -375,6 +405,8 @@ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True, Examples -------- + >>> import numpy as np + Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) @@ -473,7 +505,7 @@ def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None): @array_function_dispatch(_save_dispatcher) -def save(file, arr, allow_pickle=True, fix_imports=True): +def save(file, arr, allow_pickle=True, fix_imports=np._NoValue): """ Save an array to a binary file in NumPy ``.npy`` format. @@ -487,18 +519,19 @@ def save(file, arr, allow_pickle=True, fix_imports=True): arr : array_like Array data to be saved. allow_pickle : bool, optional - Allow saving object arrays using Python pickles. Reasons for + Allow saving object arrays using Python pickles. Reasons for disallowing pickles include security (loading pickled data can execute - arbitrary code) and portability (pickled objects may not be loadable + arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is - compatible between Python 2 and Python 3). + compatible between different versions of Python). Default: True fix_imports : bool, optional - Only useful in forcing objects in object arrays on Python 3 to be - pickled in a Python 2 compatible way. 
If `fix_imports` is True, pickle - will try to map the new Python 3 names to the old module names used in - Python 2, so that the pickle data stream is readable with Python 2. + The `fix_imports` flag is deprecated and has no effect. + + .. deprecated:: 2.1 + This flag is ignored since NumPy 1.17 and was only needed to + support loading some files in Python 2 written in Python 3. See Also -------- @@ -513,6 +546,8 @@ def save(file, arr, allow_pickle=True, fix_imports=True): Examples -------- + >>> import numpy as np + >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() @@ -533,6 +568,12 @@ def save(file, arr, allow_pickle=True, fix_imports=True): >>> print(a, b) # [1 2] [1 3] """ + if fix_imports is not np._NoValue: + # Deprecated 2024-05-16, NumPy 2.1 + warnings.warn( + "The 'fix_imports' flag is deprecated and has no effect. " + "(Deprecated in NumPy 2.1)", + DeprecationWarning, stacklevel=2) if hasattr(file, 'write'): file_ctx = contextlib.nullcontext(file) else: @@ -609,6 +650,7 @@ def savez(file, *args, **kwds): Examples -------- + >>> import numpy as np >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) @@ -697,6 +739,7 @@ def savez_compressed(file, *args, **kwds): Examples -------- + >>> import numpy as np >>> test_array = np.random.rand(3, 2) >>> test_vector = np.random.rand(4) >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector) @@ -1005,6 +1048,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', # Due to chunking, certain error reports are less clear, currently. 
if filelike: data = iter(data) # cannot chunk when reading from file + filelike = False c_byte_converters = False if read_dtype_via_object_chunks == "S": @@ -1020,7 +1064,7 @@ def _read(fname, *, delimiter=',', comment='#', quote='"', next_arr = _load_from_filelike( data, delimiter=delimiter, comment=comment, quote=quote, imaginary_unit=imaginary_unit, - usecols=usecols, skiplines=skiplines, max_rows=max_rows, + usecols=usecols, skiplines=skiplines, max_rows=chunk_size, converters=converters, dtype=dtype, encoding=encoding, filelike=filelike, byte_converters=byte_converters, @@ -1207,6 +1251,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, Examples -------- + >>> import numpy as np >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO("0 1\n2 3") >>> np.loadtxt(c) @@ -1482,6 +1527,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', Examples -------- + >>> import numpy as np >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays @@ -1658,6 +1704,7 @@ def fromregex(file, regexp, dtype, encoding=None): Examples -------- + >>> import numpy as np >>> from io import StringIO >>> text = StringIO("1312 foo\n1534 bar\n444 qux") @@ -1814,10 +1861,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, .. versionadded:: 1.10.0 encoding : str, optional Encoding used to decode the inputfile. Does not apply when `fname` - is a file object. The special value 'bytes' enables backward + is a file object. The special value 'bytes' enables backward compatibility workarounds that ensure that you receive byte arrays - when possible and passes latin1 encoded strings to converters. - Override this value to receive unicode arrays and pass strings + when possible and passes latin1 encoded strings to converters. 
+ Override this value to receive unicode arrays and pass strings as input to converters. If set to None the system default is used. The default value is 'bytes'. @@ -1854,7 +1901,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. * Custom converters may receive unexpected values due to dtype - discovery. + discovery. References ---------- @@ -2127,7 +2174,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, except ValueError: # We couldn't find it: the name must have been dropped continue - # Redefine the key if it's a column number + # Redefine the key if it's a column number # and usecols is defined if usecols: try: @@ -2161,23 +2208,23 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) - converters = [StringConverter(dt, + converters = [StringConverter(dt, locked=True, - missing_values=miss, + missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) - converters = [StringConverter(dtype, + converters = [StringConverter(dtype, locked=True, - missing_values=miss, + missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): - # If the converter is specified by column names, + # If the converter is specified by column names, # use the index instead if _is_string_like(j): try: @@ -2201,8 +2248,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if conv is bytes: user_conv = asbytes elif byte_converters: - # Converters may use decode to workaround numpy's old - # behavior, so encode the string again before 
passing + # Converters may use decode to workaround numpy's old + # behavior, so encode the string again before passing # to the user converter. def tobytes_first(x, conv): if type(x) is bytes: @@ -2338,7 +2385,7 @@ def tobytes_first(x, conv): "argument is deprecated. Set the encoding, use None for the " "system default.", np.exceptions.VisibleDeprecationWarning, stacklevel=2) - + def encode_unicode_cols(row_tup): row = list(row_tup) for i in strcolidx: diff --git a/numpy/lib/_npyio_impl.pyi b/numpy/lib/_npyio_impl.pyi index d9b43578d798..f1dcbfd52d01 100644 --- a/numpy/lib/_npyio_impl.pyi +++ b/numpy/lib/_npyio_impl.pyi @@ -158,7 +158,7 @@ def loadtxt( delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., - usecols: int | Sequence[int] = ..., + usecols: int | Sequence[int] | None = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., @@ -175,7 +175,7 @@ def loadtxt( delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., - usecols: int | Sequence[int] = ..., + usecols: int | Sequence[int] | None = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., @@ -192,7 +192,7 @@ def loadtxt( delimiter: None | str = ..., converters: None | Mapping[int | str, Callable[[str], Any]] = ..., skiprows: int = ..., - usecols: int | Sequence[int] = ..., + usecols: int | Sequence[int] | None = ..., unpack: bool = ..., ndmin: L[0, 1, 2] = ..., encoding: None | str = ..., diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 63c12f438240..9bcf0a3d92a6 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -103,6 +103,8 @@ def poly(seq_of_zeros): -------- Given a sequence of a polynomial's zeros: + >>> import numpy as np + >>> np.poly((0, 0, 0)) # Multiple root example array([1., 0., 0., 0.]) @@ -209,6 +211,7 @@ def roots(p): Examples -------- + >>> import numpy 
as np >>> coeff = [3.2, 2, 1] >>> np.roots(coeff) array([-0.3125+0.46351241j, -0.3125-0.46351241j]) @@ -297,6 +300,8 @@ def polyint(p, m=1, k=None): -------- The defining property of the antiderivative: + >>> import numpy as np + >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P @@ -392,6 +397,8 @@ def polyder(p, m=1): -------- The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is: + >>> import numpy as np + >>> p = np.poly1d([1,1,1,1]) >>> p2 = np.polyder(p) >>> p2 @@ -575,6 +582,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): Examples -------- + >>> import numpy as np >>> import warnings >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0]) >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) @@ -675,7 +683,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): raise ValueError("the number of data points must exceed order " "to scale the covariance matrix") # note, this used to be: fac = resids / (len(x) - order - 2.0) - # it was deciced that the "- 2" (originally justified by "Bayesian + # it was decided that the "- 2" (originally justified by "Bayesian # uncertainty analysis") is not what the user expects # (see gh-11196 and gh-11197) fac = resids / (len(x) - order) @@ -749,6 +757,7 @@ def polyval(p, x): Examples -------- + >>> import numpy as np >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) @@ -808,6 +817,7 @@ def polyadd(a1, a2): Examples -------- + >>> import numpy as np >>> np.polyadd([1, 2], [9, 5, 4]) array([9, 6, 6]) @@ -875,6 +885,8 @@ def polysub(a1, a2): -------- .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2) + >>> import numpy as np + >>> np.polysub([2, 10, -2], [3, 10, -4]) array([-1, 0, 2]) @@ -933,6 +945,7 @@ def polymul(a1, a2): Examples -------- + >>> import numpy as np >>> np.polymul([1, 2, 3], [9, 5, 1]) array([ 9, 23, 38, 17, 3]) @@ -1009,6 +1022,7 @@ def polydiv(u, v): -------- .. 
math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25 + >>> import numpy as np >>> x = np.array([3.0, 5.0, 2.0]) >>> y = np.array([2.0, 1.0]) >>> np.polydiv(x, y) @@ -1098,6 +1112,8 @@ class poly1d: -------- Construct the polynomial :math:`x^2 + 2x + 3`: + >>> import numpy as np + >>> p = np.poly1d([1, 2, 3]) >>> print(np.poly1d(p)) 2 diff --git a/numpy/lib/_scimath_impl.py b/numpy/lib/_scimath_impl.py index 43682fefee17..d5492c645247 100644 --- a/numpy/lib/_scimath_impl.py +++ b/numpy/lib/_scimath_impl.py @@ -66,6 +66,7 @@ def _tocomplex(arr): Examples -------- + >>> import numpy as np First, consider an input of type short: @@ -124,6 +125,7 @@ def _fix_real_lt_zero(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_real_lt_zero([1,2]) array([1, 2]) @@ -152,6 +154,7 @@ def _fix_int_lt_zero(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_int_lt_zero([1,2]) array([1, 2]) @@ -179,6 +182,7 @@ def _fix_real_abs_gt_1(x): Examples -------- + >>> import numpy as np >>> np.lib.scimath._fix_real_abs_gt_1([0,1]) array([0, 1]) @@ -222,6 +226,8 @@ def sqrt(x): -------- For real, non-negative inputs this works just like `numpy.sqrt`: + >>> import numpy as np + >>> np.emath.sqrt(1) 1.0 >>> np.emath.sqrt([1, 4]) @@ -282,6 +288,7 @@ def log(x): Examples -------- + >>> import numpy as np >>> np.emath.log(np.exp(1)) 1.0 @@ -330,6 +337,7 @@ def log10(x): Examples -------- + >>> import numpy as np (We set the printing precision so the example can be auto-tested) @@ -373,6 +381,7 @@ def logn(n, x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.logn(2, [4, 8]) @@ -420,6 +429,7 @@ def log2(x): Examples -------- + We set the printing precision so the example can be auto-tested: >>> np.set_printoptions(precision=4) @@ -468,6 +478,7 @@ def power(x, p): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.power(2, 2) @@ -523,6 +534,7 @@ def arccos(x): 
Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arccos(1) # a scalar is returned @@ -569,6 +581,7 @@ def arcsin(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arcsin(0) @@ -617,6 +630,7 @@ def arctanh(x): Examples -------- + >>> import numpy as np >>> np.set_printoptions(precision=4) >>> np.emath.arctanh(0.5) diff --git a/numpy/lib/_shape_base_impl.py b/numpy/lib/_shape_base_impl.py index 68453095db7e..3e2f2ba7d46c 100644 --- a/numpy/lib/_shape_base_impl.py +++ b/numpy/lib/_shape_base_impl.py @@ -115,6 +115,7 @@ def take_along_axis(arr, indices, axis): Examples -------- + >>> import numpy as np For this sample array @@ -162,6 +163,9 @@ def take_along_axis(arr, indices, axis): """ # normalize inputs if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') arr = arr.flat arr_shape = (len(arr),) # flatiter has no .shape axis = 0 @@ -233,6 +237,7 @@ def put_along_axis(arr, indices, values, axis): Examples -------- + >>> import numpy as np For this sample array @@ -252,6 +257,9 @@ def put_along_axis(arr, indices, values, axis): """ # normalize inputs if axis is None: + if indices.ndim != 1: + raise ValueError( + 'when axis=None, `indices` must have a single dimension.') arr = arr.flat axis = 0 arr_shape = (len(arr),) # flatiter has no .shape @@ -325,6 +333,7 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): Examples -------- + >>> import numpy as np >>> def my_func(a): ... \"\"\"Average first and last element of a 1-D array\"\"\" ... 
return (a[0] + a[-1]) * 0.5 @@ -455,6 +464,7 @@ def apply_over_axes(func, a, axes): Examples -------- + >>> import numpy as np >>> a = np.arange(24).reshape(2,3,4) >>> a array([[[ 0, 1, 2, 3], @@ -543,6 +553,7 @@ def expand_dims(a, axis): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2]) >>> x.shape (2,) @@ -645,6 +656,7 @@ def column_stack(tup): Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) @@ -704,6 +716,7 @@ def dstack(tup): Examples -------- + >>> import numpy as np >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.dstack((a,b)) @@ -756,6 +769,7 @@ def array_split(ary, indices_or_sections, axis=0): Examples -------- + >>> import numpy as np >>> x = np.arange(8.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] @@ -852,6 +866,7 @@ def split(ary, indices_or_sections, axis=0): Examples -------- + >>> import numpy as np >>> x = np.arange(9.0) >>> np.split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] @@ -895,6 +910,7 @@ def hsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], @@ -965,6 +981,7 @@ def vsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], @@ -1018,6 +1035,7 @@ def dsplit(ary, indices_or_sections): Examples -------- + >>> import numpy as np >>> x = np.arange(16.0).reshape(2, 2, 4) >>> x array([[[ 0., 1., 2., 3.], @@ -1120,6 +1138,7 @@ def kron(a, b): Examples -------- + >>> import numpy as np >>> np.kron([1,10,100], [5,6,7]) array([ 5, 6, 7, ..., 500, 600, 700]) >>> np.kron([5,6,7], [1,10,100]) @@ -1234,6 +1253,7 @@ def tile(A, reps): Examples -------- + >>> import numpy as np >>> a = np.array([0, 1, 2]) >>> np.tile(a, 2) array([0, 1, 2, 0, 1, 2]) diff --git a/numpy/lib/_shape_base_impl.pyi 
b/numpy/lib/_shape_base_impl.pyi index cdfe9d9d5637..c765e1e5edf5 100644 --- a/numpy/lib/_shape_base_impl.pyi +++ b/numpy/lib/_shape_base_impl.pyi @@ -1,11 +1,13 @@ -import sys from collections.abc import Callable, Sequence -from typing import TypeVar, Any, overload, SupportsIndex, Protocol - -if sys.version_info >= (3, 10): - from typing import ParamSpec, Concatenate -else: - from typing_extensions import ParamSpec, Concatenate +from typing import ( + TypeVar, + Any, + overload, + SupportsIndex, + Protocol, + ParamSpec, + Concatenate, +) import numpy as np from numpy import ( diff --git a/numpy/lib/_stride_tricks_impl.py b/numpy/lib/_stride_tricks_impl.py index 0cfbbcfe9c81..def62523ee0e 100644 --- a/numpy/lib/_stride_tricks_impl.py +++ b/numpy/lib/_stride_tricks_impl.py @@ -137,7 +137,7 @@ def sliding_window_view(x, window_shape, axis=None, *, Also known as rolling or moving window, the window slides across all dimensions of the array and extracts subsets of the array at all window positions. - + .. 
versionadded:: 1.20.0 Parameters @@ -204,6 +204,7 @@ def sliding_window_view(x, window_shape, axis=None, *, Examples -------- + >>> import numpy as np >>> from numpy.lib.stride_tricks import sliding_window_view >>> x = np.arange(6) >>> x.shape @@ -413,6 +414,7 @@ def broadcast_to(array, shape, subok=False): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3]) >>> np.broadcast_to(x, (3, 3)) array([[1, 2, 3], @@ -439,6 +441,9 @@ def _broadcast_shape(*args): return b.shape +_size0_dtype = np.dtype([]) + + @set_module('numpy') def broadcast_shapes(*args): """ @@ -472,13 +477,14 @@ def broadcast_shapes(*args): Examples -------- + >>> import numpy as np >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2)) (3, 2) >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) (5, 6, 7) """ - arrays = [np.empty(x, dtype=[]) for x in args] + arrays = [np.empty(x, dtype=_size0_dtype) for x in args] return _broadcast_shape(*arrays) @@ -523,6 +529,7 @@ def broadcast_arrays(*args, subok=False): Examples -------- + >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> y = np.array([[4],[5]]) >>> np.broadcast_arrays(x, y) @@ -546,13 +553,11 @@ def broadcast_arrays(*args, subok=False): # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], # order='C').itviews - args = tuple(np.array(_m, copy=None, subok=subok) for _m in args) + args = [np.array(_m, copy=None, subok=subok) for _m in args] shape = _broadcast_shape(*args) - if all(array.shape == shape for array in args): - # Common case where nothing needs to be broadcasted. 
- return args - - return tuple(_broadcast_to(array, shape, subok=subok, readonly=False) - for array in args) + result = [array if array.shape == shape + else _broadcast_to(array, shape, subok=subok, readonly=False) + for array in args] + return tuple(result) diff --git a/numpy/lib/_twodim_base_impl.py b/numpy/lib/_twodim_base_impl.py index dd6372429687..584efbfc307e 100644 --- a/numpy/lib/_twodim_base_impl.py +++ b/numpy/lib/_twodim_base_impl.py @@ -79,6 +79,7 @@ def fliplr(m): Examples -------- + >>> import numpy as np >>> A = np.diag([1.,2.,3.]) >>> A array([[1., 0., 0.], @@ -89,7 +90,8 @@ def fliplr(m): [0., 2., 0.], [3., 0., 0.]]) - >>> A = np.random.randn(2,3,5) + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) >>> np.all(np.fliplr(A) == A[:,::-1,...]) True @@ -132,6 +134,7 @@ def flipud(m): Examples -------- + >>> import numpy as np >>> A = np.diag([1.0, 2, 3]) >>> A array([[1., 0., 0.], @@ -142,7 +145,8 @@ def flipud(m): [0., 2., 0.], [1., 0., 0.]]) - >>> A = np.random.randn(2,3,5) + >>> rng = np.random.default_rng() + >>> A = rng.normal(size=(2,3,5)) >>> np.all(np.flipud(A) == A[::-1,...]) True @@ -201,6 +205,7 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None): Examples -------- + >>> import numpy as np >>> np.eye(2, dtype=int) array([[1, 0], [0, 1]]) @@ -275,6 +280,7 @@ def diag(v, k=0): Examples -------- + >>> import numpy as np >>> x = np.arange(9).reshape((3,3)) >>> x array([[0, 1, 2], @@ -339,6 +345,7 @@ def diagflat(v, k=0): Examples -------- + >>> import numpy as np >>> np.diagflat([[1,2], [3,4]]) array([[1, 0, 0, 0], [0, 2, 0, 0], @@ -399,6 +406,7 @@ def tri(N, M=None, k=0, dtype=float, *, like=None): Examples -------- + >>> import numpy as np >>> np.tri(3, 5, 2, dtype=int) array([[1, 1, 1, 0, 0], [1, 1, 1, 1, 0], @@ -460,6 +468,7 @@ def tril(m, k=0): Examples -------- + >>> import numpy as np >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 0, 0, 0], [ 4, 0, 0], @@ -504,6 +513,7 @@ def 
triu(m, k=0): Examples -------- + >>> import numpy as np >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1) array([[ 1, 2, 3], [ 4, 5, 6], @@ -574,6 +584,7 @@ def vander(x, N=None, increasing=False): Examples -------- + >>> import numpy as np >>> x = np.array([1, 2, 3, 5]) >>> N = 3 >>> np.vander(x, N) @@ -716,6 +727,7 @@ def histogram2d(x, y, bins=10, range=None, density=None, weights=None): Examples -------- + >>> import numpy as np >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt @@ -854,6 +866,8 @@ def mask_indices(n, mask_func, k=0): Examples -------- + >>> import numpy as np + These are the indices that would allow you to access the upper triangular part of any 3x3 array: @@ -923,6 +937,8 @@ def tril_indices(n, k=0, m=None): Examples -------- + >>> import numpy as np + Compute two different sets of indices to access 4x4 arrays, one for the lower triangular part starting at the main diagonal, and one starting two diagonals further right: @@ -990,8 +1006,9 @@ def tril_indices_from(arr, k=0): Examples -------- + >>> import numpy as np - Create a 4 by 4 array. + Create a 4 by 4 array >>> a = np.arange(16).reshape(4, 4) >>> a @@ -1074,6 +1091,8 @@ def triu_indices(n, k=0, m=None): Examples -------- + >>> import numpy as np + Compute two different sets of indices to access 4x4 arrays, one for the upper triangular part starting at the main diagonal, and one starting two diagonals further right: @@ -1142,8 +1161,9 @@ def triu_indices_from(arr, k=0): Examples -------- + >>> import numpy as np - Create a 4 by 4 array. 
+ Create a 4 by 4 array >>> a = np.arange(16).reshape(4, 4) >>> a diff --git a/numpy/lib/_twodim_base_impl.pyi b/numpy/lib/_twodim_base_impl.pyi index 4096976871d7..c4690a4304bd 100644 --- a/numpy/lib/_twodim_base_impl.pyi +++ b/numpy/lib/_twodim_base_impl.pyi @@ -2,6 +2,7 @@ import builtins from collections.abc import Callable, Sequence from typing import ( Any, + TypeAlias, overload, TypeVar, Literal as L, @@ -16,6 +17,7 @@ from numpy import ( int_, intp, float64, + complex128, signedinteger, floating, complexfloating, @@ -29,6 +31,7 @@ from numpy._typing import ( ArrayLike, _ArrayLike, NDArray, + _SupportsArray, _SupportsArrayFunc, _ArrayLikeInt_co, _ArrayLikeFloat_co, @@ -164,44 +167,220 @@ def vander( increasing: bool = ..., ) -> NDArray[object_]: ... + +_Int_co: TypeAlias = np.integer[Any] | np.bool +_Float_co: TypeAlias = np.floating[Any] | _Int_co +_Number_co: TypeAlias = np.number[Any] | np.bool + +_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_SCT]] | Sequence[_SCT] +_ArrayLike2D: TypeAlias = ( + _SupportsArray[np.dtype[_SCT]] + | Sequence[_ArrayLike1D[_SCT]] +) + +_ArrayLike1DInt_co = ( + _SupportsArray[np.dtype[_Int_co]] + | Sequence[int | _Int_co] +) +_ArrayLike1DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[float | int | _Float_co] +) +_ArrayLike2DFloat_co = ( + _SupportsArray[np.dtype[_Float_co]] + | Sequence[_ArrayLike1DFloat_co] +) +_ArrayLike1DNumber_co = ( + _SupportsArray[np.dtype[_Number_co]] + | Sequence[int | float | complex | _Number_co] +) + +_SCT_complex = TypeVar("_SCT_complex", bound=np.complexfloating[Any, Any]) +_SCT_inexact = TypeVar("_SCT_inexact", bound=np.inexact[Any]) +_SCT_number_co = TypeVar("_SCT_number_co", bound=_Number_co) + @overload -def histogram2d( # type: ignore[misc] - x: _ArrayLikeFloat_co, - y: _ArrayLikeFloat_co, +def histogram2d( + x: _ArrayLike1D[_SCT_complex], + y: _ArrayLike1D[_SCT_complex | _Float_co], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None 
| _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[floating[Any]], - NDArray[floating[Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... @overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, + x: _ArrayLike1D[_SCT_complex | _Float_co], + y: _ArrayLike1D[_SCT_complex], bins: int | Sequence[int] = ..., - range: None | _ArrayLikeFloat_co = ..., + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[complexfloating[Any, Any]], - NDArray[complexfloating[Any, Any]], + NDArray[_SCT_complex], + NDArray[_SCT_complex], ]: ... -@overload # TODO: Sort out `bins` +@overload def histogram2d( - x: _ArrayLikeComplex_co, - y: _ArrayLikeComplex_co, - bins: Sequence[_ArrayLikeInt_co], - range: None | _ArrayLikeFloat_co = ..., + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact | _Int_co], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact | _Int_co], + y: _ArrayLike1D[_SCT_inexact], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_inexact], + NDArray[_SCT_inexact], +]: ... 
+@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[float64], + NDArray[float64], +]: ... +@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: int | Sequence[int] = ..., + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[complex128 | float64], + NDArray[complex128 | float64], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: _ArrayLike1D[_SCT_number_co] | Sequence[_ArrayLike1D[_SCT_number_co]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co], + NDArray[_SCT_number_co], +]: ... +@overload +def histogram2d( + x: _ArrayLike1D[_SCT_inexact], + y: _ArrayLike1D[_SCT_inexact], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | _SCT_inexact], + NDArray[_SCT_number_co | _SCT_inexact], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DInt_co | Sequence[float | int], + y: _ArrayLike1DInt_co | Sequence[float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | float64], + NDArray[_SCT_number_co | float64], +]: ... 
+@overload +def histogram2d( + x: Sequence[complex | float | int], + y: Sequence[complex | float | int], + bins: Sequence[_ArrayLike1D[_SCT_number_co] | int], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[_SCT_number_co | complex128 | float64], + NDArray[_SCT_number_co | complex128 | float64] , +]: ... + +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.bool], + NDArray[np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.int_ | np.bool], + NDArray[np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., + density: None | bool = ..., + weights: None | _ArrayLike1DFloat_co = ..., +) -> tuple[ + NDArray[float64], + NDArray[np.float64 | np.int_ | np.bool], + NDArray[np.float64 | np.int_ | np.bool], +]: ... +@overload +def histogram2d( + x: _ArrayLike1DNumber_co, + y: _ArrayLike1DNumber_co, + bins: Sequence[Sequence[complex | float | int | bool]], + range: None | _ArrayLike2DFloat_co = ..., density: None | bool = ..., - weights: None | _ArrayLikeFloat_co = ..., + weights: None | _ArrayLike1DFloat_co = ..., ) -> tuple[ NDArray[float64], - NDArray[Any], - NDArray[Any], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], + NDArray[np.complex128 | np.float64 | np.int_ | np.bool], ]: ... 
# NOTE: we're assuming/demanding here the `mask_func` returns diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 2e4ef4e6954a..5f662f6eb34e 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -56,6 +56,7 @@ def mintypecode(typechars, typeset='GDFgdf', default='d'): Examples -------- + >>> import numpy as np >>> np.mintypecode(['d', 'f', 'S']) 'd' >>> x = np.array([1.1, 2-3.j]) @@ -103,6 +104,7 @@ def real(val): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real array([1., 3., 5.]) @@ -149,6 +151,7 @@ def imag(val): Examples -------- + >>> import numpy as np >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag array([2., 4., 6.]) @@ -195,6 +198,7 @@ def iscomplex(x): Examples -------- + >>> import numpy as np >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j]) array([ True, False, False, False, False, True]) @@ -235,6 +239,7 @@ def isreal(x): Examples -------- + >>> import numpy as np >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex) >>> np.isreal(a) array([False, True, True, True, True, False]) @@ -287,6 +292,7 @@ def iscomplexobj(x): Examples -------- + >>> import numpy as np >>> np.iscomplexobj(1) False >>> np.iscomplexobj(1+0j) @@ -341,6 +347,7 @@ def isrealobj(x): Examples -------- + >>> import numpy as np >>> np.isrealobj(1) True >>> np.isrealobj(1+0j) @@ -434,6 +441,7 @@ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): Examples -------- + >>> import numpy as np >>> np.nan_to_num(np.inf) 1.7976931348623157e+308 >>> np.nan_to_num(-np.inf) @@ -525,6 +533,7 @@ def real_if_close(a, tol=100): Examples -------- + >>> import numpy as np >>> np.finfo(float).eps 2.2204460492503131e-16 # may vary @@ -593,6 +602,7 @@ def typename(char): Examples -------- + >>> import numpy as np >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q', ... 
'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q'] >>> for typechar in typechars: @@ -689,7 +699,7 @@ def common_type(*arrays): if issubclass(t, _nx.integer): p = 2 # array_precision[_nx.double] else: - p = array_precision.get(t, None) + p = array_precision.get(t) if p is None: raise TypeError("can't get common type for non-numeric array") precision = max(precision, p) diff --git a/numpy/lib/_ufunclike_impl.py b/numpy/lib/_ufunclike_impl.py index 241d8af4b4ce..3f026a2ce79c 100644 --- a/numpy/lib/_ufunclike_impl.py +++ b/numpy/lib/_ufunclike_impl.py @@ -21,12 +21,12 @@ def fix(x, out=None): Round to nearest integer towards zero. Round an array of floats element-wise to nearest integer towards zero. - The rounded values are returned as floats. + The rounded values have the same data-type as the input. Parameters ---------- x : array_like - An array of floats to be rounded + An array to be rounded out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the input broadcasts to. If not provided or None, a @@ -35,12 +35,12 @@ def fix(x, out=None): Returns ------- out : ndarray of floats - A float array with the same dimensions as the input. - If second argument is not supplied then a float array is returned + An array with the same dimensions and data-type as the input. + If second argument is not supplied then a new array is returned with the rounded values. If a second argument is supplied the result is stored there. - The return value `out` is then a reference to that array. + The return value ``out`` is then a reference to that array. 
See Also -------- @@ -49,10 +49,11 @@ def fix(x, out=None): Examples -------- + >>> import numpy as np >>> np.fix(3.14) 3.0 >>> np.fix(3) - 3.0 + 3 >>> np.fix([2.1, 2.9, -2.1, -2.9]) array([ 2., 2., -2., -2.]) @@ -111,6 +112,7 @@ def isposinf(x, out=None): Examples -------- + >>> import numpy as np >>> np.isposinf(np.inf) True >>> np.isposinf(-np.inf) @@ -180,6 +182,7 @@ def isneginf(x, out=None): Examples -------- + >>> import numpy as np >>> np.isneginf(-np.inf) True >>> np.isneginf(np.inf) diff --git a/numpy/lib/_utils_impl.py b/numpy/lib/_utils_impl.py index 8a2c4b5c61e7..0c5d08ee7d9c 100644 --- a/numpy/lib/_utils_impl.py +++ b/numpy/lib/_utils_impl.py @@ -99,6 +99,11 @@ def get_include(): $ pkg-config --cflags -I/path/to/site-packages/numpy/_core/include + Examples + -------- + >>> np.get_include() + '.../site-packages/numpy/core/include' # may vary + """ import numpy if numpy.show_config is None: diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index bfac5f814501..7dec3243b883 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -11,7 +11,7 @@ __all__ = ['NumpyVersion'] -class NumpyVersion(): +class NumpyVersion: """Parse and compare numpy version strings. NumPy has the following versioning scheme (numbers given are examples; they diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 87f35a7a4f60..a90403459848 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -271,6 +271,8 @@ def dtype_to_descr(dtype): warnings.warn("metadata on a dtype is not saved to an npy/npz. " "Use another format (such as pickle) to store it.", UserWarning, stacklevel=2) + dtype = new_dtype + if dtype.names is not None: # This is a record array. The .descr is fine. 
XXX: parts of the # record array with an empty name, like padding bytes, still get @@ -741,7 +743,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None): "when allow_pickle=False") if pickle_kwargs is None: pickle_kwargs = {} - pickle.dump(array, fp, protocol=3, **pickle_kwargs) + pickle.dump(array, fp, protocol=4, **pickle_kwargs) elif array.flags.f_contiguous and not array.flags.c_contiguous: if isfileobj(fp): array.T.tofile(fp) diff --git a/numpy/lib/introspect.py b/numpy/lib/introspect.py index 4688eadc32ac..70e638d4dde1 100644 --- a/numpy/lib/introspect.py +++ b/numpy/lib/introspect.py @@ -30,6 +30,7 @@ def opt_func_info(func_name=None, signature=None): Retrieve dispatch information for functions named 'add' or 'sub' and data types 'float64' or 'float32': + >>> import numpy as np >>> dict = np.lib.introspect.opt_func_info( ... func_name="add|abs", signature="float64|complex64" ... ) diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index bc5c5de095a8..ab16d1f9f1aa 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -52,6 +52,7 @@ def recursive_fill_fields(input, output): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) >>> b = np.zeros((3,), dtype=a.dtype) @@ -84,6 +85,7 @@ def _get_fieldspec(dtype): Examples -------- + >>> import numpy as np >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) >>> dt.descr [(('a', 'A'), '>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype) ('A',) @@ -148,6 +151,7 @@ def get_names_flat(adtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None False @@ -173,6 +177,7 @@ def flatten_descr(ndtype): Examples -------- + >>> import numpy as np >>> 
from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('a', '>> rfn.flatten_descr(ndtype) @@ -240,6 +245,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = np.dtype([('A', int), ... ('B', [('BA', int), @@ -380,6 +386,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) array([( 1, 10.), ( 2, 20.), (-1, 30.)], @@ -526,6 +533,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) @@ -621,6 +629,7 @@ def rename_fields(base, namemapper): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) @@ -805,6 +814,7 @@ def repack_fields(a, align=False, recurse=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> def print_offsets(d): @@ -975,6 +985,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) @@ -1110,6 +1121,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)]) @@ -1204,6 +1216,7 @@ def apply_along_fields(func, arr): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)], @@ -1294,6 +1307,7 @@ def require_fields(array, required_dtype): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')]) @@ -1338,6 +1352,7 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> x = np.array([1, 2,]) >>> rfn.stack_arrays(x) is x @@ -1427,6 +1442,7 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False): Examples -------- + >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = [('a', int)] >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index 8723f4d9ba73..ef3319e901a0 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -867,6 +867,42 @@ def test_check_03(self): a = np.pad([1, 2, 3], 4, 'reflect') b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 
1, 2, 3]) assert_array_equal(a, b) + + def test_check_04(self): + a = np.pad([1, 2, 3], [1, 10], 'reflect') + b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1]) + assert_array_equal(a, b) + + def test_check_05(self): + a = np.pad([1, 2, 3, 4], [45, 10], 'reflect') + b = np.array( + [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, + 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, + 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, + 2, 1, 2, 3, 4, 3, 2, 1, 2]) + assert_array_equal(a, b) + + def test_check_06(self): + a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric') + b = np.array( + [2, 3, 4, 4, 3, 2, 1, 1, 2, 3, + 4, 4, 3, 2, 1, 1, 2, 3, 4, 4, + 3] + ) + assert_array_equal(a, b) + + def test_check_07(self): + a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric') + b = np.array( + [4, 5, 6, 6, 5, 4, 3, 2, 1, 1, + 2, 3, 4, 5, 6, 6, 5, 4, 3, 2, + 1, 1, 2, 3, 4, 5, 6, 6, 5, 4, + 3, 2, 1, 1, 2, 3, 4, 5, 6, 6, + 5, 4, 3, 2, 1, 1, 2, 3, 4, 5, + 6, 6, 5, 4]) + assert_array_equal(a, b) class TestEmptyArray: diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index f537621482c0..b613fa3e736d 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -107,6 +107,21 @@ def test_setxor1d(self): assert_array_equal([], setxor1d([], [])) + def test_setxor1d_unique(self): + a = np.array([1, 8, 2, 3]) + b = np.array([6, 5, 4, 8]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + + a = np.array([[1], [8], [2], [3]]) + b = np.array([[6, 5], [4, 8]]) + + ec = np.array([1, 2, 3, 4, 5, 6]) + c = setxor1d(a, b, assume_unique=True) + assert_array_equal(c, ec) + def test_ediff1d(self): zero_elem = np.array([]) one_elem = np.array([1]) @@ -400,6 +415,7 @@ def test_isin_table_timedelta_fails(self): (np.uint16, np.uint8), (np.uint8, np.int16), (np.int16, np.uint8), + (np.uint64, np.int64), ] ) @pytest.mark.parametrize("kind", [None, "sort", "table"]) @@ 
-415,10 +431,8 @@ def test_isin_mixed_dtype(self, dtype1, dtype2, kind): expected = np.array([True, True, False, False]) - expect_failure = kind == "table" and any(( - dtype1 == np.int8 and dtype2 == np.int16, - dtype1 == np.int16 and dtype2 == np.int8 - )) + expect_failure = kind == "table" and ( + dtype1 == np.int16 and dtype2 == np.int8) if expect_failure: with pytest.raises(RuntimeError, match="exceed the maximum"): @@ -426,6 +440,22 @@ def test_isin_mixed_dtype(self, dtype1, dtype2, kind): else: assert_array_equal(isin(ar1, ar2, kind=kind), expected) + @pytest.mark.parametrize("data", [ + np.array([2**63, 2**63+1], dtype=np.uint64), + np.array([-2**62, -2**62-1], dtype=np.int64), + ]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) + def test_isin_mixed_huge_vals(self, kind, data): + """Test values outside intp range (negative ones if 32bit system)""" + query = data[1] + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, True]) + # Also check that nothing weird happens for values can't possibly + # in range. 
+ data = data.astype(np.int32) # clearly different values + res = np.isin(data, query, kind=kind) + assert_array_equal(res, [False, False]) + @pytest.mark.parametrize("kind", [None, "sort", "table"]) def test_isin_mixed_boolean(self, kind): """Test that isin works as expected for bool/int input.""" @@ -814,11 +844,8 @@ def test_unique_1d_with_axis(self, axis): def test_unique_inverse_with_axis(self, axis): x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]]) uniq, inv = unique(x, return_inverse=True, axis=axis) - assert_equal(inv.ndim, x.ndim) - if axis is None: - assert_array_equal(x, np.take(uniq, inv)) - else: - assert_array_equal(x, np.take_along_axis(uniq, inv, axis=axis)) + assert_equal(inv.ndim, x.ndim if axis is None else 1) + assert_array_equal(x, np.take(uniq, inv, axis=axis)) def test_unique_axis_zeros(self): # issue 15559 @@ -830,7 +857,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(1, 0))) assert_array_equal(idx, np.array([0])) - assert_array_equal(inv, np.array([[0], [0]])) + assert_array_equal(inv, np.array([0, 0])) assert_array_equal(cnt, np.array([2])) # there's 0 elements of shape (2,) along axis 1 @@ -840,7 +867,7 @@ def test_unique_axis_zeros(self): assert_equal(uniq.dtype, single_zero.dtype) assert_array_equal(uniq, np.empty(shape=(2, 0))) assert_array_equal(idx, np.array([])) - assert_array_equal(inv, np.empty((1, 0))) + assert_array_equal(inv, np.array([])) assert_array_equal(cnt, np.array([])) # test a "complicated" shape @@ -909,7 +936,7 @@ def _run_axis_tests(self, dtype): msg = "Unique's return_index=True failed with axis=0" assert_array_equal(data[idx], uniq, msg) msg = "Unique's return_inverse=True failed with axis=0" - assert_array_equal(np.take_along_axis(uniq, inv, axis=0), data) + assert_array_equal(np.take(uniq, inv, axis=0), data) msg = "Unique's return_counts=True failed with axis=0" assert_array_equal(cnt, np.array([2, 2]), msg) @@ -918,7 +945,7 
@@ def _run_axis_tests(self, dtype): msg = "Unique's return_index=True failed with axis=1" assert_array_equal(data[:, idx], uniq) msg = "Unique's return_inverse=True failed with axis=1" - assert_array_equal(np.take_along_axis(uniq, inv, axis=1), data) + assert_array_equal(np.take(uniq, inv, axis=1), data) msg = "Unique's return_counts=True failed with axis=1" assert_array_equal(cnt, np.array([2, 1, 1]), msg) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index 31352864b7e2..bb262e048cba 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -998,32 +998,30 @@ def test_header_growth_axis(): assert len(fp.getvalue()) == expected_header_length -@pytest.mark.parametrize('dt, fail', [ - (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', - metadata={'some': 'stuff'})]}), True), - (np.dtype(int, metadata={'some': 'stuff'}), False), - (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False), +@pytest.mark.parametrize('dt', [ + np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3', + metadata={'some': 'stuff'})]}), + np.dtype(int, metadata={'some': 'stuff'}), + np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), # recursive: metadata on the field of a dtype - (np.dtype({'names': ['a', 'b'], 'formats': [ + np.dtype({'names': ['a', 'b'], 'formats': [ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]}) - ]}), False) + ]}), ]) @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8), reason="PyPy bug in error formatting") -def test_metadata_dtype(dt, fail): +def test_metadata_dtype(dt): # gh-14142 arr = np.ones(10, dtype=dt) buf = BytesIO() with assert_warns(UserWarning): np.save(buf, arr) buf.seek(0) - if fail: - with assert_raises(ValueError): - np.load(buf) - else: - arr2 = np.load(buf) - # BUG: assert_array_equal does not check metadata - from numpy.lib._utils_impl import drop_metadata - assert_array_equal(arr, arr2) - 
assert drop_metadata(arr.dtype) is not arr.dtype - assert drop_metadata(arr2.dtype) is arr2.dtype + + # Loading should work (metadata was stripped): + arr2 = np.load(buf) + # BUG: assert_array_equal does not check metadata + from numpy.lib._utils_impl import drop_metadata + assert_array_equal(arr, arr2) + assert drop_metadata(arr.dtype) is not arr.dtype + assert drop_metadata(arr2.dtype) is arr2.dtype diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index a6465019fae4..b51564619051 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -20,8 +20,9 @@ from numpy.exceptions import AxisError from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_almost_equal, - assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY, - assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, IS_WASM + assert_array_almost_equal, assert_raises, assert_allclose, + assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT, + IS_WASM, NOGIL_BUILD ) import numpy.lib._function_base_impl as nfb from numpy.random import rand @@ -254,8 +255,8 @@ def test_nd(self): @pytest.mark.parametrize("dtype", ["i8", "U10", "object", "datetime64[ms]"]) def test_any_and_all_result_dtype(dtype): arr = np.ones(3, dtype=dtype) - assert np.any(arr).dtype == np.bool_ - assert np.all(arr).dtype == np.bool_ + assert np.any(arr).dtype == np.bool + assert np.all(arr).dtype == np.bool class TestCopy: @@ -694,7 +695,8 @@ def test_basic(self): class TestCumsum: - def test_basic(self): + @pytest.mark.parametrize("cumsum", [np.cumsum, np.cumulative_sum]) + def test_basic(self, cumsum): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32, @@ -704,15 +706,15 @@ def test_basic(self): a2 = np.array(ba2, ctype) tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype) - assert_array_equal(np.cumsum(a, axis=0), 
tgt) + assert_array_equal(cumsum(a, axis=0), tgt) tgt = np.array( [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype) - assert_array_equal(np.cumsum(a2, axis=0), tgt) + assert_array_equal(cumsum(a2, axis=0), tgt) tgt = np.array( [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype) - assert_array_equal(np.cumsum(a2, axis=1), tgt) + assert_array_equal(cumsum(a2, axis=1), tgt) class TestProd: @@ -737,7 +739,8 @@ def test_basic(self): class TestCumprod: - def test_basic(self): + @pytest.mark.parametrize("cumprod", [np.cumprod, np.cumulative_prod]) + def test_basic(self, cumprod): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, @@ -745,23 +748,52 @@ def test_basic(self): a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: - assert_raises(ArithmeticError, np.cumprod, a) - assert_raises(ArithmeticError, np.cumprod, a2, 1) - assert_raises(ArithmeticError, np.cumprod, a) + assert_raises(ArithmeticError, cumprod, a) + assert_raises(ArithmeticError, cumprod, a2, 1) + assert_raises(ArithmeticError, cumprod, a) else: - assert_array_equal(np.cumprod(a, axis=-1), + assert_array_equal(cumprod(a, axis=-1), np.array([1, 2, 20, 220, 1320, 6600, 26400], ctype)) - assert_array_equal(np.cumprod(a2, axis=0), + assert_array_equal(cumprod(a2, axis=0), np.array([[1, 2, 3, 4], [5, 12, 21, 36], [50, 36, 84, 180]], ctype)) - assert_array_equal(np.cumprod(a2, axis=-1), + assert_array_equal(cumprod(a2, axis=-1), np.array([[1, 2, 6, 24], [5, 30, 210, 1890], [10, 30, 120, 600]], ctype)) +def test_cumulative_include_initial(): + arr = np.arange(8).reshape((2, 2, 2)) + + expected = np.array([ + [[0, 0], [0, 1], [2, 4]], [[0, 0], [4, 5], [10, 12]] + ]) + assert_array_equal( + np.cumulative_sum(arr, axis=1, include_initial=True), expected + ) + + expected = np.array([ + [[1, 0, 0], [1, 2, 6]], [[1, 4, 20], [1, 6, 42]] + ]) + assert_array_equal( + np.cumulative_prod(arr, axis=2, 
include_initial=True), expected + ) + + out = np.zeros((3, 2), dtype=np.float64) + expected = np.array([[0, 0], [1, 2], [4, 6]], dtype=np.float64) + arr = np.arange(1, 5).reshape((2, 2)) + np.cumulative_sum(arr, axis=0, out=out, include_initial=True) + assert_array_equal(out, expected) + + expected = np.array([1, 2, 4]) + assert_array_equal( + np.cumulative_prod(np.array([2, 2]), include_initial=True), expected + ) + + class TestDiff: def test_basic(self): @@ -1901,6 +1933,13 @@ def test_positional_regression_9477(self): r = f([2]) assert_equal(r.dtype, np.dtype('float64')) + def test_datetime_conversion(self): + otype = "datetime64[ns]" + arr = np.array(['2024-01-01', '2024-01-02', '2024-01-03'], + dtype='datetime64[ns]') + assert_array_equal(np.vectorize(lambda x: x, signature="(i)->(j)", + otypes=[otype])(arr), arr) + class TestLeaks: class A: @@ -1914,6 +1953,9 @@ def unbound(*args): return 0 @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + @pytest.mark.skipif(NOGIL_BUILD, + reason=("Functions are immortalized if a thread is " + "launched, making this test flaky")) @pytest.mark.parametrize('name, incr', [ ('bound', A.iters), ('unbound', 0), @@ -2806,6 +2848,11 @@ def test_empty_with_minlength(self): y = np.bincount(x, minlength=5) assert_array_equal(y, np.zeros(5, dtype=int)) + @pytest.mark.parametrize('minlength', [0, 3]) + def test_empty_list(self, minlength): + assert_array_equal(np.bincount([], minlength=minlength), + np.zeros(minlength, dtype=int)) + def test_with_incorrect_minlength(self): x = np.array([], dtype=int) assert_raises_regex(TypeError, @@ -3963,6 +4010,17 @@ def test_quantile_with_weights_and_axis(self, method): ) assert_allclose(q, q_res) + @pytest.mark.parametrize("method", methods_supporting_weights) + def test_quantile_weights_min_max(self, method): + # Test weighted quantile at 0 and 1 with leading and trailing zero + # weights. 
+ w = [0, 0, 1, 2, 3, 0] + y = np.arange(6) + y_min = np.quantile(y, 0, weights=w, method="inverted_cdf") + y_max = np.quantile(y, 1, weights=w, method="inverted_cdf") + assert y_min == y[2] # == 2 + assert y_max == y[4] # == 4 + def test_quantile_weights_raises_negative_weights(self): y = [1, 2] w = [-0.5, 1] @@ -3987,6 +4045,20 @@ def test_weibull_fraction(self): quantile = np.quantile(arr, [Fraction(1, 2)], method='weibull') assert_equal(quantile, np.array(Fraction(1, 20))) + def test_closest_observation(self): + # Round ties to nearest even order statistic (see #26656) + m = 'closest_observation' + q = 0.5 + arr = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + assert_equal(2, np.quantile(arr[0:3], q, method=m)) + assert_equal(2, np.quantile(arr[0:4], q, method=m)) + assert_equal(2, np.quantile(arr[0:5], q, method=m)) + assert_equal(3, np.quantile(arr[0:6], q, method=m)) + assert_equal(4, np.quantile(arr[0:7], q, method=m)) + assert_equal(4, np.quantile(arr[0:8], q, method=m)) + assert_equal(4, np.quantile(arr[0:9], q, method=m)) + assert_equal(5, np.quantile(arr, q, method=m)) + class TestLerp: @hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False, diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 89758706d78f..09a1a5ab709d 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -469,7 +469,7 @@ def test_small(self): 'doane': 3, 'sqrt': 2, 'stone': 1}} for testlen, expectedResults in small_dat.items(): - testdat = np.arange(testlen) + testdat = np.arange(testlen).astype(float) for estimator, expbins in expectedResults.items(): a, b = np.histogram(testdat, estimator) assert_equal(len(a), expbins, err_msg="For the {0} estimator " @@ -592,6 +592,30 @@ def test_signed_integer_data(self, bins): assert_array_equal(hist, hist32) assert_array_equal(edges, edges32) + @pytest.mark.parametrize("bins", ['auto', 'fd', 'doane', 'scott', + 'stone', 'rice', 'sturges']) + def test_integer(self, 
bins): + """ + Test that bin width for integer data is at least 1. + """ + with suppress_warnings() as sup: + if bins == 'stone': + sup.filter(RuntimeWarning) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), bins), + np.arange(9)) + + def test_integer_non_auto(self): + """ + Test that the bin-width>=1 requirement *only* applies to auto binning. + """ + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), 16), + np.arange(17) / 2) + assert_equal( + np.histogram_bin_edges(np.tile(np.arange(9), 1000), [.1, .2]), + [.1, .2]) + def test_simple_weighted(self): """ Check that weighted data raises a TypeError diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 44664c2df891..38ded1f26cda 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2760,12 +2760,16 @@ def test_npzfile_dict(): assert_(f in ['x', 'y']) assert_equal(a.shape, (3, 3)) + for a in z.values(): + assert_equal(a.shape, (3, 3)) + assert_(len(z.items()) == 2) for f in z: assert_(f in ['x', 'y']) assert_('x' in z.keys()) + assert (z.get('x') == z['x']).all() @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") diff --git a/numpy/lib/tests/test_loadtxt.py b/numpy/lib/tests/test_loadtxt.py index 78c84e491c08..aba00c4256ad 100644 --- a/numpy/lib/tests/test_loadtxt.py +++ b/numpy/lib/tests/test_loadtxt.py @@ -18,12 +18,12 @@ def test_scientific_notation(): """Test that both 'e' and 'E' are parsed correctly.""" data = StringIO( - ( + "1.0e-1,2.0E1,3.0\n" "4.0e-2,5.0E-1,6.0\n" "7.0e-3,8.0E1,9.0\n" "0.0e-4,1.0E-1,2.0" - ) + ) expected = np.array( [[0.1, 20., 3.0], [0.04, 0.5, 6], [0.007, 80., 9], [0, 0.1, 2]] @@ -46,14 +46,14 @@ def mixed_types_structured(): with the associated structured array. 
""" data = StringIO( - ( + "1000;2.4;alpha;-34\n" "2000;3.1;beta;29\n" "3500;9.9;gamma;120\n" "4090;8.1;delta;0\n" "5001;4.4;epsilon;-99\n" "6543;7.8;omega;-1\n" - ) + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] @@ -597,14 +597,14 @@ def test_comment_multichar_error_with_quote(): def test_structured_dtype_with_quotes(): data = StringIO( - ( + "1000;2.4;'alpha';-34\n" "2000;3.1;'beta';29\n" "3500;9.9;'gamma';120\n" "4090;8.1;'delta';0\n" "5001;4.4;'epsilon';-99\n" "6543;7.8;'omega';-1\n" - ) + ) dtype = np.dtype( [('f0', np.uint16), ('f1', np.float64), ('f2', 'S7'), ('f3', np.int8)] @@ -970,12 +970,15 @@ def test_parametric_unit_discovery( """Check that the correct unit (e.g. month, day, second) is discovered from the data when a user specifies a unitless datetime.""" # Unit should be "D" (days) due to last entry - data = [generic_data] * 50000 + [long_datum] + data = [generic_data] * nrows + [long_datum] expected = np.array(data, dtype=expected_dtype) + assert len(data) == nrows+1 + assert len(data) == len(expected) # file-like path txt = StringIO("\n".join(data)) a = np.loadtxt(txt, dtype=unitless_dtype) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) @@ -983,11 +986,17 @@ def test_parametric_unit_discovery( fd, fname = mkstemp() os.close(fd) with open(fname, "w") as fh: - fh.write("\n".join(data)) + fh.write("\n".join(data)+"\n") + # loading the full file... a = np.loadtxt(fname, dtype=unitless_dtype) - os.remove(fname) + assert len(a) == len(expected) assert a.dtype == expected.dtype assert_equal(a, expected) + # loading half of the file... 
+ a = np.loadtxt(fname, dtype=unitless_dtype, max_rows=int(nrows/2)) + os.remove(fname) + assert len(a) == int(nrows/2) + assert_equal(a, expected[:int(nrows/2)]) def test_str_dtype_unit_discovery_with_converter(): @@ -995,7 +1004,7 @@ def test_str_dtype_unit_discovery_with_converter(): expected = np.array( ["spam-a-lot"] * 60000 + ["tis_but_a_scratch"], dtype="U17" ) - conv = lambda s: s.strip("XXX") + conv = lambda s: s.removeprefix("XXX") # file-like path txt = StringIO("\n".join(data)) @@ -1041,5 +1050,26 @@ def test_field_growing_cases(): assert len(res) == 0 for i in range(1, 1024): - res = np.loadtxt(["," * i], delimiter=",", dtype=bytes) + res = np.loadtxt(["," * i], delimiter=",", dtype=bytes, max_rows=10) assert len(res) == i+1 + +@pytest.mark.parametrize("nmax", (10000, 50000, 55000, 60000)) +def test_maxrows_exceeding_chunksize(nmax): + # tries to read all of the file, + # or less, equal, greater than _loadtxt_chunksize + file_length = 60000 + + # file-like path + data = ["a 0.5 1"]*file_length + txt = StringIO("\n".join(data)) + res = np.loadtxt(txt, dtype=str, delimiter=" ", max_rows=nmax) + assert len(res) == nmax + + # file-obj path + fd, fname = mkstemp() + os.close(fd) + with open(fname, "w") as fh: + fh.write("\n".join(data)) + res = np.loadtxt(fname, dtype=str, delimiter=" ", max_rows=nmax) + os.remove(fname) + assert len(res) == nmax diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py index da3ee0f2a3dc..2a92cad2f315 100644 --- a/numpy/lib/tests/test_nanfunctions.py +++ b/numpy/lib/tests/test_nanfunctions.py @@ -651,7 +651,7 @@ def test_empty(self): tgt = mat res = f(mat, axis=1) assert_equal(res, tgt) - tgt = np.zeros((0)) + tgt = np.zeros(0) res = f(mat, axis=None) assert_equal(res, tgt) @@ -1144,7 +1144,8 @@ def test_complex(self): assert_raises(TypeError, np.nanpercentile, arr_c, 0.5) @pytest.mark.parametrize("weighted", [False, True]) - def test_result_values(self, weighted): + 
@pytest.mark.parametrize("use_out", [False, True]) + def test_result_values(self, weighted, use_out): if weighted: percentile = partial(np.percentile, method="inverted_cdf") nanpercentile = partial(np.nanpercentile, method="inverted_cdf") @@ -1160,13 +1161,16 @@ def gen_weights(d): return None tgt = [percentile(d, 28, weights=gen_weights(d)) for d in _rdat] - res = nanpercentile(_ndat, 28, axis=1, weights=gen_weights(_ndat)) + out = np.empty_like(tgt) if use_out else None + res = nanpercentile(_ndat, 28, axis=1, + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) # Transpose the array to fit the output convention of numpy.percentile tgt = np.transpose([percentile(d, (28, 98), weights=gen_weights(d)) for d in _rdat]) + out = np.empty_like(tgt) if use_out else None res = nanpercentile(_ndat, (28, 98), axis=1, - weights=gen_weights(_ndat)) + weights=gen_weights(_ndat), out=out) assert_almost_equal(res, tgt) @pytest.mark.parametrize("axis", [None, 0, 1]) @@ -1242,6 +1246,58 @@ def test_multiple_percentiles(self): np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6) ) + @pytest.mark.parametrize("nan_weight", [0, 1, 2, 3, 1e200]) + def test_nan_value_with_weight(self, nan_weight): + x = [1, np.nan, 2, 3] + result = np.float64(2.0) + q_unweighted = np.nanpercentile(x, 50, method="inverted_cdf") + assert_equal(q_unweighted, result) + + # The weight value at the nan position should not matter. + w = [1.0, nan_weight, 1.0, 1.0] + q_weighted = np.nanpercentile(x, 50, weights=w, method="inverted_cdf") + assert_equal(q_weighted, result) + + @pytest.mark.parametrize("axis", [0, 1, 2]) + def test_nan_value_with_weight_ndim(self, axis): + # Create a multi-dimensional array to test + np.random.seed(1) + x_no_nan = np.random.random(size=(100, 99, 2)) + # Set some places to NaN (not particularly smart) so there is always + # some non-Nan. 
+ x = x_no_nan.copy() + x[np.arange(99), np.arange(99), 0] = np.nan + + p = np.array([[20., 50., 30], [70, 33, 80]]) + + # We just use ones as weights, but replace it with 0 or 1e200 at the + # NaN positions below. + weights = np.ones_like(x) + + # For comparison use weighted normal percentile with nan weights at + # 0 (and no NaNs); not sure this is strictly identical but should be + # sufficiently so (if a percentile lies exactly on a 0 value). + weights[np.isnan(x)] = 0 + p_expected = np.percentile( + x_no_nan, p, axis=axis, weights=weights, method="inverted_cdf") + + p_unweighted = np.nanpercentile( + x, p, axis=axis, method="inverted_cdf") + # The normal and unweighted versions should be identical: + assert_equal(p_unweighted, p_expected) + + weights[np.isnan(x)] = 1e200 # huge value, shouldn't matter + p_weighted = np.nanpercentile( + x, p, axis=axis, weights=weights, method="inverted_cdf") + assert_equal(p_weighted, p_expected) + # Also check with out passed: + out = np.empty_like(p_weighted) + res = np.nanpercentile( + x, p, axis=axis, weights=weights, out=out, method="inverted_cdf") + + assert res is out + assert_equal(out, p_expected) + class TestNanFunctions_Quantile: # most of this is already tested by TestPercentile diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py index 5b07f41c6260..a446156327cd 100644 --- a/numpy/lib/tests/test_packbits.py +++ b/numpy/lib/tests/test_packbits.py @@ -282,7 +282,7 @@ def test_unpackbits_large(): assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d) -class TestCount(): +class TestCount: x = np.array([ [1, 0, 1, 0, 0, 1, 0], [0, 1, 1, 1, 0, 0, 0], diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py index 07b80904b917..5b777f5735e4 100644 --- a/numpy/lib/tests/test_regression.py +++ b/numpy/lib/tests/test_regression.py @@ -215,7 +215,7 @@ def test_nansum_with_boolean(self): def test_py3_compat(self): # gh-2561 # Test if the oldstyle class test 
is bypassed in python3 - class C(): + class C: """Old-style class in python2, normal class in python3""" pass diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py index 609b77720c86..13529e001354 100644 --- a/numpy/lib/tests/test_shape_base.py +++ b/numpy/lib/tests/test_shape_base.py @@ -63,6 +63,8 @@ def test_invalid(self): assert_raises(IndexError, take_along_axis, a, ai.astype(float), axis=1) # invalid axis assert_raises(AxisError, take_along_axis, a, ai, axis=10) + # invalid indices + assert_raises(ValueError, take_along_axis, a, ai, axis=None) def test_empty(self): """ Test everything is ok with empty results, even with inserted dims """ @@ -104,6 +106,24 @@ def test_broadcast(self): put_along_axis(a, ai, 20, axis=1) assert_equal(take_along_axis(a, ai, axis=1), 20) + def test_invalid(self): + """ Test invalid inputs """ + a_base = np.array([[10, 30, 20], [60, 40, 50]]) + indices = np.array([[0], [1]]) + values = np.array([[2], [1]]) + + # sanity check + a = a_base.copy() + put_along_axis(a, indices, values, axis=0) + assert np.all(a == [[2, 2, 2], [1, 1, 1]]) + + # invalid indices + a = a_base.copy() + with assert_raises(ValueError) as exc: + put_along_axis(a, indices, values, axis=None) + assert "single dimension" in str(exc.exception) + + class TestApplyAlongAxis: def test_simple(self): @@ -494,7 +514,7 @@ def test_2D_arrays(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - column_stack((np.arange(3) for _ in range(2))) + column_stack(np.arange(3) for _ in range(2)) class TestDstack: @@ -531,7 +551,7 @@ def test_2D_array2(self): def test_generator(self): with pytest.raises(TypeError, match="arrays to stack must be"): - dstack((np.arange(3) for _ in range(2))) + dstack(np.arange(3) for _ in range(2)) # array_split has more comprehensive test of splitting. 
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py index 543a2d6c5d4b..3cbebbdd552e 100644 --- a/numpy/lib/tests/test_stride_tricks.py +++ b/numpy/lib/tests/test_stride_tricks.py @@ -341,7 +341,7 @@ def test_broadcast_shapes_raises(): [(2, 3), (2,)], [(3,), (3,), (4,)], [(1, 3, 4), (2, 3, 3)], - [(1, 2), (3,1), (3,2), (10, 5)], + [(1, 2), (3, 1), (3, 2), (10, 5)], [2, (2, 3)], ] for input_shapes in data: @@ -578,11 +578,12 @@ def test_writeable(): # but the result of broadcast_arrays needs to be writeable, to # preserve backwards compatibility - for is_broadcast, results in [(False, broadcast_arrays(original,)), - (True, broadcast_arrays(0, original))]: - for result in results: + test_cases = [((False,), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version - if is_broadcast: + if array_is_broadcast: with assert_warns(FutureWarning): assert_equal(result.flags.writeable, True) with assert_warns(DeprecationWarning): @@ -623,11 +624,12 @@ def test_writeable_memoryview(): # See gh-13929. original = np.array([1, 2, 3]) - for is_broadcast, results in [(False, broadcast_arrays(original,)), - (True, broadcast_arrays(0, original))]: - for result in results: + test_cases = [((False, ), broadcast_arrays(original,)), + ((True, False), broadcast_arrays(0, original))] + for is_broadcast, results in test_cases: + for array_is_broadcast, result in zip(is_broadcast, results): # This will change to False in a future version - if is_broadcast: + if array_is_broadcast: # memoryview(result, writable=True) will give warning but cannot # be tested using the python API. 
assert memoryview(result).readonly diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index e2f72ac90c92..644912d941e3 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -43,7 +43,7 @@ def _compare_dtypes(dt1, dt2): assert dt_m.metadata is None assert dt_m['l1'].metadata is None assert dt_m['l1']['l2'].metadata is None - + # alignment dt = np.dtype([('x', '>> import numpy as np >>> a = np.eye(2*3*4) >>> a.shape = (2*3, 4, 2, 3, 4) - >>> b = np.random.randn(2*3, 4) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(2*3, 4)) >>> x = np.linalg.tensorsolve(a, b) >>> x.shape (2, 3, 4) @@ -379,6 +381,7 @@ def solve(a, b): ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 5]]) >>> b = np.array([1, 2]) >>> x = np.linalg.solve(a, b) @@ -451,12 +454,14 @@ def tensorinv(a, ind=2): Examples -------- + >>> import numpy as np >>> a = np.eye(4*6) >>> a.shape = (4, 6, 8, 3) >>> ainv = np.linalg.tensorinv(a, ind=2) >>> ainv.shape (8, 3, 4, 6) - >>> b = np.random.randn(4, 6) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=(4, 6)) >>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b)) True @@ -465,7 +470,8 @@ def tensorinv(a, ind=2): >>> ainv = np.linalg.tensorinv(a, ind=1) >>> ainv.shape (8, 3, 24) - >>> b = np.random.randn(24) + >>> rng = np.random.default_rng() + >>> b = rng.normal(size=24) >>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b)) True @@ -538,6 +544,7 @@ def inv(a): Examples -------- + >>> import numpy as np >>> from numpy.linalg import inv >>> a = np.array([[1., 2.], [3., 4.]]) >>> ainv = inv(a) @@ -649,6 +656,7 @@ def matrix_power(a, n): Examples -------- + >>> import numpy as np >>> from numpy.linalg import matrix_power >>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. 
of the imaginary unit >>> matrix_power(i, 3) # should = -i @@ -803,6 +811,7 @@ def cholesky(a, /, *, upper=False): Examples -------- + >>> import numpy as np >>> A = np.array([[1,-2j],[2j,5]]) >>> A array([[ 1.+0.j, -0.-2.j], @@ -873,6 +882,40 @@ def outer(x1, x2, /): -------- outer + Examples + -------- + Make a (*very* coarse) grid for computing a Mandelbrot set: + + >>> rl = np.linalg.outer(np.ones((5,)), np.linspace(-2, 2, 5)) + >>> rl + array([[-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.], + [-2., -1., 0., 1., 2.]]) + >>> im = np.linalg.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) + >>> im + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + >>> grid = rl + im + >>> grid + array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], + [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j], + [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j], + [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j], + [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]]) + + An example using a "vector" of letters: + + >>> x = np.array(['a', 'b', 'c'], dtype=object) + >>> np.linalg.outer(x, [1, 2, 3]) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) + """ x1 = asanyarray(x1) x2 = asanyarray(x2) @@ -978,7 +1021,9 @@ def qr(a, mode='reduced'): Examples -------- - >>> a = np.random.randn(9, 6) + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) >>> Q, R = np.linalg.qr(a) >>> np.allclose(a, np.dot(Q, R)) # a does equal QR True @@ -1047,15 +1092,10 @@ def qr(a, mode='reduced'): a = _to_native_byte_order(a) mn = min(m, n) - if m <= n: - gufunc = _umath_linalg.qr_r_raw_m - else: - gufunc = _umath_linalg.qr_r_raw_n - signature = 'D->D' if isComplexType(t) else 'd->d' with errstate(call=_raise_linalgerror_qr, invalid='call', 
over='ignore', divide='ignore', under='ignore'): - tau = gufunc(a, signature=signature) + tau = _umath_linalg.qr_r_raw(a, signature=signature) # handle modes that don't return q if mode == 'r': @@ -1152,6 +1192,7 @@ def eigvals(a): if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as ``A``: + >>> import numpy as np >>> from numpy import linalg as LA >>> x = np.random.random() >>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]]) @@ -1248,6 +1289,7 @@ def eigvalsh(a, UPLO='L'): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> LA.eigvalsh(a) @@ -1388,6 +1430,7 @@ def eig(a): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA (Almost) trivial example with real eigenvalues and eigenvectors. @@ -1528,6 +1571,7 @@ def eigh(a, UPLO='L'): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> a @@ -1570,12 +1614,14 @@ def eigh(a, UPLO='L'): [0.+2.j, 2.+0.j]]) >>> wa, va = LA.eigh(a) >>> wb, vb = LA.eig(b) - >>> wa; wb + >>> wa array([1., 6.]) + >>> wb array([6.+0.j, 1.+0.j]) - >>> va; vb + >>> va array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary [ 0. +0.89442719j, 0. -0.4472136j ]]) + >>> vb array([[ 0.89442719+0.j , -0. +0.4472136j], [-0. 
+0.4472136j, 0.89442719+0.j ]]) @@ -1703,8 +1749,11 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): Examples -------- - >>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6) - >>> b = np.random.randn(2, 7, 8, 3) + 1j*np.random.randn(2, 7, 8, 3) + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) + 1j*rng.normal(size=(9, 6)) + >>> b = rng.normal(size=(2, 7, 8, 3)) + 1j*rng.normal(size=(2, 7, 8, 3)) + Reconstruction based on full SVD, 2D case: @@ -1779,15 +1828,9 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): m, n = a.shape[-2:] if compute_uv: if full_matrices: - if m < n: - gufunc = _umath_linalg.svd_m_f - else: - gufunc = _umath_linalg.svd_n_f + gufunc = _umath_linalg.svd_f else: - if m < n: - gufunc = _umath_linalg.svd_m_s - else: - gufunc = _umath_linalg.svd_n_s + gufunc = _umath_linalg.svd_s signature = 'D->DdD' if isComplexType(t) else 'd->ddd' with errstate(call=_raise_linalgerror_svd_nonconvergence, @@ -1799,16 +1842,11 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False): vh = vh.astype(result_t, copy=False) return SVDResult(wrap(u), s, wrap(vh)) else: - if m < n: - gufunc = _umath_linalg.svd_m - else: - gufunc = _umath_linalg.svd_n - signature = 'D->d' if isComplexType(t) else 'd->d' with errstate(call=_raise_linalgerror_svd_nonconvergence, invalid='call', over='ignore', divide='ignore', under='ignore'): - s = gufunc(a, signature=signature) + s = _umath_linalg.svd(a, signature=signature) s = s.astype(_realType(result_t), copy=False) return s @@ -1846,6 +1884,23 @@ def svdvals(x, /): -------- scipy.linalg.svdvals : Compute singular values of a matrix. + Examples + -------- + + >>> np.linalg.svdvals([[1, 2, 3, 4, 5], + ... [1, 4, 9, 16, 25], + ... [1, 8, 27, 64, 125]]) + array([146.68862757, 5.57510612, 0.60393245]) + + Determine the rank of a matrix using singular values: + + >>> s = np.linalg.svdvals([[1, 2, 3], + ... [2, 4, 6], + ... 
[-1, 1, -1]]); s + array([8.38434191e+00, 1.64402274e+00, 2.31534378e-16]) + >>> np.count_nonzero(s > 1e-10) # Matrix of rank 2 + 2 + """ return svd(x, compute_uv=False, hermitian=False) @@ -1908,6 +1963,7 @@ def cond(x, p=None): Examples -------- + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]]) >>> a @@ -2066,6 +2122,7 @@ def matrix_rank(A, tol=None, hermitian=False, *, rtol=None): Examples -------- + >>> import numpy as np >>> from numpy.linalg import matrix_rank >>> matrix_rank(np.eye(4)) # Full rank matrix 4 @@ -2181,7 +2238,9 @@ def pinv(a, rcond=None, hermitian=False, *, rtol=_NoValue): The following example checks that ``a * a+ * a == a`` and ``a+ * a * a+ == a+``: - >>> a = np.random.randn(9, 6) + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> a = rng.normal(size=(9, 6)) >>> B = np.linalg.pinv(a) >>> np.allclose(a, np.dot(a, np.dot(B, a))) True @@ -2276,6 +2335,7 @@ def slogdet(a): -------- The determinant of a 2-D array ``[[a, b], [c, d]]`` is ``ad - bc``: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> (sign, logabsdet) = np.linalg.slogdet(a) >>> (sign, logabsdet) @@ -2350,6 +2410,7 @@ def det(a): -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: + >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) -2.0 # may vary @@ -2446,6 +2507,7 @@ def lstsq(a, b, rcond=None): -------- Fit a line, ``y = mx + c``, through some noisy data-points: + >>> import numpy as np >>> x = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) @@ -2492,11 +2554,6 @@ def lstsq(a, b, rcond=None): if rcond is None: rcond = finfo(t).eps * max(n, m) - if m <= n: - gufunc = _umath_linalg.lstsq_m - else: - gufunc = _umath_linalg.lstsq_n - signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid' if n_rhs == 0: # lapack can't handle n_rhs = 0 - so allocate @@ -2505,7 +2562,8 @@ def lstsq(a, b, rcond=None): with 
errstate(call=_raise_linalgerror_lstsq, invalid='call', over='ignore', divide='ignore', under='ignore'): - x, resids, rank, s = gufunc(a, b, rcond, signature=signature) + x, resids, rank, s = _umath_linalg.lstsq(a, b, rcond, + signature=signature) if m == 0: x[...] = 0 if n_rhs == 0: @@ -2647,6 +2705,8 @@ def norm(x, ord=None, axis=None, keepdims=False): Examples -------- + + >>> import numpy as np >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a @@ -2884,6 +2944,7 @@ def multi_dot(arrays): return functools.reduce(np.dot, arrays) -------- `multi_dot` allows you to write:: + >>> import numpy as np >>> from numpy.linalg import multi_dot >>> # Prepare some data >>> A = np.random.random((10000, 100)) @@ -2974,7 +3035,7 @@ def _multi_dot_three(A, B, C, out=None): def _multi_dot_matrix_chain_order(arrays, return_costs=False): """ - Return a np.array that encodes the optimal order of mutiplications. + Return a np.array that encodes the optimal order of multiplications. The optimal order array is then used by `_multi_dot()` to do the multiplication. 
@@ -3066,6 +3127,58 @@ def diagonal(x, /, *, offset=0): -------- numpy.diagonal + Examples + -------- + >>> a = np.arange(4).reshape(2, 2); a + array([[0, 1], + [2, 3]]) + >>> np.linalg.diagonal(a) + array([0, 3]) + + A 3-D example: + + >>> a = np.arange(8).reshape(2, 2, 2); a + array([[[0, 1], + [2, 3]], + [[4, 5], + [6, 7]]]) + >>> np.linalg.diagonal(a) + array([[0, 3], + [4, 7]]) + + Diagonals adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(a, offset=1) # First superdiagonal + array([1, 5]) + >>> np.linalg.diagonal(a, offset=2) # Second superdiagonal + array([2]) + >>> np.linalg.diagonal(a, offset=-1) # First subdiagonal + array([3, 7]) + >>> np.linalg.diagonal(a, offset=-2) # Second subdiagonal + array([6]) + + The anti-diagonal can be obtained by reversing the order of elements + using either `numpy.flipud` or `numpy.fliplr`. + + >>> a = np.arange(9).reshape(3, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.diagonal(np.fliplr(a)) # Horizontal flip + array([2, 4, 6]) + >>> np.linalg.diagonal(np.flipud(a)) # Vertical flip + array([6, 4, 2]) + + Note that the order in which the diagonal is retrieved varies depending + on the flip function. + """ return _core_diagonal(x, offset, axis1=-2, axis2=-1) @@ -3119,6 +3232,38 @@ def trace(x, /, *, offset=0, dtype=None): -------- numpy.trace + Examples + -------- + >>> np.linalg.trace(np.eye(3)) + 3.0 + >>> a = np.arange(8).reshape((2, 2, 2)) + >>> np.linalg.trace(a) + array([3, 11]) + + Trace is computed with the last two axes as the 2-d sub-arrays. + This behavior differs from :py:func:`numpy.trace` which uses the first two + axes by default. 
+ + >>> a = np.arange(24).reshape((3, 2, 2, 2)) + >>> np.linalg.trace(a).shape + (3, 2) + + Traces adjacent to the main diagonal can be obtained by using the + `offset` argument: + + >>> a = np.arange(9).reshape((3, 3)); a + array([[0, 1, 2], + [3, 4, 5], + [6, 7, 8]]) + >>> np.linalg.trace(a, offset=1) # First superdiagonal + 6 + >>> np.linalg.trace(a, offset=2) # Second superdiagonal + 2 + >>> np.linalg.trace(a, offset=-1) # First subdiagonal + 10 + >>> np.linalg.trace(a, offset=-2) # Second subdiagonal + 6 + """ return _core_trace(x, offset, axis1=-2, axis2=-1, dtype=dtype) @@ -3163,7 +3308,35 @@ def cross(x1, x2, /, *, axis=-1): -------- numpy.cross + Examples + -------- + Vector cross-product. + + >>> x = np.array([1, 2, 3]) + >>> y = np.array([4, 5, 6]) + >>> np.linalg.cross(x, y) + array([-3, 6, -3]) + + Multiple vector cross-products. Note that the direction of the cross + product vector is defined by the *right-hand rule*. + + >>> x = np.array([[1,2,3], [4,5,6]]) + >>> y = np.array([[4,5,6], [1,2,3]]) + >>> np.linalg.cross(x, y) + array([[-3, 6, -3], + [ 3, -6, 3]]) + + >>> x = np.array([[1, 2], [3, 4], [5, 6]]) + >>> y = np.array([[4, 5], [6, 1], [2, 3]]) + >>> np.linalg.cross(x, y, axis=0) + array([[-24, 6], + [ 18, 24], + [-6, -18]]) + """ + x1 = asanyarray(x1) + x2 = asanyarray(x2) + if x1.shape[axis] != 3 or x2.shape[axis] != 3: raise ValueError( "Both input arrays must be (arrays of) 3-dimensional vectors, " @@ -3213,6 +3386,53 @@ def matmul(x1, x2, /): -------- numpy.matmul + Examples + -------- + For 2-D arrays it is the matrix product: + + >>> a = np.array([[1, 0], + ... [0, 1]]) + >>> b = np.array([[4, 1], + ... [2, 2]]) + >>> np.linalg.matmul(a, b) + array([[4, 1], + [2, 2]]) + + For 2-D mixed with 1-D, the result is the usual. + + >>> a = np.array([[1, 0], + ... 
[0, 1]]) + >>> b = np.array([1, 2]) + >>> np.linalg.matmul(a, b) + array([1, 2]) + >>> np.linalg.matmul(b, a) + array([1, 2]) + + + Broadcasting is conventional for stacks of arrays + + >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) + >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) + >>> np.linalg.matmul(a,b).shape + (2, 2, 2) + >>> np.linalg.matmul(a, b)[0, 1, 1] + 98 + >>> sum(a[0, 1, :] * b[0 , :, 1]) + 98 + + Vector, vector returns the scalar inner product, but neither argument + is complex-conjugated: + + >>> np.linalg.matmul([2j, 3j], [2j, 3j]) + (-13+0j) + + Scalar multiplication raises an error. + + >>> np.linalg.matmul([1,2], 3) + Traceback (most recent call last): + ... + ValueError: matmul: Input operand 1 does not have enough dimensions ... + """ return _core_matmul(x1, x2) @@ -3272,6 +3492,36 @@ def matrix_norm(x, /, *, keepdims=False, ord="fro"): -------- numpy.linalg.norm : Generic norm function + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, ..., 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> LA.matrix_norm(b) + 7.745966692414834 + >>> LA.matrix_norm(b, ord='fro') + 7.745966692414834 + >>> LA.matrix_norm(b, ord=np.inf) + 9.0 + >>> LA.matrix_norm(b, ord=-np.inf) + 2.0 + + >>> LA.matrix_norm(b, ord=1) + 7.0 + >>> LA.matrix_norm(b, ord=-1) + 6.0 + >>> LA.matrix_norm(b, ord=2) + 7.3484692283495345 + >>> LA.matrix_norm(b, ord=-2) + 1.8570331885190563e-016 # may vary + """ x = asanyarray(x) return norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord) @@ -3311,6 +3561,34 @@ def vector_norm(x, /, *, axis=None, keepdims=False, ord=2): -------- numpy.linalg.norm : Generic norm function + Examples + -------- + >>> from numpy import linalg as LA + >>> a = np.arange(9) + 1 + >>> a + array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + >>> b = a.reshape((3, 3)) + >>> b + array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + + >>> LA.vector_norm(b) + 
16.881943016134134 + >>> LA.vector_norm(b, ord=np.inf) + 9.0 + >>> LA.vector_norm(b, ord=-np.inf) + 1.0 + + >>> LA.vector_norm(b, ord=1) + 45.0 + >>> LA.vector_norm(b, ord=-1) + 0.3534857623790153 + >>> LA.vector_norm(b, ord=2) + 16.881943016134134 + >>> LA.vector_norm(b, ord=-2) + 0.8058837395885292 + """ x = asanyarray(x) shape = list(x.shape) @@ -3390,5 +3668,14 @@ def vecdot(x1, x2, /, *, axis=-1): -------- numpy.vecdot + Examples + -------- + Get the projected size along a given normal for an array of vectors. + + >>> v = np.array([[0., 5., 0.], [0., 0., 10.], [0., 6., 8.]]) + >>> n = np.array([0., 0.6, 0.8]) + >>> np.linalg.vecdot(v, n) + array([ 3., 8., 10.]) + """ return _core_vecdot(x1, x2, axis=axis) diff --git a/numpy/linalg/_linalg.pyi b/numpy/linalg/_linalg.pyi index e9f00e226a94..0d431794b74d 100644 --- a/numpy/linalg/_linalg.pyi +++ b/numpy/linalg/_linalg.pyi @@ -2,6 +2,7 @@ from collections.abc import Iterable from typing import ( Literal as L, overload, + TypeAlias, TypeVar, Any, SupportsIndex, @@ -45,8 +46,8 @@ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any]) _SCT = TypeVar("_SCT", bound=generic, covariant=True) _SCT2 = TypeVar("_SCT2", bound=generic, covariant=True) -_2Tuple = tuple[_T, _T] -_ModeKind = L["reduced", "complete", "r", "raw"] +_2Tuple: TypeAlias = tuple[_T, _T] +_ModeKind: TypeAlias = L["reduced", "complete", "r", "raw"] __all__: list[str] diff --git a/numpy/linalg/lapack_lite/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c index 37a41408be22..71a4c81edbf1 100644 --- a/numpy/linalg/lapack_lite/python_xerbla.c +++ b/numpy/linalg/lapack_lite/python_xerbla.c @@ -28,22 +28,16 @@ CBLAS_INT BLAS_FUNC(xerbla)(char *srname, CBLAS_INT *info) char buf[sizeof(format) + 6 + 4]; /* 6 for name, 4 for param. num. 
*/ int len = 0; /* length of subroutine name*/ -#ifdef WITH_THREAD PyGILState_STATE save; -#endif while( len<6 && srname[len]!='\0' ) len++; while( len && srname[len-1]==' ' ) len--; -#ifdef WITH_THREAD save = PyGILState_Ensure(); -#endif PyOS_snprintf(buf, sizeof(buf), format, len, srname, (int)*info); PyErr_SetString(PyExc_ValueError, buf); -#ifdef WITH_THREAD PyGILState_Release(save); -#endif return 0; } diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c index 766dfa9527b1..85590ba687ca 100644 --- a/numpy/linalg/lapack_litemodule.c +++ b/numpy/linalg/lapack_litemodule.c @@ -409,5 +409,10 @@ PyMODINIT_FUNC PyInit_lapack_lite(void) PyDict_SetItemString(d, "_ilp64", Py_False); #endif +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/linalg/meson.build b/numpy/linalg/meson.build index 104808ab5a1d..740c9f56c6fa 100644 --- a/numpy/linalg/meson.build +++ b/numpy/linalg/meson.build @@ -57,5 +57,6 @@ py.install_sources( 'tests/test_linalg.py', 'tests/test_regression.py', ], - subdir: 'numpy/linalg/tests' + subdir: 'numpy/linalg/tests', + install_tag: 'tests' ) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 02e94354399d..ffd9550e7c1d 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -4,6 +4,7 @@ import os import sys import itertools +import threading import traceback import textwrap import subprocess @@ -1943,7 +1944,9 @@ def test_generalized_raise_multiloop(): assert_raises(np.linalg.LinAlgError, np.linalg.inv, x) - +@pytest.mark.skipif( + threading.active_count() > 1, + reason="skipping test that uses fork because there are multiple threads") def test_xerbla_override(): # Check that our xerbla has been successfully linked in. 
If it is not, # the default xerbla routine is called, which prints a message to stdout @@ -2307,6 +2310,14 @@ def test_cross(): assert_equal(actual, expected) + # We test that lists are converted to arrays. + u = [1, 2, 3] + v = [4, 5, 6] + actual = np.linalg.cross(u, v) + expected = array([-3, 6, -3]) + + assert_equal(actual, expected) + with assert_raises_regex( ValueError, r"input arrays must be \(arrays of\) 3-dimensional vectors" diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index 8cac195aa864..91051c0eca4f 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -161,3 +161,18 @@ def test_matrix_rank_rtol_argument(self, rtol): x = np.zeros((4, 3, 2)) res = np.linalg.matrix_rank(x, rtol=rtol) assert res.shape == (4,) + + def test_openblas_threading(self): + # gh-27036 + # Test whether matrix multiplication involving a large matrix always + # gives the same (correct) answer + x = np.arange(500000, dtype=np.float64) + src = np.vstack((x, -10*x)).T + matrix = np.array([[0, 1], [1, 0]]) + expected = np.vstack((-10*x, x)).T # src @ matrix + for i in range(200): + result = src @ matrix + mismatches = (~np.isclose(result, expected)).sum() + if mismatches != 0: + assert False, ("unexpected result from matmul, " + "probably due to OpenBLAS threading issues") diff --git a/numpy/linalg/umath_linalg.cpp b/numpy/linalg/umath_linalg.cpp index cd5ddddaa8d7..ead6d84a73a2 100644 --- a/numpy/linalg/umath_linalg.cpp +++ b/numpy/linalg/umath_linalg.cpp @@ -8,13 +8,12 @@ #define PY_SSIZE_T_CLEAN #include +#define NPY_TARGET_VERSION NPY_2_1_API_VERSION #define NPY_NO_DEPRECATED_API NPY_API_VERSION #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "numpy/npy_math.h" -#include "npy_pycompat.h" - #include "npy_config.h" #include "npy_cblas.h" @@ -29,6 +28,15 @@ static const char* umath_linalg_version_string = "0.1.5"; +// global lock to serialize calls into lapack_lite +#if 
!HAVE_EXTERNAL_LAPACK +#if PY_VERSION_HEX < 0x30d00b3 +static PyThread_type_lock lapack_lite_lock; +#else +static PyMutex lapack_lite_lock = {0}; +#endif +#endif + /* **************************************************************************** * Debugging support * @@ -402,6 +410,18 @@ FNAME(zgemm)(char *transa, char *transb, #define LAPACK(FUNC) \ FNAME(FUNC) +#ifdef HAVE_EXTERNAL_LAPACK + #define LOCK_LAPACK_LITE + #define UNLOCK_LAPACK_LITE +#else +#if PY_VERSION_HEX < 0x30d00b3 + #define LOCK_LAPACK_LITE PyThread_acquire_lock(lapack_lite_lock, WAIT_LOCK) + #define UNLOCK_LAPACK_LITE PyThread_release_lock(lapack_lite_lock) +#else + #define LOCK_LAPACK_LITE PyMutex_Lock(&lapack_lite_lock) + #define UNLOCK_LAPACK_LITE PyMutex_Unlock(&lapack_lite_lock) +#endif +#endif /* ***************************************************************************** @@ -541,39 +561,33 @@ const f2c_doublecomplex numeric_limits::nan = {NPY_NAN, NPY_N * column_strides: the number of bytes between consecutive columns. 
* output_lead_dim: BLAS/LAPACK-side leading dimension, in elements */ -typedef struct linearize_data_struct +struct linearize_data { npy_intp rows; npy_intp columns; npy_intp row_strides; npy_intp column_strides; npy_intp output_lead_dim; -} LINEARIZE_DATA_t; +}; -static inline void -init_linearize_data_ex(LINEARIZE_DATA_t *lin_data, - npy_intp rows, +static inline +linearize_data init_linearize_data_ex(npy_intp rows, npy_intp columns, npy_intp row_strides, npy_intp column_strides, npy_intp output_lead_dim) { - lin_data->rows = rows; - lin_data->columns = columns; - lin_data->row_strides = row_strides; - lin_data->column_strides = column_strides; - lin_data->output_lead_dim = output_lead_dim; + return {rows, columns, row_strides, column_strides, output_lead_dim}; } -static inline void -init_linearize_data(LINEARIZE_DATA_t *lin_data, - npy_intp rows, +static inline +linearize_data init_linearize_data(npy_intp rows, npy_intp columns, npy_intp row_strides, npy_intp column_strides) { - init_linearize_data_ex( - lin_data, rows, columns, row_strides, column_strides, columns); + return init_linearize_data_ex( + rows, columns, row_strides, column_strides, columns); } #if _UMATH_LINALG_DEBUG @@ -603,7 +617,7 @@ dump_ufunc_object(PyUFuncObject* ufunc) } static inline void -dump_linearize_data(const char* name, const LINEARIZE_DATA_t* params) +dump_linearize_data(const char* name, const linearize_data* params) { TRACE_TXT("\n\t%s rows: %zd columns: %zd"\ "\n\t\trow_strides: %td column_strides: %td"\ @@ -845,7 +859,7 @@ template static inline void * linearize_matrix(typ *dst, typ *src, - const LINEARIZE_DATA_t* data) + const linearize_data* data) { using ftyp = fortran_type_t; if (dst) { @@ -890,7 +904,7 @@ template static inline void * delinearize_matrix(typ *dst, typ *src, - const LINEARIZE_DATA_t* data) + const linearize_data* data) { using ftyp = fortran_type_t; @@ -937,7 +951,7 @@ using ftyp = fortran_type_t; template static inline void -nan_matrix(typ *dst, const 
LINEARIZE_DATA_t* data) +nan_matrix(typ *dst, const linearize_data* data) { int i, j; for (i = 0; i < data->rows; i++) { @@ -953,7 +967,7 @@ nan_matrix(typ *dst, const LINEARIZE_DATA_t* data) template static inline void -zero_matrix(typ *dst, const LINEARIZE_DATA_t* data) +zero_matrix(typ *dst, const linearize_data* data) { int i, j; for (i = 0; i < data->rows; i++) { @@ -1118,7 +1132,9 @@ using ftyp = fortran_type_t; fortran_int lda = fortran_int_max(m, 1); int i; /* note: done in place */ + LOCK_LAPACK_LITE; getrf(&m, &m, (ftyp*)src, &lda, pivots, &info); + UNLOCK_LAPACK_LITE; if (info == 0) { int change_sign = 0; @@ -1168,9 +1184,8 @@ slogdet(char **args, tmp_buff = (char *)malloc(matrix_size + pivot_size); if (tmp_buff) { - LINEARIZE_DATA_t lin_data; /* swapped steps to get matrix in FORTRAN order */ - init_linearize_data(&lin_data, m, m, steps[1], steps[0]); + linearize_data lin_data = init_linearize_data(m, m, steps[1], steps[0]); BEGIN_OUTER_LOOP_3 linearize_matrix((typ*)tmp_buff, (typ*)args[0], &lin_data); slogdet_single_element(m, @@ -1220,11 +1235,11 @@ det(char **args, tmp_buff = (char *)malloc(matrix_size + pivot_size); if (tmp_buff) { - LINEARIZE_DATA_t lin_data; + /* swapped steps to get matrix in FORTRAN order */ + linearize_data lin_data = init_linearize_data(m, m, steps[1], steps[0]); + typ sign; basetyp logdet; - /* swapped steps to get matrix in FORTRAN order */ - init_linearize_data(&lin_data, m, m, steps[1], steps[0]); BEGIN_OUTER_LOOP_2 linearize_matrix((typ*)tmp_buff, (typ*)args[0], &lin_data); @@ -1271,22 +1286,26 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(ssyevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, params->W, params->WORK, ¶ms->LWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dsyevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, 
params->W, params->WORK, ¶ms->LWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1373,12 +1392,14 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cheevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, (fortran_type_t*)params->A, ¶ms->LDA, params->W, (fortran_type_t*)params->WORK, ¶ms->LWORK, params->RWORK, ¶ms->LRWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1386,12 +1407,14 @@ static inline fortran_int call_evd(EIGH_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zheevd)(¶ms->JOBZ, ¶ms->UPLO, ¶ms->N, (fortran_type_t*)params->A, ¶ms->LDA, params->W, (fortran_type_t*)params->WORK, ¶ms->LWORK, params->RWORK, ¶ms->LRWORK, params->IWORK, ¶ms->LIWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1524,20 +1547,11 @@ eigh_wrapper(char JOBZ, JOBZ, UPLO, (fortran_int)dimensions[0], dispatch_scalar())) { - LINEARIZE_DATA_t matrix_in_ld; - LINEARIZE_DATA_t eigenvectors_out_ld; - LINEARIZE_DATA_t eigenvalues_out_ld; - - init_linearize_data(&matrix_in_ld, - eigh_params.N, eigh_params.N, - steps[1], steps[0]); - init_linearize_data(&eigenvalues_out_ld, - 1, eigh_params.N, - 0, steps[2]); + linearize_data matrix_in_ld = init_linearize_data(eigh_params.N, eigh_params.N, steps[1], steps[0]); + linearize_data eigenvalues_out_ld = init_linearize_data(1, eigh_params.N, 0, steps[2]); + linearize_data eigenvectors_out_ld = {}; /* silence uninitialized warning */ if ('V' == eigh_params.JOBZ) { - init_linearize_data(&eigenvectors_out_ld, - eigh_params.N, eigh_params.N, - steps[4], steps[3]); + eigenvectors_out_ld = init_linearize_data(eigh_params.N, eigh_params.N, steps[4], steps[3]); } for (iter = 0; iter < outer_dim; ++iter) { @@ -1634,11 +1648,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; 
} @@ -1646,11 +1662,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1658,11 +1676,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1670,11 +1690,13 @@ static inline fortran_int call_gesv(GESV_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgesv)(¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->IPIV, params->B, ¶ms->LDB, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1741,11 +1763,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[0]; nrhs = (fortran_int)dimensions[1]; if (init_gesv(¶ms, n, nrhs)) { - LINEARIZE_DATA_t a_in, b_in, r_out; - - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&b_in, nrhs, n, steps[3], steps[2]); - init_linearize_data(&r_out, nrhs, n, steps[5], steps[4]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data b_in = init_linearize_data(nrhs, n, steps[3], steps[2]); + linearize_data r_out = init_linearize_data(nrhs, n, steps[5], steps[4]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -1780,10 +1800,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[0]; if (init_gesv(¶ms, n, 1)) { - LINEARIZE_DATA_t a_in, b_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&b_in, 1, n, 1, steps[2]); - init_linearize_data(&r_out, 1, n, 1, steps[3]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data b_in = init_linearize_data(1, n, 1, steps[2]); + linearize_data r_out = init_linearize_data(1, n, 1, steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -1817,9 +1836,8 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[0]; if 
(init_gesv(¶ms, n, n)) { - LINEARIZE_DATA_t a_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&r_out, n, n, steps[3], steps[2]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data r_out = init_linearize_data(n, n, steps[3], steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; @@ -1892,9 +1910,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(spotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1902,9 +1922,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1912,9 +1934,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1922,9 +1946,11 @@ static inline fortran_int call_potrf(POTR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zpotrf)(¶ms->UPLO, ¶ms->N, params->A, ¶ms->LDA, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -1978,9 +2004,8 @@ cholesky(char uplo, char **args, npy_intp const *dimensions, npy_intp const *ste n = (fortran_int)dimensions[0]; if (init_potrf(¶ms, uplo, n)) { - LINEARIZE_DATA_t a_in, r_out; - init_linearize_data(&a_in, n, n, steps[1], steps[0]); - init_linearize_data(&r_out, n, n, steps[3], steps[2]); + linearize_data a_in = init_linearize_data(n, n, steps[1], steps[0]); + linearize_data r_out = init_linearize_data(n, n, steps[3], steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; linearize_matrix(params.A, (ftyp*)args[0], &a_in); @@ -2096,6 +2121,7 @@ static inline fortran_int call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->WR, params->WI, @@ -2103,6 +2129,7 @@ 
call_geev(GEEV_PARAMS_t* params) params->VRR, ¶ms->LDVR, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2110,6 +2137,7 @@ static inline fortran_int call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->WR, params->WI, @@ -2117,6 +2145,7 @@ call_geev(GEEV_PARAMS_t* params) params->VRR, ¶ms->LDVR, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2308,6 +2337,7 @@ call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->W, @@ -2316,6 +2346,7 @@ call_geev(GEEV_PARAMS_t* params) params->WORK, ¶ms->LWORK, params->WR, /* actually RWORK */ &rv); + UNLOCK_LAPACK_LITE; return rv; } #endif @@ -2325,6 +2356,7 @@ call_geev(GEEV_PARAMS_t* params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgeev)(¶ms->JOBVL, ¶ms->JOBVR, ¶ms->N, params->A, ¶ms->LDA, params->W, @@ -2333,6 +2365,7 @@ call_geev(GEEV_PARAMS_t* params) params->WORK, ¶ms->LWORK, params->WR, /* actually RWORK */ &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2465,27 +2498,25 @@ eig_wrapper(char JOBVL, if (init_geev(&geev_params, JOBVL, JOBVR, (fortran_int)dimensions[0], dispatch_scalar())) { - LINEARIZE_DATA_t a_in; - LINEARIZE_DATA_t w_out; - LINEARIZE_DATA_t vl_out; - LINEARIZE_DATA_t vr_out; + linearize_data vl_out = {}; /* silence uninitialized warning */ + linearize_data vr_out = {}; /* silence uninitialized warning */ - init_linearize_data(&a_in, + linearize_data a_in = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); steps += 2; - init_linearize_data(&w_out, + linearize_data w_out = init_linearize_data( 1, geev_params.N, 0, steps[0]); steps += 1; if ('V' == geev_params.JOBVL) { - init_linearize_data(&vl_out, + vl_out = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); steps += 2; } if ('V' == geev_params.JOBVR) { - init_linearize_data(&vr_out, + 
vr_out = init_linearize_data( geev_params.N, geev_params.N, steps[1], steps[0]); } @@ -2657,6 +2688,7 @@ static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2665,12 +2697,14 @@ call_gesdd(GESDD_PARAMS_t *params) params->WORK, ¶ms->LWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2679,6 +2713,7 @@ call_gesdd(GESDD_PARAMS_t *params) params->WORK, ¶ms->LWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2785,6 +2820,7 @@ static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2794,12 +2830,14 @@ call_gesdd(GESDD_PARAMS_t *params) params->RWORK, params->IWORK, &rv); + LOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gesdd(GESDD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgesdd)(¶ms->JOBZ, ¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->S, @@ -2809,6 +2847,7 @@ call_gesdd(GESDD_PARAMS_t *params) params->RWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -2953,13 +2992,13 @@ using basetyp = basetype_t; (fortran_int)dimensions[0], (fortran_int)dimensions[1], dispatch_scalar())) { - LINEARIZE_DATA_t a_in, u_out = {}, s_out = {}, v_out = {}; + linearize_data u_out = {}, s_out = {}, v_out = {}; fortran_int min_m_n = params.M < params.N ? 
params.M : params.N; - init_linearize_data(&a_in, params.N, params.M, steps[1], steps[0]); + linearize_data a_in = init_linearize_data(params.N, params.M, steps[1], steps[0]); if ('N' == params.JOBZ) { /* only the singular values are wanted */ - init_linearize_data(&s_out, 1, min_m_n, 0, steps[2]); + s_out = init_linearize_data(1, min_m_n, 0, steps[2]); } else { fortran_int u_columns, v_rows; if ('S' == params.JOBZ) { @@ -2969,13 +3008,13 @@ dispatch_scalar())) { u_columns = params.M; v_rows = params.N; } - init_linearize_data(&u_out, + u_out = init_linearize_data( u_columns, params.M, steps[3], steps[2]); - init_linearize_data(&s_out, + s_out = init_linearize_data( 1, min_m_n, 0, steps[4]); - init_linearize_data(&v_out, + v_out = init_linearize_data( params.N, v_rows, steps[6], steps[5]); } @@ -3099,22 +3138,26 @@ static inline fortran_int call_geqrf(GEQRF_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgeqrf)(¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_geqrf(GEQRF_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgeqrf)(¶ms->M, ¶ms->N, params->A, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3296,10 +3339,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[1]; if (init_geqrf(¶ms, m, n)) { - LINEARIZE_DATA_t a_in, tau_out; - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_out, 1, fortran_int_min(m, n), 1, steps[2]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_out = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); BEGIN_OUTER_LOOP_2 int not_ok; @@ -3342,22 +3384,26 @@ static inline fortran_int call_gqr(GQR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dorgqr)(¶ms->M, ¶ms->MC, ¶ms->MN, params->Q, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + 
UNLOCK_LAPACK_LITE; return rv; } static inline fortran_int call_gqr(GQR_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zungqr)(¶ms->M, ¶ms->MC, ¶ms->MN, params->Q, ¶ms->LDA, params->TAU, params->WORK, ¶ms->LWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3590,11 +3636,9 @@ using ftyp = fortran_type_t; n = (fortran_int)dimensions[1]; if (init_gqr(¶ms, m, n)) { - LINEARIZE_DATA_t a_in, tau_in, q_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_in, 1, fortran_int_min(m, n), 1, steps[2]); - init_linearize_data(&q_out, fortran_int_min(m, n), m, steps[4], steps[3]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_in = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); + linearize_data q_out = init_linearize_data(fortran_int_min(m, n), m, steps[4], steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -3646,11 +3690,9 @@ using ftyp = fortran_type_t; if (init_gqr_complete(¶ms, m, n)) { - LINEARIZE_DATA_t a_in, tau_in, q_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data(&tau_in, 1, fortran_int_min(m, n), 1, steps[2]); - init_linearize_data(&q_out, m, m, steps[4], steps[3]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data tau_in = init_linearize_data(1, fortran_int_min(m, n), 1, steps[2]); + linearize_data q_out = init_linearize_data(m, m, steps[4], steps[3]); BEGIN_OUTER_LOOP_3 int not_ok; @@ -3742,6 +3784,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(sgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3750,6 +3793,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3758,6 +3802,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(dgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, 
¶ms->LDA, params->B, ¶ms->LDB, @@ -3766,6 +3811,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3869,6 +3915,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(cgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3877,6 +3924,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->RWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -3884,6 +3932,7 @@ static inline fortran_int call_gelsd(GELSD_PARAMS_t *params) { fortran_int rv; + LOCK_LAPACK_LITE; LAPACK(zgelsd)(¶ms->M, ¶ms->N, ¶ms->NRHS, params->A, ¶ms->LDA, params->B, ¶ms->LDB, @@ -3892,6 +3941,7 @@ call_gelsd(GELSD_PARAMS_t *params) params->WORK, ¶ms->LWORK, params->RWORK, (fortran_int*)params->IWORK, &rv); + UNLOCK_LAPACK_LITE; return rv; } @@ -4053,13 +4103,11 @@ using basetyp = basetype_t; excess = m - n; if (init_gelsd(¶ms, m, n, nrhs, dispatch_scalar{})) { - LINEARIZE_DATA_t a_in, b_in, x_out, s_out, r_out; - - init_linearize_data(&a_in, n, m, steps[1], steps[0]); - init_linearize_data_ex(&b_in, nrhs, m, steps[3], steps[2], fortran_int_max(n, m)); - init_linearize_data_ex(&x_out, nrhs, n, steps[5], steps[4], fortran_int_max(n, m)); - init_linearize_data(&r_out, 1, nrhs, 1, steps[6]); - init_linearize_data(&s_out, 1, fortran_int_min(n, m), 1, steps[7]); + linearize_data a_in = init_linearize_data(n, m, steps[1], steps[0]); + linearize_data b_in = init_linearize_data_ex(nrhs, m, steps[3], steps[2], fortran_int_max(n, m)); + linearize_data x_out = init_linearize_data_ex(nrhs, n, steps[5], steps[4], fortran_int_max(n, m)); + linearize_data r_out = init_linearize_data(1, nrhs, 1, steps[6]); + linearize_data s_out = init_linearize_data(1, fortran_int_min(n, m), 1, steps[7]); BEGIN_OUTER_LOOP_7 int not_ok; @@ -4217,14 +4265,14 @@ GUFUNC_FUNC_ARRAY_REAL_COMPLEX__(lstsq); GUFUNC_FUNC_ARRAY_EIG(eig); 
GUFUNC_FUNC_ARRAY_EIG(eigvals); -static char equal_2_types[] = { +static const char equal_2_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_CDOUBLE, NPY_CDOUBLE }; -static char equal_3_types[] = { +static const char equal_3_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_CFLOAT, @@ -4232,47 +4280,47 @@ static char equal_3_types[] = { }; /* second result is logdet, that will always be a REAL */ -static char slogdet_types[] = { +static const char slogdet_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE }; -static char eigh_types[] = { +static const char eigh_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE }; -static char eighvals_types[] = { +static const char eighvals_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_DOUBLE }; -static char eig_types[] = { +static const char eig_types[] = { NPY_FLOAT, NPY_CFLOAT, NPY_CFLOAT, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE }; -static char eigvals_types[] = { +static const char eigvals_types[] = { NPY_FLOAT, NPY_CFLOAT, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE }; -static char svd_1_1_types[] = { +static const char svd_1_1_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_FLOAT, NPY_CDOUBLE, NPY_DOUBLE }; -static char svd_1_3_types[] = { +static const char svd_1_3_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, @@ -4280,31 +4328,85 @@ static char svd_1_3_types[] = { }; /* A, tau */ -static char qr_r_raw_types[] = { +static const char qr_r_raw_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* 
A, tau, q */ -static char qr_reduced_types[] = { +static const char qr_reduced_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, tau, q */ -static char qr_complete_types[] = { +static const char qr_complete_types[] = { NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, NPY_CDOUBLE, }; /* A, b, rcond, x, resid, rank, s, */ -static char lstsq_types[] = { +static const char lstsq_types[] = { NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE, NPY_CFLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_CFLOAT, NPY_FLOAT, NPY_INT, NPY_FLOAT, NPY_CDOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_CDOUBLE, NPY_DOUBLE, NPY_INT, NPY_DOUBLE, }; +/* + * Function to process core dimensions of a gufunc with two input core + * dimensions m and n, and one output core dimension p which must be + * min(m, n). The parameters m_index, n_index and p_index indicate + * the locations of the core dimensions in core_dims[]. + */ +static int +mnp_min_indexed_process_core_dims(PyUFuncObject *gufunc, + npy_intp core_dims[], + npy_intp m_index, + npy_intp n_index, + npy_intp p_index) +{ + npy_intp m = core_dims[m_index]; + npy_intp n = core_dims[n_index]; + npy_intp p = core_dims[p_index]; + npy_intp required_p = m > n ? n : m; /* min(m, n) */ + if (p == -1) { + core_dims[p_index] = required_p; + return 0; + } + if (p != required_p) { + PyErr_Format(PyExc_ValueError, + "core output dimension p must be min(m, n), where " + "m and n are the core dimensions of the inputs. Got " + "m=%zd and n=%zd, so p must be %zd, but got p=%zd.", + m, n, required_p, p); + return -1; + } + return 0; +} + +/* + * Function to process core dimensions of a gufunc with two input core + * dimensions m and n, and one output core dimension p which must be + * min(m, n). There can be only those three core dimensions in the + * gufunc shape signature. 
+ */ +static int +mnp_min_process_core_dims(PyUFuncObject *gufunc, npy_intp core_dims[]) +{ + return mnp_min_indexed_process_core_dims(gufunc, core_dims, 0, 1, 2); +} + +/* + * Process the core dimensions for the lstsq gufunc. + */ +static int +lstsq_process_core_dims(PyUFuncObject *gufunc, npy_intp core_dims[]) +{ + return mnp_min_indexed_process_core_dims(gufunc, core_dims, 0, 1, 3); +} + + typedef struct gufunc_descriptor_struct { const char *name; const char *signature; @@ -4313,7 +4415,8 @@ typedef struct gufunc_descriptor_struct { int nin; int nout; PyUFuncGenericFunction *funcs; - char *types; + const char *types; + PyUFunc_ProcessCoreDimsFunc *process_core_dims_func; } GUFUNC_DESCRIPTOR_t; GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { @@ -4326,7 +4429,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(),()\" \n", 4, 1, 2, FUNC_ARRAY_NAME(slogdet), - slogdet_types + slogdet_types, + nullptr }, { "det", @@ -4335,7 +4439,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->()\" \n", 4, 1, 1, FUNC_ARRAY_NAME(det), - equal_2_types + equal_2_types, + nullptr }, { "eigh_lo", @@ -4347,7 +4452,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 4, 1, 2, FUNC_ARRAY_NAME(eighlo), - eigh_types + eigh_types, + nullptr }, { "eigh_up", @@ -4359,7 +4465,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 4, 1, 2, FUNC_ARRAY_NAME(eighup), - eigh_types + eigh_types, + nullptr }, { "eigvalsh_lo", @@ -4371,7 +4478,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(eigvalshlo), - eighvals_types + eighvals_types, + nullptr }, { "eigvalsh_up", @@ -4383,7 +4491,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(eigvalshup), - eighvals_types + eighvals_types, + nullptr }, { "solve", @@ -4394,7 +4503,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m),(m,n)->(m,n)\" \n", 4, 2, 1, FUNC_ARRAY_NAME(solve), - 
equal_3_types + equal_3_types, + nullptr }, { "solve1", @@ -4405,7 +4515,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m),(m)->(m)\" \n", 4, 2, 1, FUNC_ARRAY_NAME(solve1), - equal_3_types + equal_3_types, + nullptr }, { "inv", @@ -4416,7 +4527,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\" \n", 4, 1, 1, FUNC_ARRAY_NAME(inv), - equal_2_types + equal_2_types, + nullptr }, { "cholesky_lo", @@ -4426,7 +4538,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\"\n", 4, 1, 1, FUNC_ARRAY_NAME(cholesky_lo), - equal_2_types + equal_2_types, + nullptr }, { "cholesky_up", @@ -4436,55 +4549,36 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m,m)\"\n", 4, 1, 1, FUNC_ARRAY_NAME(cholesky_up), - equal_2_types + equal_2_types, + nullptr }, { - "svd_m", - "(m,n)->(m)", - "svd when n>=m. ", + "svd", + "(m,n)->(p)", + "Singular values of array with shape (m, n).\n" + "Return value is 1-d array with shape (min(m, n),).", 4, 1, 1, FUNC_ARRAY_NAME(svd_N), - svd_1_1_types + svd_1_1_types, + mnp_min_process_core_dims }, { - "svd_n", - "(m,n)->(n)", - "svd when n<=m", - 4, 1, 1, - FUNC_ARRAY_NAME(svd_N), - svd_1_1_types - }, - { - "svd_m_s", - "(m,n)->(m,m),(m),(m,n)", - "svd when m<=n", - 4, 1, 3, - FUNC_ARRAY_NAME(svd_S), - svd_1_3_types - }, - { - "svd_n_s", - "(m,n)->(m,n),(n),(n,n)", - "svd when m>=n", + "svd_s", + "(m,n)->(m,p),(p),(p,n)", + "svd (full_matrices=False)", 4, 1, 3, FUNC_ARRAY_NAME(svd_S), - svd_1_3_types - }, - { - "svd_m_f", - "(m,n)->(m,m),(m),(n,n)", - "svd when m<=n", - 4, 1, 3, - FUNC_ARRAY_NAME(svd_A), - svd_1_3_types + svd_1_3_types, + mnp_min_process_core_dims }, { - "svd_n_f", - "(m,n)->(m,m),(n),(n,n)", - "svd when m>=n", + "svd_f", + "(m,n)->(m,m),(p),(n,n)", + "svd (full_matrices=True)", 4, 1, 3, FUNC_ARRAY_NAME(svd_A), - svd_1_3_types + svd_1_3_types, + mnp_min_process_core_dims }, { "eig", @@ -4495,7 +4589,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { " \"(m,m)->(m),(m,m)\" \n", 3, 1, 2, 
FUNC_ARRAY_NAME(eig), - eig_types + eig_types, + nullptr }, { "eigvals", @@ -4504,25 +4599,18 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "Results in a vector of eigenvalues. \n", 3, 1, 1, FUNC_ARRAY_NAME(eigvals), - eigvals_types + eigvals_types, + nullptr }, { - "qr_r_raw_m", - "(m,n)->(m)", + "qr_r_raw", + "(m,n)->(p)", "Compute TAU vector for the last two dimensions \n"\ - "and broadcast to the rest. For m <= n. \n", - 2, 1, 1, - FUNC_ARRAY_NAME(qr_r_raw), - qr_r_raw_types - }, - { - "qr_r_raw_n", - "(m,n)->(n)", - "Compute TAU vector for the last two dimensions \n"\ - "and broadcast to the rest. For m > n. \n", + "and broadcast to the rest. \n", 2, 1, 1, FUNC_ARRAY_NAME(qr_r_raw), - qr_r_raw_types + qr_r_raw_types, + mnp_min_process_core_dims }, { "qr_reduced", @@ -4531,7 +4619,8 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "and broadcast to the rest. \n", 2, 2, 1, FUNC_ARRAY_NAME(qr_reduced), - qr_reduced_types + qr_reduced_types, + nullptr }, { "qr_complete", @@ -4540,37 +4629,30 @@ GUFUNC_DESCRIPTOR_t gufunc_descriptors [] = { "and broadcast to the rest. For m > n. \n", 2, 2, 1, FUNC_ARRAY_NAME(qr_complete), - qr_complete_types - }, - { - "lstsq_m", - "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(m)", - "least squares on the last two dimensions and broadcast to the rest. \n"\ - "For m <= n. \n", - 4, 3, 4, - FUNC_ARRAY_NAME(lstsq), - lstsq_types + qr_complete_types, + nullptr }, { - "lstsq_n", - "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(n)", - "least squares on the last two dimensions and broadcast to the rest. \n"\ - "For m >= n, meaning that residuals are produced. 
\n", + "lstsq", + "(m,n),(m,nrhs),()->(n,nrhs),(nrhs),(),(p)", + "least squares on the last two dimensions and broadcast to the rest.", 4, 3, 4, FUNC_ARRAY_NAME(lstsq), - lstsq_types + lstsq_types, + lstsq_process_core_dims } }; static int addUfuncs(PyObject *dictionary) { - PyObject *f; + PyUFuncObject *f; int i; const int gufunc_count = sizeof(gufunc_descriptors)/ sizeof(gufunc_descriptors[0]); for (i = 0; i < gufunc_count; i++) { GUFUNC_DESCRIPTOR_t* d = &gufunc_descriptors[i]; - f = PyUFunc_FromFuncAndDataAndSignature(d->funcs, + f = (PyUFuncObject *) PyUFunc_FromFuncAndDataAndSignature( + d->funcs, array_of_nulls, d->types, d->ntypes, @@ -4584,10 +4666,11 @@ addUfuncs(PyObject *dictionary) { if (f == NULL) { return -1; } + f->process_core_dims_func = d->process_core_dims_func; #if _UMATH_LINALG_DEBUG dump_ufunc_object((PyUFuncObject*) f); #endif - int ret = PyDict_SetItemString(dictionary, d->name, f); + int ret = PyDict_SetItemString(dictionary, d->name, (PyObject *)f); Py_DECREF(f); if (ret < 0) { return -1; @@ -4599,7 +4682,7 @@ addUfuncs(PyObject *dictionary) { /* -------------------------------------------------------------------------- */ - /* Module initialization stuff */ + /* Module initialization and state */ static PyMethodDef UMath_LinAlgMethods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ @@ -4651,11 +4734,24 @@ PyMODINIT_FUNC PyInit__umath_linalg(void) return NULL; } +#if PY_VERSION_HEX < 0x30d00b3 && !HAVE_EXTERNAL_LAPACK + lapack_lite_lock = PyThread_allocate_lock(); + if (lapack_lite_lock == NULL) { + PyErr_NoMemory(); + return NULL; + } +#endif + #ifdef HAVE_BLAS_ILP64 PyDict_SetItemString(d, "_ilp64", Py_True); #else PyDict_SetItemString(d, "_ilp64", Py_False); #endif +#if Py_GIL_DISABLED + // signal this module supports running with the GIL disabled + PyUnstable_Module_SetGIL(m, Py_MOD_GIL_NOT_USED); +#endif + return m; } diff --git a/numpy/ma/__init__.py b/numpy/ma/__init__.py index 870cc4ef2daa..03e9fcd075cc 100644 --- 
a/numpy/ma/__init__.py +++ b/numpy/ma/__init__.py @@ -22,8 +22,8 @@ >>> m = np.ma.masked_array(x, np.isnan(x)) >>> m -masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --], - mask = [False False False True False False False True], +masked_array(data=[2.0, 1.0, 3.0, --, 5.0, 2.0, 3.0, --], + mask=[False, False, False, True, False, False, False, True], fill_value=1e+20) Here, we construct a masked array that suppress all ``NaN`` values. We diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 8316b481e827..01eb8f9415a9 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -37,6 +37,7 @@ from numpy import array as narray, expand_dims, iinfo, finfo from numpy._core.numeric import normalize_axis_tuple from numpy._utils._inspect import getargspec, formatargspec +from numpy._utils import set_module __all__ = [ @@ -285,6 +286,7 @@ def default_fill_value(obj): Examples -------- + >>> import numpy as np >>> np.ma.default_fill_value(1) 999999 >>> np.ma.default_fill_value(np.array([1.1, 2., np.pi])) @@ -347,6 +349,7 @@ def minimum_fill_value(obj): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.int8() >>> ma.minimum_fill_value(a) @@ -398,6 +401,7 @@ def maximum_fill_value(obj): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.int8() >>> ma.maximum_fill_value(a) @@ -524,6 +528,7 @@ def set_fill_value(a, fill_value): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5) >>> a @@ -592,6 +597,7 @@ def common_fill_value(a, b): Examples -------- + >>> import numpy as np >>> x = np.ma.array([0, 1.], fill_value=3) >>> y = np.ma.array([0, 1.], fill_value=3) >>> np.ma.common_fill_value(x, y) @@ -607,9 +613,12 @@ def common_fill_value(a, b): def filled(a, fill_value=None): """ - Return input as an array with masked data replaced by a fill value. + Return input as an `~numpy.ndarray`, with masked values replaced by + `fill_value`. If `a` is not a `MaskedArray`, `a` itself is returned. 
+ If `a` is a `MaskedArray` with no masked values, then ``a.data`` is + returned. If `a` is a `MaskedArray` and `fill_value` is None, `fill_value` is set to ``a.fill_value``. @@ -633,6 +642,7 @@ def filled(a, fill_value=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], @@ -712,6 +722,7 @@ def getdata(a, subok=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -775,6 +786,7 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x masked_array(data=[--, -1.0, nan, inf], @@ -876,7 +888,7 @@ def __call__(self, a, b): self.tolerance = np.finfo(float).tiny # don't call ma ufuncs from __array_wrap__ which would fail for scalars a, b = np.asarray(a), np.asarray(b) - with np.errstate(invalid='ignore'): + with np.errstate(all='ignore'): return umath.absolute(a) * self.tolerance >= umath.absolute(b) @@ -1367,6 +1379,7 @@ def make_mask_descr(ndtype): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], ... 
'formats':[np.float32, np.int64]}) @@ -1401,6 +1414,7 @@ def getmask(a): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -1463,6 +1477,7 @@ def getmaskarray(arr): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a @@ -1520,6 +1535,7 @@ def is_mask(m): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> m @@ -1604,6 +1620,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> m = [True, False, True, True] >>> ma.make_mask(m) @@ -1692,6 +1709,7 @@ def make_mask_none(newshape, dtype=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> ma.make_mask_none((3,)) array([False, False, False]) @@ -1754,6 +1772,7 @@ def mask_or(m1, m2, copy=False, shrink=True): Examples -------- + >>> import numpy as np >>> m1 = np.ma.make_mask([0, 1, 1, 0]) >>> m2 = np.ma.make_mask([1, 0, 0, 0]) >>> np.ma.mask_or(m1, m2) @@ -1797,6 +1816,7 @@ def flatten_mask(mask): Examples -------- + >>> import numpy as np >>> mask = np.array([0, 0, 1]) >>> np.ma.flatten_mask(mask) array([False, False, True]) @@ -1886,6 +1906,7 @@ def masked_where(condition, a, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -1983,6 +2004,7 @@ def masked_greater(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2009,6 +2031,7 @@ def masked_greater_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2035,6 +2058,7 @@ def masked_less(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2061,6 +2085,7 @@ def masked_less_equal(x, value, 
copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2087,6 +2112,7 @@ def masked_not_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2117,6 +2143,7 @@ def masked_equal(x, value, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a @@ -2150,6 +2177,7 @@ def masked_inside(x, v1, v2, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) @@ -2190,6 +2218,7 @@ def masked_outside(x, v1, v2, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_outside(x, -0.3, 0.3) @@ -2243,6 +2272,7 @@ def masked_object(x, value, copy=True, shrink=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> food = np.array(['green_eggs', 'ham'], dtype=object) >>> # don't eat spoiled food @@ -2319,6 +2349,7 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = np.array([1, 1.1, 2, 1.1, 3]) >>> ma.masked_values(x, 1.1) @@ -2367,6 +2398,7 @@ def masked_invalid(a, copy=True): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(5, dtype=float) >>> a[2] = np.nan @@ -2528,6 +2560,7 @@ def flatten_structured_array(a): Examples -------- + >>> import numpy as np >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) >>> np.ma.flatten_structured_array(a) @@ -2633,10 +2666,11 @@ class MaskedIterator: Examples -------- + >>> import numpy as np >>> x = np.ma.array(arange(6).reshape(2, 3)) >>> fl = x.flat >>> type(fl) - + >>> for item in fl: ... print(item) ... 
@@ -2695,6 +2729,7 @@ def __next__(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([3, 2], mask=[0, 1]) >>> fl = x.flat >>> next(fl) @@ -2717,6 +2752,7 @@ def __next__(self): return d +@set_module("numpy.ma") class MaskedArray(ndarray): """ An array class with possibly masked values. @@ -2772,6 +2808,7 @@ class MaskedArray(ndarray): Examples -------- + >>> import numpy as np The ``mask`` can be initialized with an array of boolean values with the same shape as ``data``. @@ -3112,7 +3149,7 @@ def __array_wrap__(self, obj, context=None, return_scalar=False): input_args = args[:func.nin] m = reduce(mask_or, [getmaskarray(arg) for arg in input_args]) # Get the domain mask - domain = ufunc_domain.get(func, None) + domain = ufunc_domain.get(func) if domain is not None: # Take the domain, and make sure it's a ndarray with np.errstate(divide='ignore', invalid='ignore'): @@ -3627,6 +3664,7 @@ def hardmask(self): Examples -------- + >>> import numpy as np >>> x = np.arange(10) >>> m = np.ma.masked_array(x, x>5) >>> assert not m.hardmask @@ -3692,6 +3730,7 @@ def shrink_mask(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2 ], [3, 4]], mask=[0]*4) >>> x.mask array([[False, False], @@ -3752,6 +3791,7 @@ def fill_value(self): Examples -------- + >>> import numpy as np >>> for dt in [np.int32, np.int64, np.float64, np.complex128]: ... np.ma.array([0, 1], dtype=dt).get_fill_value() ... 
@@ -3836,6 +3876,7 @@ def filled(self, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() array([ 1, 2, -999, 4, -999]) @@ -3903,6 +3944,7 @@ def compressed(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(5), mask=[0]*2 + [1]*3) >>> x.compressed() array([0, 1]) @@ -3956,6 +3998,7 @@ def compress(self, condition, axis=None, out=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4529,6 +4572,7 @@ def imag(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.imag masked_array(data=[1.0, --, 1.6], @@ -4556,6 +4600,7 @@ def real(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], @@ -4698,6 +4743,7 @@ def ravel(self, order='C'): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4767,6 +4813,7 @@ def reshape(self, *s, **kwargs): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) >>> x masked_array( @@ -4842,6 +4889,7 @@ def put(self, indices, values, mode='raise'): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -4910,6 +4958,7 @@ def ids(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) # may vary @@ -4936,6 +4985,7 @@ def iscontiguous(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3]) >>> x.iscontiguous() True @@ -4970,6 +5020,7 @@ def all(self, axis=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> 
np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) @@ -5064,6 +5115,7 @@ def nonzero(self): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x @@ -5190,6 +5242,7 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -5261,6 +5314,7 @@ def cumsum(self, axis=None, dtype=None, out=None): Examples -------- + >>> import numpy as np >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0]) >>> marr.cumsum() masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], @@ -5368,6 +5422,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data=[1, 2, --], @@ -5430,6 +5485,7 @@ def anom(self, axis=None, dtype=None): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1,2,3]) >>> a.anom() masked_array(data=[-1., 0., 1.], @@ -5556,6 +5612,7 @@ def round(self, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.array([1.35, 2.5, 1.5, 1.75, 2.25, 2.75], ... 
mask=[0, 0, 0, 1, 0, 0]) @@ -5634,6 +5691,7 @@ def argsort(self, axis=np._NoValue, kind=None, order=None, endwith=True, Examples -------- + >>> import numpy as np >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a masked_array(data=[3, 2, --], @@ -5691,6 +5749,7 @@ def argmin(self, axis=None, fill_value=None, out=None, *, Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) >>> x @@ -5736,6 +5795,7 @@ def argmax(self, axis=None, fill_value=None, out=None, *, Examples -------- + >>> import numpy as np >>> a = np.arange(6).reshape(2,3) >>> a.argmax() 5 @@ -5799,6 +5859,7 @@ def sort(self, axis=-1, kind=None, order=None, endwith=True, Examples -------- + >>> import numpy as np >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() @@ -6084,6 +6145,7 @@ def ptp(self, axis=None, out=None, fill_value=None, keepdims=False): Examples -------- + >>> import numpy as np >>> x = np.ma.MaskedArray([[4, 9, 2, 10], ... 
[6, 9, 7, 12]]) @@ -6238,6 +6300,7 @@ def tolist(self, fill_value=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3], [4,5,6], [7,8,9]], mask=[0] + [1,0]*4) >>> x.tolist() [[1, None, 3], [None, 5, None], [7, None, 9]] @@ -6317,6 +6380,7 @@ def tobytes(self, fill_value=None, order='C'): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tobytes() b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' @@ -6366,6 +6430,7 @@ def toflex(self): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( @@ -6626,6 +6691,7 @@ def isMaskedArray(x): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = np.eye(3, 3) >>> a @@ -6810,6 +6876,7 @@ def is_masked(x): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x @@ -7053,6 +7120,7 @@ def power(a, b, third=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -7147,6 +7215,7 @@ def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None, *, Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -7186,6 +7255,7 @@ def compressed(x): Examples -------- + >>> import numpy as np Create an array with negative values masked: @@ -7234,6 +7304,7 @@ def concatenate(arrays, axis=0): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked @@ -7284,6 +7355,7 @@ def diag(v, k=0): Examples -------- + >>> import numpy as np Create an array with negative values masked: @@ -7332,6 +7404,33 @@ def left_shift(a, n): -------- numpy.left_shift + Examples + -------- + Shift 
with a masked array: + + >>> arr = np.ma.array([10, 20, 30], mask=[False, True, False]) + >>> np.ma.left_shift(arr, 1) + masked_array(data=[20, --, 60], + mask=[False, True, False], + fill_value=999999) + + Large shift: + + >>> np.ma.left_shift(10, 10) + masked_array(data=10240, + mask=False, + fill_value=999999) + + Shift with a scalar and an array: + + >>> scalar = 10 + >>> arr = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> np.ma.left_shift(scalar, arr) + masked_array(data=[20, --, 80], + mask=[False, True, False], + fill_value=999999) + + """ m = getmask(a) if m is nomask: @@ -7355,6 +7454,7 @@ def right_shift(a, n): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11, 3, 8, 1] >>> mask = [0, 0, 0, 1] @@ -7389,6 +7489,28 @@ def put(a, indices, values, mode='raise'): -------- MaskedArray.put + Examples + -------- + Putting values in a masked array: + + >>> a = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.put(a, [1, 3], [10, 30]) + >>> a + masked_array(data=[ 1, 10, 3, 30], + mask=False, + fill_value=999999) + + Using put with a 2D array: + + >>> b = np.ma.array([[1, 2], [3, 4]], mask=[[False, True], [False, False]]) + >>> np.ma.put(b, [[0, 1], [1, 0]], [[10, 20], [30, 40]]) + >>> b + masked_array( + data=[[40, 30], + [ 3, 4]], + mask=False, + fill_value=999999) + """ # We can't use 'frommethod', the order of arguments is different try: @@ -7415,6 +7537,7 @@ def putmask(a, mask, values): # , mode='raise'): Examples -------- + >>> import numpy as np >>> arr = [[1, 2], [3, 4]] >>> mask = [[1, 0], [0, 0]] >>> x = np.ma.array(arr, mask=mask) @@ -7465,6 +7588,7 @@ def transpose(a, axes=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = ma.arange(4).reshape((2,2)) >>> x[1, 1] = ma.masked @@ -7501,6 +7625,37 @@ def reshape(a, new_shape, order='C'): -------- MaskedArray.reshape : equivalent function + Examples + -------- + Reshaping a 1-D array: + + >>> a = np.ma.array([1, 
2, 3, 4]) + >>> np.ma.reshape(a, (2, 2)) + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 2-D array: + + >>> b = np.ma.array([[1, 2], [3, 4]]) + >>> np.ma.reshape(b, (1, 4)) + masked_array(data=[[1, 2, 3, 4]], + mask=False, + fill_value=999999) + + Reshaping a 1-D array with a mask: + + >>> c = np.ma.array([1, 2, 3, 4], mask=[False, True, False, False]) + >>> np.ma.reshape(c, (2, 2)) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) + """ # We can't use 'frommethod', it whine about some parameters. Dmmit. try: @@ -7525,6 +7680,7 @@ def resize(x, new_shape): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.array([[1, 2] ,[3, 4]]) >>> a[0, 1] = ma.masked @@ -7666,6 +7822,7 @@ def diff(a, /, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.array([1, 2, 3, 4, 7, 0, 2, 3]) >>> x = np.ma.masked_where(a < 2, a) >>> np.ma.diff(x) @@ -7767,6 +7924,7 @@ def where(condition, x=_NoValue, y=_NoValue): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... 
[0, 1, 0]]) @@ -7864,6 +8022,7 @@ def choose(indices, choices, out=None, mode='raise'): Examples -------- + >>> import numpy as np >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) @@ -7927,6 +8086,7 @@ def round_(a, decimals=0, out=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> x = [11.2, -3.973, 0.801, -1.41] >>> mask = [0, 0, 0, 1] @@ -8011,6 +8171,7 @@ def dot(a, b, strict=False, out=None): Examples -------- + >>> import numpy as np >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) >>> np.ma.dot(a, b) @@ -8139,6 +8300,37 @@ def correlate(a, v, mode='valid', propagate_mask=True): See Also -------- numpy.correlate : Equivalent function in the top-level NumPy module. + + Examples + -------- + Basic correlation: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid') + masked_array(data=[2], + mask=[False], + fill_value=999999) + + Correlation with masked elements: + + >>> a = np.ma.array([1, 2, 3], mask=[False, True, False]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='valid', propagate_mask=True) + masked_array(data=[--], + mask=[ True], + fill_value=999999, + dtype=int64) + + Correlation with different modes and mixed array types: + + >>> a = np.ma.array([1, 2, 3]) + >>> v = np.ma.array([0, 1, 0]) + >>> np.ma.correlate(a, v, mode='full') + masked_array(data=[0, 1, 2, 3, 0], + mask=[False, False, False, False, False], + fill_value=999999) + """ return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask) @@ -8198,6 +8390,7 @@ def allequal(a, b, fill_value=True): Examples -------- + >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], @@ -8275,6 +8468,7 @@ def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): 
Examples -------- + >>> import numpy as np >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a masked_array(data=[10000000000.0, 1e-07, --], @@ -8367,6 +8561,7 @@ def asarray(a, dtype=None, order=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10.).reshape(2, 5) >>> x array([[0., 1., 2., 3., 4.], @@ -8378,7 +8573,7 @@ def asarray(a, dtype=None, order=None): mask=False, fill_value=1e+20) >>> type(np.ma.asarray(x)) - + """ order = order or 'C' @@ -8414,6 +8609,7 @@ def asanyarray(a, dtype=None): Examples -------- + >>> import numpy as np >>> x = np.arange(10.).reshape(2, 5) >>> x array([[0., 1., 2., 3., 4.], @@ -8425,7 +8621,7 @@ def asanyarray(a, dtype=None): mask=False, fill_value=1e+20) >>> type(np.ma.asanyarray(x)) - + """ # workaround for #8666, to preserve identity. Ideally the bottom line @@ -8469,6 +8665,7 @@ def fromflex(fxarray): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) >>> rec = x.toflex() >>> rec @@ -8689,6 +8886,7 @@ def append(a, b, axis=None): Examples -------- + >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_values([1, 2, 3], 2) >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index d6cc0a782c23..826250d4c3a8 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -15,9 +15,7 @@ from numpy import ( angle as angle ) -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) +_ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True) _DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) __all__: list[str] @@ -165,7 +163,7 @@ class MaskedIterator: def __setitem__(self, index, value): ... def __next__(self): ... 
-class MaskedArray(ndarray[_ShapeType, _DType_co]): +class MaskedArray(ndarray[_ShapeType_co, _DType_co]): __array_priority__: Any def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ... def __array_finalize__(self, obj): ... @@ -300,7 +298,7 @@ class MaskedArray(ndarray[_ShapeType, _DType_co]): def __reduce__(self): ... def __deepcopy__(self, memo=...): ... -class mvoid(MaskedArray[_ShapeType, _DType_co]): +class mvoid(MaskedArray[_ShapeType_co, _DType_co]): def __new__( self, data, diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 743f4bead446..8d41e939632f 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -70,6 +70,7 @@ def count_masked(arr, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(9).reshape((3,3)) >>> a = np.ma.array(a) >>> a[1, 0] = np.ma.masked @@ -133,6 +134,7 @@ def masked_all(shape, dtype=float): Examples -------- + >>> import numpy as np >>> np.ma.masked_all((3, 3)) masked_array( data=[[--, --, --], @@ -196,6 +198,7 @@ def masked_all_like(arr): Examples -------- + >>> import numpy as np >>> arr = np.zeros((2, 3), dtype=np.float32) >>> arr array([[0., 0., 0.], @@ -499,6 +502,7 @@ def apply_over_axes(func, a, axes): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(24).reshape(2,3,4) >>> a[:,0,1] = np.ma.masked >>> a[:,1,:] = np.ma.masked @@ -608,6 +612,7 @@ def average(a, axis=None, weights=None, returned=False, *, Examples -------- + >>> import numpy as np >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True]) >>> np.ma.average(a, weights=[3, 1, 0, 0]) 1.25 @@ -761,6 +766,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4) >>> np.ma.median(x) 1.5 @@ -895,6 +901,7 @@ def compress_nd(x, axis=None): Examples -------- + >>> import numpy as np >>> arr = [[1, 
2], [3, 4]] >>> mask = [[0, 1], [0, 0]] >>> x = np.ma.array(arr, mask=mask) @@ -956,6 +963,7 @@ def compress_rowcols(x, axis=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) @@ -1009,12 +1017,13 @@ def compress_rows(a): Examples -------- + >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) >>> np.ma.compress_rows(a) array([[6, 7, 8]]) - + """ a = asarray(a) if a.ndim != 2: @@ -1047,6 +1056,7 @@ def compress_cols(a): Examples -------- + >>> import numpy as np >>> a = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0], ... [1, 0, 0], ... [0, 0, 0]]) @@ -1107,6 +1117,7 @@ def mask_rowcols(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a @@ -1163,6 +1174,7 @@ def mask_rows(a, axis=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a @@ -1213,6 +1225,7 @@ def mask_cols(a, axis=np._NoValue): Examples -------- + >>> import numpy as np >>> a = np.zeros((3, 3), dtype=int) >>> a[1, 1] = 1 >>> a @@ -1266,6 +1279,7 @@ def ediff1d(arr, to_end=None, to_begin=None): Examples -------- + >>> import numpy as np >>> arr = np.ma.array([1, 2, 4, 7, 0]) >>> np.ma.ediff1d(arr) masked_array(data=[ 1, 2, 3, -7], @@ -1303,6 +1317,7 @@ def unique(ar1, return_index=False, return_inverse=False): Examples -------- + >>> import numpy as np >>> a = [1, 2, 1000, 2, 3] >>> mask = [0, 0, 1, 0, 0] >>> masked_a = np.ma.masked_array(a, mask) @@ -1354,6 +1369,7 @@ def intersect1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) >>> np.ma.intersect1d(x, y) @@ -1383,11 +1399,12 @@ def setxor1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> ar1 = 
np.ma.array([1, 2, 3, 2, 4]) >>> ar2 = np.ma.array([2, 3, 5, 7, 5]) >>> np.ma.setxor1d(ar1, ar2) masked_array(data=[1, 4, 5, 7], - mask=False, + mask=False, fill_value=999999) """ @@ -1395,7 +1412,7 @@ def setxor1d(ar1, ar2, assume_unique=False): ar1 = unique(ar1) ar2 = unique(ar2) - aux = ma.concatenate((ar1, ar2)) + aux = ma.concatenate((ar1, ar2), axis=None) if aux.size == 0: return aux aux.sort() @@ -1427,6 +1444,7 @@ def in1d(ar1, ar2, assume_unique=False, invert=False): Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) >>> ar2 = [0, 2] >>> np.ma.in1d(ar1, ar2) @@ -1477,6 +1495,7 @@ def isin(element, test_elements, assume_unique=False, invert=False): Examples -------- + >>> import numpy as np >>> element = np.ma.array([1, 2, 3, 4, 5, 6]) >>> test_elements = [0, 2] >>> np.ma.isin(element, test_elements) @@ -1502,6 +1521,7 @@ def union1d(ar1, ar2): Examples -------- + >>> import numpy as np >>> ar1 = np.ma.array([1, 2, 3, 4]) >>> ar2 = np.ma.array([3, 4, 5, 6]) >>> np.ma.union1d(ar1, ar2) @@ -1526,6 +1546,7 @@ def setdiff1d(ar1, ar2, assume_unique=False): Examples -------- + >>> import numpy as np >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) >>> np.ma.setdiff1d(x, [1, 2]) masked_array(data=[3, --], @@ -1569,7 +1590,14 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): tup = (None, slice(None)) # if y is None: - xnotmask = np.logical_not(xmask).astype(int) + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. 
+ if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(xmask).astype(xnm_dtype) else: y = array(y, copy=False, ndmin=2, dtype=float) ymask = ma.getmaskarray(y) @@ -1584,7 +1612,16 @@ def _covhelper(x, y=None, rowvar=True, allow_masked=True): x._sharedmask = False y._sharedmask = False x = ma.concatenate((x, y), axis) - xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int) + # Check if we can guarantee that the integers in the (N - ddof) + # normalisation can be accurately represented with single-precision + # before computing the dot product. + if x.shape[0] > 2 ** 24 or x.shape[1] > 2 ** 24: + xnm_dtype = np.float64 + else: + xnm_dtype = np.float32 + xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype( + xnm_dtype + ) x -= x.mean(axis=rowvar)[tup] return (x, xnotmask, rowvar) @@ -1643,6 +1680,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): Examples -------- + >>> import numpy as np >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) >>> y = np.ma.array([[1, 0], [0, 1]], mask=[0, 0, 1, 1]) >>> np.ma.cov(x, y) @@ -1657,7 +1695,7 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): [ True, True, True, True]], fill_value=1e+20, dtype=float64) - + """ # Check inputs if ddof is not None and ddof != int(ddof): @@ -1671,11 +1709,17 @@ def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None): (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof - result = (dot(x.T, x.conj(), strict=False) / fact).squeeze() + fact = np.dot(xnotmask.T, xnotmask) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x.T, 0), filled(x.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() else: - fact = np.dot(xnotmask, xnotmask.T) * 1. 
- ddof - result = (dot(x, x.T.conj(), strict=False) / fact).squeeze() + fact = np.dot(xnotmask, xnotmask.T) - ddof + mask = np.less_equal(fact, 0, dtype=bool) + with np.errstate(divide="ignore", invalid="ignore"): + data = np.dot(filled(x, 0), filled(x.T.conj(), 0)) / fact + result = ma.array(data, mask=mask).squeeze() return result @@ -1729,6 +1773,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, Examples -------- + >>> import numpy as np >>> x = np.ma.array([[0, 1], [1, 1]], mask=[0, 1, 0, 1]) >>> np.ma.corrcoef(x) masked_array( @@ -1744,39 +1789,15 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True, if bias is not np._NoValue or ddof is not np._NoValue: # 2015-03-15, 1.10 warnings.warn(msg, DeprecationWarning, stacklevel=2) - # Get the data - (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked) - # Compute the covariance matrix - if not rowvar: - fact = np.dot(xnotmask.T, xnotmask) * 1. - c = (dot(x.T, x.conj(), strict=False) / fact).squeeze() - else: - fact = np.dot(xnotmask, xnotmask.T) * 1. - c = (dot(x, x.T.conj(), strict=False) / fact).squeeze() - # Check whether we have a scalar + # Estimate the covariance matrix. + corr = cov(x, y, rowvar, allow_masked=allow_masked) + # The non-masked version returns a masked value for a scalar. 
try: - diag = ma.diagonal(c) + std = ma.sqrt(ma.diagonal(corr)) except ValueError: - return 1 - # - if xnotmask.all(): - _denom = ma.sqrt(ma.multiply.outer(diag, diag)) - else: - _denom = diagflat(diag) - _denom._sharedmask = False # We know return is always a copy - n = x.shape[1 - rowvar] - if rowvar: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols(vstack((x[i], x[j]))).var(axis=1) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - else: - for i in range(n - 1): - for j in range(i + 1, n): - _x = mask_cols( - vstack((x[:, i], x[:, j]))).var(axis=1) - _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x)) - return c / _denom + return ma.MaskedConstant() + corr /= ma.multiply.outer(std, std) + return corr #####-------------------------------------------------------------------------- #---- --- Concatenation helpers --- @@ -1793,6 +1814,8 @@ class MAxisConcatenator(AxisConcatenator): mr_class """ + __slots__ = () + concatenate = staticmethod(concatenate) @classmethod @@ -1824,12 +1847,15 @@ class mr_class(MAxisConcatenator): Examples -------- + >>> import numpy as np >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] masked_array(data=[1, 2, 3, ..., 4, 5, 6], mask=False, fill_value=999999) """ + __slots__ = () + def __init__(self): MAxisConcatenator.__init__(self, 0) @@ -1867,6 +1893,7 @@ def ndenumerate(a, compressed=True): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(9).reshape((3, 3)) >>> a[1, 0] = np.ma.masked >>> a[1, 2] = np.ma.masked @@ -1936,6 +1963,7 @@ def flatnotmasked_edges(a): Examples -------- + >>> import numpy as np >>> a = np.ma.arange(10) >>> np.ma.flatnotmasked_edges(a) array([0, 9]) @@ -1993,6 +2021,7 @@ def notmasked_edges(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(9).reshape((3, 3)) >>> m = np.zeros_like(a) >>> m[1:, 1:] = 1 @@ -2042,6 +2071,7 @@ def flatnotmasked_contiguous(a): Examples -------- + >>> import numpy as np >>> a = 
np.ma.arange(10) >>> np.ma.flatnotmasked_contiguous(a) [slice(0, 10, None)] @@ -2103,6 +2133,7 @@ def notmasked_contiguous(a, axis=None): Examples -------- + >>> import numpy as np >>> a = np.arange(12).reshape((3, 4)) >>> mask = np.zeros_like(a) >>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0 @@ -2204,6 +2235,7 @@ def clump_unmasked(a): Examples -------- + >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_unmasked(a) @@ -2243,6 +2275,7 @@ def clump_masked(a): Examples -------- + >>> import numpy as np >>> a = np.ma.masked_array(np.arange(10)) >>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked >>> np.ma.clump_masked(a) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index 56228b927080..8e458fe165af 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -1,5 +1,6 @@ from typing import Any -from numpy.lib.index_tricks import AxisConcatenator + +from numpy.lib._index_tricks_impl import AxisConcatenator from numpy.ma.core import ( dot as dot, diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi index 264807e05d57..85714420cb64 100644 --- a/numpy/ma/mrecords.pyi +++ b/numpy/ma/mrecords.pyi @@ -5,12 +5,10 @@ from numpy.ma import MaskedArray __all__: list[str] -# TODO: Set the `bound` to something more suitable once we -# have proper shape support -_ShapeType = TypeVar("_ShapeType", bound=Any) +_ShapeType_co = TypeVar("_ShapeType_co", covariant=True, bound=tuple[int, ...]) _DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True) -class MaskedRecords(MaskedArray[_ShapeType, _DType_co]): +class MaskedRecords(MaskedArray[_ShapeType_co, _DType_co]): def __new__( cls, shape, diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 99e869685e60..970ae2875493 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -251,7 +251,7 @@ def test_creation_with_list_of_maskedarrays_no_bool_cast(self): # The above only failed due a long chain of 
oddity, try also with # an object array that cannot be converted to bool always: - class NotBool(): + class NotBool: def __bool__(self): raise ValueError("not a bool!") masked_obj = np.ma.masked_array([NotBool(), 'b'], mask=[True, False]) @@ -2581,6 +2581,13 @@ def test_no_masked_nan_warnings(self): # also check that allclose uses ma ufuncs, to avoid warning allclose(m, 0.5) + def test_masked_array_underflow(self): + x = np.arange(0, 3, 0.1) + X = np.ma.array(x) + with np.errstate(under="raise"): + X2 = X/2.0 + np.testing.assert_array_equal(X2, x/2) + class TestMaskedArrayInPlaceArithmetic: # Test MaskedArray Arithmetic @@ -4624,7 +4631,7 @@ def test_masked_invalid_pandas(self): # getdata() used to be bad for pandas series due to its _data # attribute. This test is a regression test mainly and may be # removed if getdata() is adjusted. - class Series(): + class Series: _data = "nonsense" def __array__(self, dtype=None, copy=None): diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index ad6bdf38f45c..daf376b766d5 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -29,7 +29,7 @@ ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols, mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin, - diagflat, ndenumerate, stack, vstack + diagflat, ndenumerate, stack, vstack, _covhelper ) @@ -1287,6 +1287,26 @@ class TestCov: def setup_method(self): self.data = array(np.random.rand(12)) + def test_covhelper(self): + x = self.data + # Test not mask output type is a float. + assert_(_covhelper(x, rowvar=True)[1].dtype, np.float32) + assert_(_covhelper(x, y=x, rowvar=False)[1].dtype, np.float32) + # Test not mask output is equal after casting to float. 
+ mask = x > 0.5 + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), rowvar=True + )[1].astype(bool), + ~mask.reshape(1, -1), + ) + assert_array_equal( + _covhelper( + np.ma.masked_array(x, mask), y=x, rowvar=False + )[1].astype(bool), + np.vstack((~mask, ~mask)), + ) + def test_1d_without_missing(self): # Test cov on 1D variable w/o missing values x = self.data @@ -1661,6 +1681,25 @@ def test_setxor1d(self): # assert_array_equal([], setxor1d([], [])) + def test_setxor1d_unique(self): + # Test setxor1d with assume_unique=True + a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1]) + b = [1, 2, 3, 4, 5] + test = setxor1d(a, b, assume_unique=True) + assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1])) + # + a = array([1, 8, 2, 3], mask=[0, 1, 0, 0]) + b = array([6, 5, 4, 8], mask=[0, 0, 0, 1]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + # + a = array([[1], [8], [2], [3]]) + b = array([[6, 5], [4, 8]]) + test = setxor1d(a, b, assume_unique=True) + assert_(isinstance(test, MaskedArray)) + assert_equal(test, [1, 2, 3, 4, 5, 6]) + def test_isin(self): # the tests for in1d cover most of isin's behavior # if in1d is removed, would need to change those tests to test diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index dc2c561b888c..a364268a344b 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -70,7 +70,7 @@ def test_get(self): assert_equal(mbase_last.recordmask, True) assert_equal(mbase_last._mask.item(), (True, True, True)) assert_equal(mbase_last['a'], mbase['a'][-1]) - assert_((mbase_last['a'] is masked)) + assert_(mbase_last['a'] is masked) # as slice .......... 
mbase_sl = mbase[:2] assert_(isinstance(mbase_sl, mrecarray)) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 866f867c8eaa..99c07fcf8f87 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -18,8 +18,7 @@ def _convert_from_string(data): rows = data.split(';') newdata = [] - count = 0 - for row in rows: + for count, row in enumerate(rows): trow = row.split(',') newrow = [] for col in trow: @@ -29,7 +28,6 @@ def _convert_from_string(data): Ncols = len(newrow) elif len(newrow) != Ncols: raise ValueError("Rows not the same size.") - count += 1 newdata.append(newrow) return newdata @@ -56,6 +54,7 @@ def asmatrix(data, dtype=None): Examples -------- + >>> import numpy as np >>> x = np.array([[1, 2], [3, 4]]) >>> m = np.asmatrix(x) @@ -103,6 +102,7 @@ class matrix(N.ndarray): Examples -------- + >>> import numpy as np >>> a = np.matrix('1 2; 3 4') >>> a matrix([[1, 2], @@ -1065,6 +1065,7 @@ def bmat(obj, ldict=None, gdict=None): Examples -------- + >>> import numpy as np >>> A = np.asmatrix('1 1; 1 1') >>> B = np.asmatrix('2 2; 2 2') >>> C = np.asmatrix('3 4; 5 6') diff --git a/numpy/meson.build b/numpy/meson.build index 80fa720b82e6..84dffaa3d880 100644 --- a/numpy/meson.build +++ b/numpy/meson.build @@ -214,6 +214,51 @@ else lapack_dep = declare_dependency(dependencies: [lapack, blas_dep]) endif +# Determine whether it is necessary to link libatomic with gcc. This +# could be the case on 32-bit platforms when atomic operations are used +# on 64-bit types or on RISC-V using 8-bit atomics, so we explicitly +# check for both 64 bit and 8 bit operations. The check is adapted from +# SciPy, who copied it from Mesa. 
+null_dep = dependency('', required : false) +atomic_dep = null_dep +code_non_lockfree = ''' + #include + int main() { + struct { + void *p; + uint8_t u8v; + } x; + x.p = NULL; + x.u8v = 0; + uint8_t res = __atomic_load_n(x.u8v, __ATOMIC_SEQ_CST); + __atomic_store_n(x.u8v, 1, ATOMIC_SEQ_CST); + void *p = __atomic_load_n(x.p, __ATOMIC_SEQ_CST); + __atomic_store_n((void **)x.p, NULL, __ATOMIC_SEQ_CST) + return 0; + } +''' +if cc.get_id() != 'msvc' + if not cc.links( + code_non_lockfree, + name : 'Check atomic builtins without -latomic' + ) + atomic_dep = cc.find_library('atomic', required: false) + if atomic_dep.found() + # We're not sure that with `-latomic` things will work for all compilers, + # so verify and only keep libatomic as a dependency if this works. It is + # possible the build will fail later otherwise - unclear under what + # circumstances (compilers, runtimes, etc.) exactly and this may need to + # be extended when support is added for new CPUs + if not cc.links( + code_non_lockfree, + dependencies: atomic_dep, + name : 'Check atomic builtins with -latomic' + ) + atomic_dep = null_dep + endif + endif + endif +endif # Copy the main __init__.py|pxd files to the build dir (needed for Cython) __init__py = fs.copyfile('__init__.py') @@ -226,6 +271,8 @@ python_sources = [ '__init__.pxd', '__init__.py', '__init__.pyi', + '_array_api_info.py', + '_array_api_info.pyi', '_configtool.py', '_distributor_init.py', '_globals.py', @@ -273,7 +320,6 @@ pure_subdirs = [ 'matrixlib', 'polynomial', 'testing', - 'tests', 'typing', 'rec', 'char', @@ -312,13 +358,18 @@ else endif foreach subdir: pure_subdirs - install_subdir(subdir, install_dir: np_dir, install_tag: 'python-runtime') + install_subdir(subdir, install_dir: np_dir, install_tag: 'python-runtime', exclude_directories: ['tests']) + if fs.is_dir(subdir/'tests') + install_subdir(subdir/'tests', install_dir: np_dir/subdir, install_tag: 'tests') + endif endforeach +install_subdir('tests', install_dir: np_dir, 
install_tag: 'tests') + compilers = { 'C': cc, 'CPP': cpp, - 'CYTHON': meson.get_compiler('cython') + 'CYTHON': cy, } machines = { @@ -362,7 +413,7 @@ conf_data.set('PYTHON_VERSION', py.language_version()) # `np.show_config()`; needs some special handling for the case BLAS was found # but CBLAS not (and hence BLAS was also disabled) dependency_map = { - 'LAPACK': lapack_dep, + 'LAPACK': lapack, } if have_blas dependency_map += {'BLAS': blas} diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py index 2a31e52f2aa4..b22ade5e28a8 100644 --- a/numpy/polynomial/__init__.py +++ b/numpy/polynomial/__init__.py @@ -41,6 +41,8 @@ `~chebyshev.Chebyshev.fit` class method:: >>> from numpy.polynomial import Chebyshev + >>> xdata = [1, 2, 3, 4] + >>> ydata = [1, 4, 9, 16] >>> c = Chebyshev.fit(xdata, ydata, deg=1) is preferred over the `chebyshev.chebfit` function from the diff --git a/numpy/polynomial/__init__.pyi b/numpy/polynomial/__init__.pyi index 0fc5ef0f53e4..d36605b89250 100644 --- a/numpy/polynomial/__init__.pyi +++ b/numpy/polynomial/__init__.pyi @@ -1,21 +1,23 @@ -from numpy._pytesttester import PytestTester +from typing import Final, Literal -from numpy.polynomial import ( - chebyshev as chebyshev, - hermite as hermite, - hermite_e as hermite_e, - laguerre as laguerre, - legendre as legendre, - polynomial as polynomial, -) -from numpy.polynomial.chebyshev import Chebyshev as Chebyshev -from numpy.polynomial.hermite import Hermite as Hermite -from numpy.polynomial.hermite_e import HermiteE as HermiteE -from numpy.polynomial.laguerre import Laguerre as Laguerre -from numpy.polynomial.legendre import Legendre as Legendre -from numpy.polynomial.polynomial import Polynomial as Polynomial +from .polynomial import Polynomial +from .chebyshev import Chebyshev +from .legendre import Legendre +from .hermite import Hermite +from .hermite_e import HermiteE +from .laguerre import Laguerre -__all__: list[str] -test: PytestTester +__all__ = [ + 
"set_default_printstyle", + "polynomial", "Polynomial", + "chebyshev", "Chebyshev", + "legendre", "Legendre", + "hermite", "Hermite", + "hermite_e", "HermiteE", + "laguerre", "Laguerre", +] -def set_default_printstyle(style): ... +def set_default_printstyle(style: Literal["ascii", "unicode"]) -> None: ... + +from numpy._pytesttester import PytestTester as _PytestTester +test: Final[_PytestTester] diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index 9c345553eedd..65c3ff43dc32 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -1041,6 +1041,9 @@ class domain in NumPy 1.4 and ``None`` in later versions. """ if domain is None: domain = pu.getdomain(x) + if domain[0] == domain[1]: + domain[0] -= 1 + domain[1] += 1 elif type(domain) is list and len(domain) == 0: domain = cls.domain diff --git a/numpy/polynomial/_polybase.pyi b/numpy/polynomial/_polybase.pyi index 25c740dbedd0..7519a755f528 100644 --- a/numpy/polynomial/_polybase.pyi +++ b/numpy/polynomial/_polybase.pyi @@ -1,71 +1,297 @@ import abc -from typing import Any, ClassVar +import decimal +import numbers +import sys +from collections.abc import Iterator, Mapping, Sequence +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Final, + Generic, + Literal, + SupportsIndex, + TypeAlias, + TypeGuard, + TypeVar, + overload, +) -__all__: list[str] +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _FloatLike_co, + _NumberLike_co, -class ABCPolyBase(abc.ABC): + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, +) + +from ._polytypes import ( + _AnyInt, + _CoefLike_co, + + _Array2, + _Tuple2, + + _Series, + _CoefSeries, + + _SeriesLikeInt_co, + _SeriesLikeCoef_co, + + _ArrayLikeCoefObject_co, + _ArrayLikeCoef_co, +) + +if sys.version_info >= (3, 11): + from typing import LiteralString +elif TYPE_CHECKING: + from typing_extensions import LiteralString +else: + LiteralString: TypeAlias = str + + +__all__: Final[Sequence[str]] = 
("ABCPolyBase",) + + +_NameCo = TypeVar("_NameCo", bound=None | LiteralString, covariant=True) +_Self = TypeVar("_Self", bound="ABCPolyBase") +_Other = TypeVar("_Other", bound="ABCPolyBase") + +_AnyOther: TypeAlias = ABCPolyBase | _CoefLike_co | _SeriesLikeCoef_co +_Hundred: TypeAlias = Literal[100] + + +class ABCPolyBase(Generic[_NameCo], metaclass=abc.ABCMeta): __hash__: ClassVar[None] # type: ignore[assignment] __array_ufunc__: ClassVar[None] - maxpower: ClassVar[int] - coef: Any - @property - def symbol(self) -> str: ... - @property - @abc.abstractmethod - def domain(self): ... - @property - @abc.abstractmethod - def window(self): ... + + maxpower: ClassVar[_Hundred] + _superscript_mapping: ClassVar[Mapping[int, str]] + _subscript_mapping: ClassVar[Mapping[int, str]] + _use_unicode: ClassVar[bool] + + basis_name: _NameCo + coef: _CoefSeries + domain: _Array2[np.inexact[Any] | np.object_] + window: _Array2[np.inexact[Any] | np.object_] + + _symbol: LiteralString @property - @abc.abstractmethod - def basis_name(self): ... - def has_samecoef(self, other): ... - def has_samedomain(self, other): ... - def has_samewindow(self, other): ... - def has_sametype(self, other): ... - def __init__(self, coef, domain=..., window=..., symbol: str = ...) -> None: ... - def __format__(self, fmt_str): ... - def __call__(self, arg): ... - def __iter__(self): ... - def __len__(self): ... - def __neg__(self): ... - def __pos__(self): ... - def __add__(self, other): ... - def __sub__(self, other): ... - def __mul__(self, other): ... - def __truediv__(self, other): ... - def __floordiv__(self, other): ... - def __mod__(self, other): ... - def __divmod__(self, other): ... - def __pow__(self, other): ... - def __radd__(self, other): ... - def __rsub__(self, other): ... - def __rmul__(self, other): ... - def __rdiv__(self, other): ... - def __rtruediv__(self, other): ... - def __rfloordiv__(self, other): ... - def __rmod__(self, other): ... - def __rdivmod__(self, other): ... 
- def __eq__(self, other): ... - def __ne__(self, other): ... - def copy(self): ... - def degree(self): ... - def cutdeg(self, deg): ... - def trim(self, tol=...): ... - def truncate(self, size): ... - def convert(self, domain=..., kind=..., window=...): ... - def mapparms(self): ... - def integ(self, m=..., k = ..., lbnd=...): ... - def deriv(self, m=...): ... - def roots(self): ... - def linspace(self, n=..., domain=...): ... + def symbol(self, /) -> LiteralString: ... + + def __init__( + self, + /, + coef: _SeriesLikeCoef_co, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + symbol: str = ..., + ) -> None: ... + + @overload + def __call__(self, /, arg: _Other) -> _Other: ... + # TODO: Once `_ShapeType@ndarray` is covariant and bounded (see #26081), + # additionally include 0-d arrays as input types with scalar return type. + @overload + def __call__( + self, + /, + arg: _FloatLike_co | decimal.Decimal | numbers.Real | np.object_, + ) -> np.float64 | np.complex128: ... + @overload + def __call__( + self, + /, + arg: _NumberLike_co | numbers.Complex, + ) -> np.complex128: ... + @overload + def __call__(self, /, arg: _ArrayLikeFloat_co) -> ( + npt.NDArray[np.float64] + | npt.NDArray[np.complex128] + | npt.NDArray[np.object_] + ): ... + @overload + def __call__( + self, + /, + arg: _ArrayLikeComplex_co, + ) -> npt.NDArray[np.complex128] | npt.NDArray[np.object_]: ... + @overload + def __call__( + self, + /, + arg: _ArrayLikeCoefObject_co, + ) -> npt.NDArray[np.object_]: ... + + def __str__(self, /) -> str: ... + def __repr__(self, /) -> str: ... + def __format__(self, fmt_str: str, /) -> str: ... + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + def __neg__(self: _Self, /) -> _Self: ... + def __pos__(self: _Self, /) -> _Self: ... + def __add__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __sub__(self: _Self, x: _AnyOther, /) -> _Self: ... 
+ def __mul__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __truediv__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __floordiv__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __mod__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __divmod__(self: _Self, x: _AnyOther, /) -> _Tuple2[_Self]: ... + def __pow__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __radd__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __rsub__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __rmul__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __rtruediv__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __rfloordiv__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __rmod__(self: _Self, x: _AnyOther, /) -> _Self: ... + def __rdivmod__(self: _Self, x: _AnyOther, /) -> _Tuple2[_Self]: ... + def __len__(self, /) -> int: ... + def __iter__(self, /) -> Iterator[np.inexact[Any] | object]: ... + def __getstate__(self, /) -> dict[str, Any]: ... + def __setstate__(self, dict: dict[str, Any], /) -> None: ... + + def has_samecoef(self, /, other: ABCPolyBase) -> bool: ... + def has_samedomain(self, /, other: ABCPolyBase) -> bool: ... + def has_samewindow(self, /, other: ABCPolyBase) -> bool: ... + @overload + def has_sametype(self: _Self, /, other: ABCPolyBase) -> TypeGuard[_Self]: ... + @overload + def has_sametype(self, /, other: object) -> Literal[False]: ... + + def copy(self: _Self, /) -> _Self: ... + def degree(self, /) -> int: ... + def cutdeg(self: _Self, /) -> _Self: ... + def trim(self: _Self, /, tol: _FloatLike_co = ...) -> _Self: ... + def truncate(self: _Self, /, size: _AnyInt) -> _Self: ... + + @overload + def convert( + self, + domain: None | _SeriesLikeCoef_co, + kind: type[_Other], + /, + window: None | _SeriesLikeCoef_co = ..., + ) -> _Other: ... + @overload + def convert( + self, + /, + domain: None | _SeriesLikeCoef_co = ..., + *, + kind: type[_Other], + window: None | _SeriesLikeCoef_co = ..., + ) -> _Other: ... 
+ @overload + def convert( + self: _Self, + /, + domain: None | _SeriesLikeCoef_co = ..., + kind: type[_Self] = ..., + window: None | _SeriesLikeCoef_co = ..., + ) -> _Self: ... + + def mapparms(self, /) -> _Tuple2[Any]: ... + + def integ( + self: _Self, /, + m: SupportsIndex = ..., + k: _CoefLike_co | _SeriesLikeCoef_co = ..., + lbnd: None | _CoefLike_co = ..., + ) -> _Self: ... + + def deriv(self: _Self, /, m: SupportsIndex = ...) -> _Self: ... + + def roots(self, /) -> _CoefSeries: ... + + def linspace( + self, /, + n: SupportsIndex = ..., + domain: None | _SeriesLikeCoef_co = ..., + ) -> _Tuple2[_Series[np.float64 | np.complex128]]: ... + + @overload @classmethod - def fit(cls, x, y, deg, domain=..., rcond=..., full=..., w=..., window=...): ... + def fit( + cls: type[_Self], /, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: _FloatLike_co = ..., + full: Literal[False] = ..., + w: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + symbol: str = ..., + ) -> _Self: ... + @overload @classmethod - def fromroots(cls, roots, domain = ..., window=...): ... + def fit( + cls: type[_Self], /, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: _FloatLike_co = ..., + *, + full: Literal[True], + w: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + symbol: str = ..., + ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... + @overload @classmethod - def identity(cls, domain=..., window=...): ... 
+ def fit( + cls: type[_Self], + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: int | _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co, + rcond: _FloatLike_co, + full: Literal[True], /, + w: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + symbol: str = ..., + ) -> tuple[_Self, Sequence[np.inexact[Any] | np.int32]]: ... + @classmethod - def basis(cls, deg, domain=..., window=...): ... + def fromroots( + cls: type[_Self], /, + roots: _ArrayLikeCoef_co, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + symbol: str = ..., + ) -> _Self: ... + + @classmethod + def identity( + cls: type[_Self], /, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + symbol: str = ..., + ) -> _Self: ... + + @classmethod + def basis( + cls: type[_Self], /, + deg: _AnyInt, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + symbol: str = ..., + ) -> _Self: ... + + @classmethod + def cast( + cls: type[_Self], /, + series: ABCPolyBase, + domain: None | _SeriesLikeCoef_co = ..., + window: None | _SeriesLikeCoef_co = ..., + ) -> _Self: ... + @classmethod - def cast(cls, series, domain=..., window=...): ... + def _str_term_unicode(cls, i: str, arg_str: str) -> str: ... + @staticmethod + def _str_term_ascii(i: str, arg_str: str) -> str: ... + @staticmethod + def _repr_latex_term(i: str, arg_str: str, needs_parens: bool) -> str: ... 
diff --git a/numpy/polynomial/_polytypes.pyi b/numpy/polynomial/_polytypes.pyi new file mode 100644 index 000000000000..54771c0581e4 --- /dev/null +++ b/numpy/polynomial/_polytypes.pyi @@ -0,0 +1,912 @@ +import sys +from collections.abc import Callable, Sequence +from typing import ( + TYPE_CHECKING, + Any, + Literal, + NoReturn, + Protocol, + SupportsIndex, + SupportsInt, + TypeAlias, + TypeVar, + final, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + # array-likes + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, + _ArrayLikeNumber_co, + _ArrayLikeObject_co, + _NestedSequence, + + # scalar-likes + _IntLike_co, + _FloatLike_co, + _ComplexLike_co, + _NumberLike_co, +) + +if sys.version_info >= (3, 11): + from typing import LiteralString +elif TYPE_CHECKING: + from typing_extensions import LiteralString +else: + LiteralString: TypeAlias = str + +_T = TypeVar("_T") +_T_contra = TypeVar("_T_contra", contravariant=True) + +_Tuple2: TypeAlias = tuple[_T, _T] + +_V = TypeVar("_V") +_V_co = TypeVar("_V_co", covariant=True) +_Self = TypeVar("_Self", bound=object) + +_SCT = TypeVar("_SCT", bound=np.number[Any] | np.bool | np.object_) +_SCT_co = TypeVar( + "_SCT_co", + bound=np.number[Any] | np.bool | np.object_, + covariant=True, +) + +@final +class _SupportsArray(Protocol[_SCT_co]): + def __array__(self ,) -> npt.NDArray[_SCT_co]: ... + +@final +class _SupportsCoefOps(Protocol[_T_contra]): + # compatible with e.g. `int`, `float`, `complex`, `Decimal`, `Fraction`, + # and `ABCPolyBase` + def __eq__(self, x: object, /) -> bool: ... + def __ne__(self, x: object, /) -> bool: ... + + def __neg__(self: _Self, /) -> _Self: ... + def __pos__(self: _Self, /) -> _Self: ... + + def __add__(self: _Self, x: _T_contra, /) -> _Self: ... + def __sub__(self: _Self, x: _T_contra, /) -> _Self: ... + def __mul__(self: _Self, x: _T_contra, /) -> _Self: ... + def __truediv__(self: _Self, x: _T_contra, /) -> _Self | float: ... 
+ def __pow__(self: _Self, x: _T_contra, /) -> _Self | float: ... + + def __radd__(self: _Self, x: _T_contra, /) -> _Self: ... + def __rsub__(self: _Self, x: _T_contra, /) -> _Self: ... + def __rmul__(self: _Self, x: _T_contra, /) -> _Self: ... + def __rtruediv__(self: _Self, x: _T_contra, /) -> _Self | float: ... + +_Series: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] + +_FloatSeries: TypeAlias = _Series[np.floating[Any]] +_ComplexSeries: TypeAlias = _Series[np.complexfloating[Any, Any]] +_NumberSeries: TypeAlias = _Series[np.number[Any]] +_ObjectSeries: TypeAlias = _Series[np.object_] +_CoefSeries: TypeAlias = _Series[np.inexact[Any] | np.object_] + +_FloatArray: TypeAlias = npt.NDArray[np.floating[Any]] +_ComplexArray: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] +_ObjectArray: TypeAlias = npt.NDArray[np.object_] +_CoefArray: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] + +_Array1: TypeAlias = np.ndarray[tuple[Literal[1]], np.dtype[_SCT]] +_Array2: TypeAlias = np.ndarray[tuple[Literal[2]], np.dtype[_SCT]] + +_AnyInt: TypeAlias = SupportsInt | SupportsIndex + +_CoefObjectLike_co: TypeAlias = np.object_ | _SupportsCoefOps +_CoefLike_co: TypeAlias = _NumberLike_co | _CoefObjectLike_co + +# The term "series" is used here to refer to 1-d arrays of numeric scalars. 
+_SeriesLikeBool_co: TypeAlias = ( + _SupportsArray[np.bool] + | Sequence[bool | np.bool] +) +_SeriesLikeInt_co: TypeAlias = ( + _SupportsArray[np.integer[Any] | np.bool] + | Sequence[_IntLike_co] +) +_SeriesLikeFloat_co: TypeAlias = ( + _SupportsArray[np.floating[Any] | np.integer[Any] | np.bool] + | Sequence[_FloatLike_co] +) +_SeriesLikeComplex_co: TypeAlias = ( + _SupportsArray[np.integer[Any] | np.inexact[Any] | np.bool] + | Sequence[_ComplexLike_co] +) +_SeriesLikeObject_co: TypeAlias = ( + _SupportsArray[np.object_] + | Sequence[_CoefObjectLike_co] +) +_SeriesLikeCoef_co: TypeAlias = ( + # npt.NDArray[np.number[Any] | np.bool | np.object_] + _SupportsArray[np.number[Any] | np.bool | np.object_] + | Sequence[_CoefLike_co] +) + +_ArrayLikeCoefObject_co: TypeAlias = ( + _CoefObjectLike_co + | _SeriesLikeObject_co + | _NestedSequence[_SeriesLikeObject_co] +) +_ArrayLikeCoef_co: TypeAlias = ( + npt.NDArray[np.number[Any] | np.bool | np.object_] + | _ArrayLikeNumber_co + | _ArrayLikeCoefObject_co +) + +_Name_co = TypeVar("_Name_co", bound=LiteralString, covariant=True) + +class _Named(Protocol[_Name_co]): + @property + def __name__(self, /) -> _Name_co: ... + +_Line: TypeAlias = np.ndarray[tuple[Literal[1, 2]], np.dtype[_SCT]] + +@final +class _FuncLine(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, off: _SCT, scl: _SCT) -> _Line[_SCT]: ... + @overload + def __call__(self, /, off: int, scl: int) -> _Line[np.int_] : ... + @overload + def __call__(self, /, off: float, scl: float) -> _Line[np.float64]: ... + @overload + def __call__( + self, + /, + off: complex, + scl: complex, + ) -> _Line[np.complex128]: ... + @overload + def __call__( + self, + /, + off: _SupportsCoefOps, + scl: _SupportsCoefOps, + ) -> _Line[np.object_]: ... + +@final +class _FuncFromRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, roots: _SeriesLikeFloat_co) -> _FloatSeries: ... 
+ @overload + def __call__(self, /, roots: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, roots: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@final +class _FuncBinOp(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c1: _SeriesLikeBool_co, + c2: _SeriesLikeBool_co, + ) -> NoReturn: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, + ) -> _FloatSeries: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, + ) -> _ComplexSeries: ... + @overload + def __call__( + self, + /, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, + ) -> _ObjectSeries: ... + +@final +class _FuncUnOp(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, c: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@final +class _FuncPoly2Ortho(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__(self, /, pol: _SeriesLikeFloat_co) -> _FloatSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeComplex_co) -> _ComplexSeries: ... + @overload + def __call__(self, /, pol: _SeriesLikeCoef_co) -> _ObjectSeries: ... + +@final +class _FuncPow(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + pow: _IntLike_co, + maxpower: None | _IntLike_co = ..., + ) -> _FloatSeries: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + pow: _IntLike_co, + maxpower: None | _IntLike_co = ..., + ) -> _ComplexSeries: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeCoef_co, + pow: _IntLike_co, + maxpower: None | _IntLike_co = ..., + ) -> _ObjectSeries: ... 
+ +@final +class _FuncDer(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = ..., + scl: _FloatLike_co = ..., + axis: SupportsIndex = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = ..., + scl: _ComplexLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = ..., + scl: _CoefLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ObjectArray: ... + +@final +class _FuncInteg(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + m: SupportsIndex = ..., + k: _FloatLike_co | _SeriesLikeFloat_co = ..., + lbnd: _FloatLike_co = ..., + scl: _FloatLike_co = ..., + axis: SupportsIndex = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + m: SupportsIndex = ..., + k: _ComplexLike_co | _SeriesLikeComplex_co = ..., + lbnd: _ComplexLike_co = ..., + scl: _ComplexLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeCoef_co, + m: SupportsIndex = ..., + k: _SeriesLikeCoef_co | _SeriesLikeCoef_co = ..., + lbnd: _CoefLike_co = ..., + scl: _CoefLike_co = ..., + axis: SupportsIndex = ..., + ) -> _ObjectArray: ... + +@final +class _FuncValFromRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + r: _FloatLike_co, + tensor: bool = ..., + ) -> np.floating[Any]: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + r: _NumberLike_co, + tensor: bool = ..., + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, + /, + x: _FloatLike_co | _ArrayLikeFloat_co, + r: _ArrayLikeFloat_co, + tensor: bool = ..., + ) -> _FloatArray: ... 
+ @overload + def __call__( + self, + /, + x: _NumberLike_co | _ArrayLikeComplex_co, + r: _ArrayLikeComplex_co, + tensor: bool = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co | _ArrayLikeCoef_co, + r: _ArrayLikeCoef_co, + tensor: bool = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + r: _CoefLike_co, + tensor: bool = ..., + ) -> _SupportsCoefOps: ... + +@final +class _FuncVal(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + c: _SeriesLikeFloat_co, + tensor: bool = ..., + ) -> np.floating[Any]: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + c: _SeriesLikeComplex_co, + tensor: bool = ..., + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + tensor: bool = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + tensor: bool = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + tensor: bool = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + c: _SeriesLikeObject_co, + tensor: bool = ..., + ) -> _SupportsCoefOps: ... + +@final +class _FuncVal2D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + c: _SeriesLikeFloat_co, + ) -> np.floating[Any]: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... 
+ @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps: ... + +@final +class _FuncVal3D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _FloatLike_co, + y: _FloatLike_co, + z: _FloatLike_co, + c: _SeriesLikeFloat_co + ) -> np.floating[Any]: ... + @overload + def __call__( + self, + /, + x: _NumberLike_co, + y: _NumberLike_co, + z: _NumberLike_co, + c: _SeriesLikeComplex_co, + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + c: _ArrayLikeFloat_co, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + c: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + c: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: _CoefLike_co, + y: _CoefLike_co, + z: _CoefLike_co, + c: _SeriesLikeCoef_co, + ) -> _SupportsCoefOps: ... + +_AnyValF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike, bool], + _CoefArray, +] + +@final +class _FuncValND(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeFloat_co, + /, + *args: _FloatLike_co, + ) -> np.floating[Any]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeComplex_co, + /, + *args: _NumberLike_co, + ) -> np.complexfloating[Any, Any]: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeFloat_co, + /, + *args: _ArrayLikeFloat_co, + ) -> _FloatArray: ... 
+ @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeComplex_co, + /, + *args: _ArrayLikeComplex_co, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _ArrayLikeCoef_co, + /, + *args: _ArrayLikeCoef_co, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + val_f: _AnyValF, + c: _SeriesLikeObject_co, + /, + *args: _CoefObjectLike_co, + ) -> _SupportsCoefOps: ... + +@final +class _FuncVander(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + deg: SupportsIndex, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + deg: SupportsIndex, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + deg: SupportsIndex, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + deg: SupportsIndex, + ) -> _CoefArray: ... + +_AnyDegrees: TypeAlias = Sequence[SupportsIndex] + +@final +class _FuncVander2D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +@final +class _FuncVander3D(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _ArrayLikeFloat_co, + y: _ArrayLikeFloat_co, + z: _ArrayLikeFloat_co, + deg: _AnyDegrees, + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + x: _ArrayLikeComplex_co, + y: _ArrayLikeComplex_co, + z: _ArrayLikeComplex_co, + deg: _AnyDegrees, + ) -> _ComplexArray: ... 
+ @overload + def __call__( + self, + /, + x: _ArrayLikeCoef_co, + y: _ArrayLikeCoef_co, + z: _ArrayLikeCoef_co, + deg: _AnyDegrees, + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + x: npt.ArrayLike, + y: npt.ArrayLike, + z: npt.ArrayLike, + deg: _AnyDegrees, + ) -> _CoefArray: ... + +# keep in sync with the broadest overload of `._FuncVander` +_AnyFuncVander: TypeAlias = Callable[ + [npt.ArrayLike, SupportsIndex], + _CoefArray, +] + +@final +class _FuncVanderND(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeFloat_co], + degrees: Sequence[SupportsIndex], + ) -> _FloatArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[_ArrayLikeComplex_co], + degrees: Sequence[SupportsIndex], + ) -> _ComplexArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[ + _ArrayLikeObject_co | _ArrayLikeComplex_co, + ], + degrees: Sequence[SupportsIndex], + ) -> _ObjectArray: ... + @overload + def __call__( + self, + /, + vander_fs: Sequence[_AnyFuncVander], + points: Sequence[npt.ArrayLike], + degrees: Sequence[SupportsIndex], + ) -> _CoefArray: ... + +_FullFitResult: TypeAlias = Sequence[np.inexact[Any] | np.int32] + +@final +class _FuncFit(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + full: Literal[False] = ..., + w: None | _SeriesLikeFloat_co = ..., + ) -> _FloatArray: ... + @overload + def __call__( + self, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float, + full: Literal[True], + /, + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... 
+ @overload + def __call__( + self, + /, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + *, + full: Literal[True], + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_FloatArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + full: Literal[False] = ..., + w: None | _SeriesLikeFloat_co = ..., + ) -> _ComplexArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float, + full: Literal[True], + /, + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + *, + full: Literal[True], + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_ComplexArray, _FullFitResult]: ... + + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + full: Literal[False] = ..., + w: None | _SeriesLikeFloat_co = ..., + ) -> _ObjectArray: ... + @overload + def __call__( + self, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float, + full: Literal[True], + /, + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + @overload + def __call__( + self, + /, + x: _SeriesLikeComplex_co, + y: _ArrayLikeCoef_co, + deg: int | _SeriesLikeInt_co, + rcond: None | float = ..., + *, + full: Literal[True], + w: None | _SeriesLikeFloat_co = ..., + ) -> tuple[_ObjectArray, _FullFitResult]: ... + +@final +class _FuncRoots(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + ) -> _Series[np.float64]: ... 
+ @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + ) -> _Series[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _ObjectSeries: ... + + +_Companion: TypeAlias = np.ndarray[tuple[int, int], np.dtype[_SCT]] + +@final +class _FuncCompanion(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _SeriesLikeFloat_co, + ) -> _Companion[np.float64]: ... + @overload + def __call__( + self, + /, + c: _SeriesLikeComplex_co, + ) -> _Companion[np.complex128]: ... + @overload + def __call__(self, /, c: _SeriesLikeCoef_co) -> _Companion[np.object_]: ... + +@final +class _FuncGauss(_Named[_Name_co], Protocol[_Name_co]): + def __call__( + self, + /, + deg: SupportsIndex, + ) -> _Tuple2[_Series[np.float64]]: ... + +@final +class _FuncWeight(_Named[_Name_co], Protocol[_Name_co]): + @overload + def __call__( + self, + /, + c: _ArrayLikeFloat_co, + ) -> npt.NDArray[np.float64]: ... + @overload + def __call__( + self, + /, + c: _ArrayLikeComplex_co, + ) -> npt.NDArray[np.complex128]: ... + @overload + def __call__(self, /, c: _ArrayLikeCoef_co) -> _ObjectArray: ... + +@final +class _FuncPts(_Named[_Name_co], Protocol[_Name_co]): + def __call__(self, /, npts: _AnyInt) -> _Series[np.float64]: ... diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 20ee10c9980d..66fe7d60c040 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -670,6 +670,10 @@ def chebmulx(c): out : ndarray Array representing the result of the multiplication. + See Also + -------- + chebadd, chebsub, chebmul, chebdiv, chebpow + Notes ----- @@ -2007,9 +2011,9 @@ class Chebyshev(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. 
window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/chebyshev.pyi b/numpy/polynomial/chebyshev.pyi index f8cbacfc2f96..067af81d635d 100644 --- a/numpy/polynomial/chebyshev.pyi +++ b/numpy/polynomial/chebyshev.pyi @@ -1,52 +1,192 @@ -from typing import Any - -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -chebtrim = trimcoef - -def poly2cheb(pol): ... -def cheb2poly(c): ... - -chebdomain: NDArray[int_] -chebzero: NDArray[int_] -chebone: NDArray[int_] -chebx: NDArray[int_] - -def chebline(off, scl): ... -def chebfromroots(roots): ... -def chebadd(c1, c2): ... -def chebsub(c1, c2): ... -def chebmulx(c): ... -def chebmul(c1, c2): ... -def chebdiv(c1, c2): ... -def chebpow(c, pow, maxpower=...): ... -def chebder(c, m=..., scl=..., axis=...): ... -def chebint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def chebval(x, c, tensor=...): ... -def chebval2d(x, y, c): ... -def chebgrid2d(x, y, c): ... -def chebval3d(x, y, z, c): ... -def chebgrid3d(x, y, z, c): ... -def chebvander(x, deg): ... -def chebvander2d(x, y, deg): ... -def chebvander3d(x, y, z, deg): ... -def chebfit(x, y, deg, rcond=..., full=..., w=...): ... -def chebcompanion(c): ... -def chebroots(c): ... -def chebinterpolate(func, deg, args = ...): ... -def chebgauss(deg): ... -def chebweight(x): ... -def chebpts1(npts): ... -def chebpts2(npts): ... 
- -class Chebyshev(ABCPolyBase): +from collections.abc import Callable, Iterable +from typing import ( + Any, + Concatenate, + Final, + Literal as L, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import _IntLike_co + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _SeriesLikeCoef_co, + _Array1, + _Series, + _Array2, + _CoefSeries, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncPts, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as chebtrim + +__all__ = [ + "chebzero", + "chebone", + "chebx", + "chebdomain", + "chebline", + "chebadd", + "chebsub", + "chebmulx", + "chebmul", + "chebdiv", + "chebpow", + "chebval", + "chebder", + "chebint", + "cheb2poly", + "poly2cheb", + "chebfromroots", + "chebvander", + "chebfit", + "chebtrim", + "chebroots", + "chebpts1", + "chebpts2", + "Chebyshev", + "chebval2d", + "chebval3d", + "chebgrid2d", + "chebgrid3d", + "chebvander2d", + "chebvander3d", + "chebcompanion", + "chebgauss", + "chebweight", + "chebinterpolate", +] + +_SCT = TypeVar("_SCT", bound=np.number[Any] | np.object_) +def _cseries_to_zseries(c: npt.NDArray[_SCT]) -> _Series[_SCT]: ... +def _zseries_to_cseries(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... +def _zseries_mul( + z1: npt.NDArray[_SCT], + z2: npt.NDArray[_SCT], +) -> _Series[_SCT]: ... +def _zseries_div( + z1: npt.NDArray[_SCT], + z2: npt.NDArray[_SCT], +) -> _Series[_SCT]: ... +def _zseries_der(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... +def _zseries_int(zs: npt.NDArray[_SCT]) -> _Series[_SCT]: ... 
+ +poly2cheb: _FuncPoly2Ortho[L["poly2cheb"]] +cheb2poly: _FuncUnOp[L["cheb2poly"]] + +chebdomain: Final[_Array2[np.float64]] +chebzero: Final[_Array1[np.int_]] +chebone: Final[_Array1[np.int_]] +chebx: Final[_Array2[np.int_]] + +chebline: _FuncLine[L["chebline"]] +chebfromroots: _FuncFromRoots[L["chebfromroots"]] +chebadd: _FuncBinOp[L["chebadd"]] +chebsub: _FuncBinOp[L["chebsub"]] +chebmulx: _FuncUnOp[L["chebmulx"]] +chebmul: _FuncBinOp[L["chebmul"]] +chebdiv: _FuncBinOp[L["chebdiv"]] +chebpow: _FuncPow[L["chebpow"]] +chebder: _FuncDer[L["chebder"]] +chebint: _FuncInteg[L["chebint"]] +chebval: _FuncVal[L["chebval"]] +chebval2d: _FuncVal2D[L["chebval2d"]] +chebval3d: _FuncVal3D[L["chebval3d"]] +chebvalfromroots: _FuncValFromRoots[L["chebvalfromroots"]] +chebgrid2d: _FuncVal2D[L["chebgrid2d"]] +chebgrid3d: _FuncVal3D[L["chebgrid3d"]] +chebvander: _FuncVander[L["chebvander"]] +chebvander2d: _FuncVander2D[L["chebvander2d"]] +chebvander3d: _FuncVander3D[L["chebvander3d"]] +chebfit: _FuncFit[L["chebfit"]] +chebcompanion: _FuncCompanion[L["chebcompanion"]] +chebroots: _FuncRoots[L["chebroots"]] +chebgauss: _FuncGauss[L["chebgauss"]] +chebweight: _FuncWeight[L["chebweight"]] +chebpts1: _FuncPts[L["chebpts1"]] +chebpts2: _FuncPts[L["chebpts2"]] + +# keep in sync with `Chebyshev.interpolate` +_RT = TypeVar("_RT", bound=np.number[Any] | np.bool | np.object_) +@overload +def chebinterpolate( + func: np.ufunc, + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[np.float64 | np.complex128 | np.object_]: ... +@overload +def chebinterpolate( + func: Callable[[npt.NDArray[np.float64]], _RT], + deg: _IntLike_co, + args: tuple[()] = ..., +) -> npt.NDArray[_RT]: ... +@overload +def chebinterpolate( + func: Callable[Concatenate[npt.NDArray[np.float64], ...], _RT], + deg: _IntLike_co, + args: Iterable[Any], +) -> npt.NDArray[_RT]: ... 
+ +_Self = TypeVar("_Self", bound=object) + +class Chebyshev(ABCPolyBase[L["T"]]): + @overload + @classmethod + def interpolate( + cls: type[_Self], + /, + func: Callable[[npt.NDArray[np.float64]], _CoefSeries], + deg: _IntLike_co, + domain: None | _SeriesLikeCoef_co = ..., + args: tuple[()] = ..., + ) -> _Self: ... + @overload + @classmethod + def interpolate( + cls: type[_Self], + /, + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefSeries, + ], + deg: _IntLike_co, + domain: None | _SeriesLikeCoef_co = ..., + *, + args: Iterable[Any], + ) -> _Self: ... + @overload @classmethod - def interpolate(cls, func, deg, domain=..., args = ...): ... - domain: Any - window: Any - basis_name: Any + def interpolate( + cls: type[_Self], + func: Callable[ + Concatenate[npt.NDArray[np.float64], ...], + _CoefSeries, + ], + deg: _IntLike_co, + domain: None | _SeriesLikeCoef_co, + args: Iterable[Any], + /, + ) -> _Self: ... diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 4671f93244bd..656ab567e524 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -948,7 +948,7 @@ def hermval2d(x, y, c): >>> y = [4, 5] >>> c = [[1, 2, 3], [4, 5, 6]] >>> hermval2d(x, y, c) - array ([1035., 2883.]) + array([1035., 2883.]) """ return pu._valnd(hermval, c, x, y) @@ -1074,7 +1074,7 @@ def hermval3d(x, y, z, c): >>> c = [[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]] >>> hermval3d(x, y, z, c) array([ 40077., 120131.]) - + """ return pu._valnd(hermval, c, x, y, z) @@ -1184,6 +1184,7 @@ def hermvander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermvander >>> x = np.array([-1, 0, 1]) >>> hermvander(x, 3) @@ -1260,6 +1261,7 @@ def hermvander2d(x, y, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermvander2d >>> x = np.array([-1, 0, 1]) >>> y = np.array([-1, 0, 1]) @@ -1332,7 +1334,7 @@ def hermvander3d(x, y, z, deg): array([[ 1., -2., 2., 
-2., 4., -4.], [ 1., 0., -2., 0., 0., -0.], [ 1., 2., 2., 2., 4., 4.]]) - + """ return pu._vander_nd_flat((hermvander, hermvander, hermvander), (x, y, z), deg) @@ -1458,12 +1460,14 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermfit, hermval >>> x = np.linspace(-10, 10) - >>> err = np.random.randn(len(x))/10 + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) >>> y = hermval(x, [1, 2, 3]) + err >>> hermfit(x, y, 2) - array([1.0218, 1.9986, 2.9999]) # may vary + array([1.02294967, 2.00016403, 2.99994614]) # may vary """ return pu._fit(hermvander, x, y, deg, rcond, full, w) @@ -1726,6 +1730,7 @@ def hermweight(x): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite import hermweight >>> x = np.arange(-2, 2) >>> hermweight(x) @@ -1755,9 +1760,9 @@ class Hermite(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/hermite.pyi b/numpy/polynomial/hermite.pyi index 0a1628ab39c1..07db43d0c000 100644 --- a/numpy/polynomial/hermite.pyi +++ b/numpy/polynomial/hermite.pyi @@ -1,47 +1,106 @@ -from typing import Any - -from numpy import int_, float64 -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -hermtrim = trimcoef - -def poly2herm(pol): ... -def herm2poly(c): ... 
- -hermdomain: NDArray[int_] -hermzero: NDArray[int_] -hermone: NDArray[int_] -hermx: NDArray[float64] - -def hermline(off, scl): ... -def hermfromroots(roots): ... -def hermadd(c1, c2): ... -def hermsub(c1, c2): ... -def hermmulx(c): ... -def hermmul(c1, c2): ... -def hermdiv(c1, c2): ... -def hermpow(c, pow, maxpower=...): ... -def hermder(c, m=..., scl=..., axis=...): ... -def hermint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def hermval(x, c, tensor=...): ... -def hermval2d(x, y, c): ... -def hermgrid2d(x, y, c): ... -def hermval3d(x, y, z, c): ... -def hermgrid3d(x, y, z, c): ... -def hermvander(x, deg): ... -def hermvander2d(x, y, deg): ... -def hermvander3d(x, y, z, deg): ... -def hermfit(x, y, deg, rcond=..., full=..., w=...): ... -def hermcompanion(c): ... -def hermroots(c): ... -def hermgauss(deg): ... -def hermweight(x): ... - -class Hermite(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Any, Final, Literal as L, TypeVar + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermtrim + +__all__ = [ + "hermzero", + "hermone", + "hermx", + "hermdomain", + "hermline", + "hermadd", + "hermsub", + "hermmulx", + "hermmul", + "hermdiv", + "hermpow", + "hermval", + "hermder", + "hermint", + "herm2poly", + "poly2herm", + "hermfromroots", + "hermvander", + "hermfit", + "hermtrim", + "hermroots", + "Hermite", + "hermval2d", + "hermval3d", + "hermgrid2d", + "hermgrid3d", + "hermvander2d", + "hermvander3d", + "hermcompanion", + "hermgauss", + "hermweight", +] + +poly2herm: _FuncPoly2Ortho[L["poly2herm"]] +herm2poly: 
_FuncUnOp[L["herm2poly"]] + +hermdomain: Final[_Array2[np.float64]] +hermzero: Final[_Array1[np.int_]] +hermone: Final[_Array1[np.int_]] +hermx: Final[_Array2[np.int_]] + +hermline: _FuncLine[L["hermline"]] +hermfromroots: _FuncFromRoots[L["hermfromroots"]] +hermadd: _FuncBinOp[L["hermadd"]] +hermsub: _FuncBinOp[L["hermsub"]] +hermmulx: _FuncUnOp[L["hermmulx"]] +hermmul: _FuncBinOp[L["hermmul"]] +hermdiv: _FuncBinOp[L["hermdiv"]] +hermpow: _FuncPow[L["hermpow"]] +hermder: _FuncDer[L["hermder"]] +hermint: _FuncInteg[L["hermint"]] +hermval: _FuncVal[L["hermval"]] +hermval2d: _FuncVal2D[L["hermval2d"]] +hermval3d: _FuncVal3D[L["hermval3d"]] +hermvalfromroots: _FuncValFromRoots[L["hermvalfromroots"]] +hermgrid2d: _FuncVal2D[L["hermgrid2d"]] +hermgrid3d: _FuncVal3D[L["hermgrid3d"]] +hermvander: _FuncVander[L["hermvander"]] +hermvander2d: _FuncVander2D[L["hermvander2d"]] +hermvander3d: _FuncVander3D[L["hermvander3d"]] +hermfit: _FuncFit[L["hermfit"]] +hermcompanion: _FuncCompanion[L["hermcompanion"]] +hermroots: _FuncRoots[L["hermroots"]] + +_ND = TypeVar("_ND", bound=Any) +def _normed_hermite_n( + x: np.ndarray[_ND, np.dtype[np.float64]], + n: int | np.intp, +) -> np.ndarray[_ND, np.dtype[np.float64]]: ... + +hermgauss: _FuncGauss[L["hermgauss"]] +hermweight: _FuncWeight[L["hermweight"]] + +class Hermite(ABCPolyBase[L["H"]]): ... diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index f50b9d2449f3..48b76894336e 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -127,6 +127,7 @@ def poly2herme(pol): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import poly2herme >>> poly2herme(np.arange(4)) array([ 2., 10., 2., 3.]) @@ -408,6 +409,10 @@ def hermemulx(c): out : ndarray Array representing the result of the multiplication. 
+ See Also + -------- + hermeadd, hermesub, hermemul, hermediv, hermepow + Notes ----- The multiplication uses the recursion relationship for Hermite @@ -1133,6 +1138,7 @@ def hermevander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import hermevander >>> x = np.array([-1, 0, 1]) >>> hermevander(x, 3) @@ -1385,13 +1391,14 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) - >>> np.random.seed(123) - >>> err = np.random.randn(len(x))/10 + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) - array([ 1.01690445, 1.99951418, 2.99948696]) # may vary + array([1.02284196, 2.00032805, 2.99978457]) # may vary """ return pu._fit(hermevander, x, y, deg, rcond, full, w) @@ -1663,9 +1670,9 @@ class HermiteE(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/hermite_e.pyi b/numpy/polynomial/hermite_e.pyi index cca0dd636785..94ad7248f268 100644 --- a/numpy/polynomial/hermite_e.pyi +++ b/numpy/polynomial/hermite_e.pyi @@ -1,47 +1,106 @@ -from typing import Any - -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -hermetrim = trimcoef - -def poly2herme(pol): ... -def herme2poly(c): ... 
- -hermedomain: NDArray[int_] -hermezero: NDArray[int_] -hermeone: NDArray[int_] -hermex: NDArray[int_] - -def hermeline(off, scl): ... -def hermefromroots(roots): ... -def hermeadd(c1, c2): ... -def hermesub(c1, c2): ... -def hermemulx(c): ... -def hermemul(c1, c2): ... -def hermediv(c1, c2): ... -def hermepow(c, pow, maxpower=...): ... -def hermeder(c, m=..., scl=..., axis=...): ... -def hermeint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def hermeval(x, c, tensor=...): ... -def hermeval2d(x, y, c): ... -def hermegrid2d(x, y, c): ... -def hermeval3d(x, y, z, c): ... -def hermegrid3d(x, y, z, c): ... -def hermevander(x, deg): ... -def hermevander2d(x, y, deg): ... -def hermevander3d(x, y, z, deg): ... -def hermefit(x, y, deg, rcond=..., full=..., w=...): ... -def hermecompanion(c): ... -def hermeroots(c): ... -def hermegauss(deg): ... -def hermeweight(x): ... - -class HermiteE(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Any, Final, Literal as L, TypeVar + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as hermetrim + +__all__ = [ + "hermezero", + "hermeone", + "hermex", + "hermedomain", + "hermeline", + "hermeadd", + "hermesub", + "hermemulx", + "hermemul", + "hermediv", + "hermepow", + "hermeval", + "hermeder", + "hermeint", + "herme2poly", + "poly2herme", + "hermefromroots", + "hermevander", + "hermefit", + "hermetrim", + "hermeroots", + "HermiteE", + "hermeval2d", + "hermeval3d", + "hermegrid2d", + "hermegrid3d", + "hermevander2d", + "hermevander3d", + "hermecompanion", + "hermegauss", + "hermeweight", +] + +poly2herme: 
_FuncPoly2Ortho[L["poly2herme"]] +herme2poly: _FuncUnOp[L["herme2poly"]] + +hermedomain: Final[_Array2[np.float64]] +hermezero: Final[_Array1[np.int_]] +hermeone: Final[_Array1[np.int_]] +hermex: Final[_Array2[np.int_]] + +hermeline: _FuncLine[L["hermeline"]] +hermefromroots: _FuncFromRoots[L["hermefromroots"]] +hermeadd: _FuncBinOp[L["hermeadd"]] +hermesub: _FuncBinOp[L["hermesub"]] +hermemulx: _FuncUnOp[L["hermemulx"]] +hermemul: _FuncBinOp[L["hermemul"]] +hermediv: _FuncBinOp[L["hermediv"]] +hermepow: _FuncPow[L["hermepow"]] +hermeder: _FuncDer[L["hermeder"]] +hermeint: _FuncInteg[L["hermeint"]] +hermeval: _FuncVal[L["hermeval"]] +hermeval2d: _FuncVal2D[L["hermeval2d"]] +hermeval3d: _FuncVal3D[L["hermeval3d"]] +hermevalfromroots: _FuncValFromRoots[L["hermevalfromroots"]] +hermegrid2d: _FuncVal2D[L["hermegrid2d"]] +hermegrid3d: _FuncVal3D[L["hermegrid3d"]] +hermevander: _FuncVander[L["hermevander"]] +hermevander2d: _FuncVander2D[L["hermevander2d"]] +hermevander3d: _FuncVander3D[L["hermevander3d"]] +hermefit: _FuncFit[L["hermefit"]] +hermecompanion: _FuncCompanion[L["hermecompanion"]] +hermeroots: _FuncRoots[L["hermeroots"]] + +_ND = TypeVar("_ND", bound=Any) +def _normed_hermite_e_n( + x: np.ndarray[_ND, np.dtype[np.float64]], + n: int | np.intp, +) -> np.ndarray[_ND, np.dtype[np.float64]]: ... + +hermegauss: _FuncGauss[L["hermegauss"]] +hermeweight: _FuncWeight[L["hermeweight"]] + +class HermiteE(ABCPolyBase[L["He"]]): ... 
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 11e2ac7229ca..87f3ffa6ffd7 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -126,6 +126,7 @@ def poly2lag(pol): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import poly2lag >>> poly2lag(np.arange(4)) array([ 23., -63., 58., -18.]) @@ -1062,7 +1063,7 @@ def lagval3d(x, y, z, c): >>> c = [[[1, 2], [3, 4]], [[5, 6], [7, 8]]] >>> lagval3d(1, 1, 2, c) -1.0 - + """ return pu._valnd(lagval, c, x, y, z) @@ -1128,7 +1129,7 @@ def laggrid3d(x, y, z, c): [ -2., -18.]], [[ -2., -14.], [ -1., -5.]]]) - + """ return pu._gridnd(lagval, c, x, y, z) @@ -1169,6 +1170,7 @@ def lagvander(x, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander >>> x = np.array([0, 1, 2]) >>> lagvander(x, 3) @@ -1244,12 +1246,13 @@ def lagvander2d(x, y, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander2d >>> x = np.array([0]) >>> y = np.array([2]) >>> lagvander2d(x, y, [2, 1]) array([[ 1., -1., 1., -1., 1., -1.]]) - + """ return pu._vander_nd_flat((lagvander, lagvander), (x, y), deg) @@ -1306,6 +1309,7 @@ def lagvander3d(x, y, z, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagvander3d >>> x = np.array([0]) >>> y = np.array([2]) @@ -1439,12 +1443,14 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.laguerre import lagfit, lagval >>> x = np.linspace(0, 10) - >>> err = np.random.randn(len(x))/10 + >>> rng = np.random.default_rng() + >>> err = rng.normal(scale=1./10, size=len(x)) >>> y = lagval(x, [1, 2, 3]) + err >>> lagfit(x, y, 2) - array([ 0.96971004, 2.00193749, 3.00288744]) # may vary + array([1.00578369, 1.99417356, 2.99827656]) # may vary """ return pu._fit(lagvander, x, y, deg, rcond, full, w) @@ -1480,7 +1486,7 @@ def lagcompanion(c): >>> 
lagcompanion([1, 2, 3]) array([[ 1. , -0.33333333], [-1. , 4.33333333]]) - + """ # c is a trimmed copy [c] = pu.as_series([c]) @@ -1687,9 +1693,9 @@ class Laguerre(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [0, 1]. + The default value is [0., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [0, 1]. + Window, see `domain` for its use. The default value is [0., 1.]. .. versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/laguerre.pyi b/numpy/polynomial/laguerre.pyi index 541d3911832f..ee8115795748 100644 --- a/numpy/polynomial/laguerre.pyi +++ b/numpy/polynomial/laguerre.pyi @@ -1,47 +1,100 @@ -from typing import Any - -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef - -__all__: list[str] - -lagtrim = trimcoef - -def poly2lag(pol): ... -def lag2poly(c): ... - -lagdomain: NDArray[int_] -lagzero: NDArray[int_] -lagone: NDArray[int_] -lagx: NDArray[int_] - -def lagline(off, scl): ... -def lagfromroots(roots): ... -def lagadd(c1, c2): ... -def lagsub(c1, c2): ... -def lagmulx(c): ... -def lagmul(c1, c2): ... -def lagdiv(c1, c2): ... -def lagpow(c, pow, maxpower=...): ... -def lagder(c, m=..., scl=..., axis=...): ... -def lagint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def lagval(x, c, tensor=...): ... -def lagval2d(x, y, c): ... -def laggrid2d(x, y, c): ... -def lagval3d(x, y, z, c): ... -def laggrid3d(x, y, z, c): ... -def lagvander(x, deg): ... -def lagvander2d(x, y, deg): ... -def lagvander3d(x, y, z, deg): ... -def lagfit(x, y, deg, rcond=..., full=..., w=...): ... -def lagcompanion(c): ... -def lagroots(c): ... -def laggauss(deg): ... -def lagweight(x): ... 
- -class Laguerre(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +from typing import Final, Literal as L + +import numpy as np + +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as lagtrim + +__all__ = [ + "lagzero", + "lagone", + "lagx", + "lagdomain", + "lagline", + "lagadd", + "lagsub", + "lagmulx", + "lagmul", + "lagdiv", + "lagpow", + "lagval", + "lagder", + "lagint", + "lag2poly", + "poly2lag", + "lagfromroots", + "lagvander", + "lagfit", + "lagtrim", + "lagroots", + "Laguerre", + "lagval2d", + "lagval3d", + "laggrid2d", + "laggrid3d", + "lagvander2d", + "lagvander3d", + "lagcompanion", + "laggauss", + "lagweight", +] + +poly2lag: _FuncPoly2Ortho[L["poly2lag"]] +lag2poly: _FuncUnOp[L["lag2poly"]] + +lagdomain: Final[_Array2[np.float64]] +lagzero: Final[_Array1[np.int_]] +lagone: Final[_Array1[np.int_]] +lagx: Final[_Array2[np.int_]] + +lagline: _FuncLine[L["lagline"]] +lagfromroots: _FuncFromRoots[L["lagfromroots"]] +lagadd: _FuncBinOp[L["lagadd"]] +lagsub: _FuncBinOp[L["lagsub"]] +lagmulx: _FuncUnOp[L["lagmulx"]] +lagmul: _FuncBinOp[L["lagmul"]] +lagdiv: _FuncBinOp[L["lagdiv"]] +lagpow: _FuncPow[L["lagpow"]] +lagder: _FuncDer[L["lagder"]] +lagint: _FuncInteg[L["lagint"]] +lagval: _FuncVal[L["lagval"]] +lagval2d: _FuncVal2D[L["lagval2d"]] +lagval3d: _FuncVal3D[L["lagval3d"]] +lagvalfromroots: _FuncValFromRoots[L["lagvalfromroots"]] +laggrid2d: _FuncVal2D[L["laggrid2d"]] +laggrid3d: _FuncVal3D[L["laggrid3d"]] +lagvander: _FuncVander[L["lagvander"]] +lagvander2d: _FuncVander2D[L["lagvander2d"]] +lagvander3d: _FuncVander3D[L["lagvander3d"]] +lagfit: _FuncFit[L["lagfit"]] +lagcompanion: 
_FuncCompanion[L["lagcompanion"]] +lagroots: _FuncRoots[L["lagroots"]] +laggauss: _FuncGauss[L["laggauss"]] +lagweight: _FuncWeight[L["lagweight"]] + + +class Laguerre(ABCPolyBase[L["L"]]): ... diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index cfbf1486d486..674b7f1bb82b 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -128,6 +128,7 @@ def poly2leg(pol): Examples -------- + >>> import numpy as np >>> from numpy import polynomial as P >>> p = P.Polynomial(np.arange(4)) >>> p @@ -426,7 +427,7 @@ def legmulx(c): See Also -------- - legadd, legmul, legdiv, legpow + legadd, legsub, legmul, legdiv, legpow Notes ----- @@ -1632,9 +1633,9 @@ class Legendre(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. 
versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/legendre.pyi b/numpy/polynomial/legendre.pyi index 97c6478f80f8..d81f3e6f54a4 100644 --- a/numpy/polynomial/legendre.pyi +++ b/numpy/polynomial/legendre.pyi @@ -1,47 +1,99 @@ -from typing import Any +from typing import Final, Literal as L -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef +import numpy as np -__all__: list[str] +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncGauss, + _FuncInteg, + _FuncLine, + _FuncPoly2Ortho, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVal2D, + _FuncVal3D, + _FuncValFromRoots, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncWeight, +) +from .polyutils import trimcoef as legtrim -legtrim = trimcoef +__all__ = [ + "legzero", + "legone", + "legx", + "legdomain", + "legline", + "legadd", + "legsub", + "legmulx", + "legmul", + "legdiv", + "legpow", + "legval", + "legder", + "legint", + "leg2poly", + "poly2leg", + "legfromroots", + "legvander", + "legfit", + "legtrim", + "legroots", + "Legendre", + "legval2d", + "legval3d", + "leggrid2d", + "leggrid3d", + "legvander2d", + "legvander3d", + "legcompanion", + "leggauss", + "legweight", +] -def poly2leg(pol): ... -def leg2poly(c): ... +poly2leg: _FuncPoly2Ortho[L["poly2leg"]] +leg2poly: _FuncUnOp[L["leg2poly"]] -legdomain: NDArray[int_] -legzero: NDArray[int_] -legone: NDArray[int_] -legx: NDArray[int_] +legdomain: Final[_Array2[np.float64]] +legzero: Final[_Array1[np.int_]] +legone: Final[_Array1[np.int_]] +legx: Final[_Array2[np.int_]] -def legline(off, scl): ... -def legfromroots(roots): ... -def legadd(c1, c2): ... -def legsub(c1, c2): ... -def legmulx(c): ... -def legmul(c1, c2): ... -def legdiv(c1, c2): ... -def legpow(c, pow, maxpower=...): ... 
-def legder(c, m=..., scl=..., axis=...): ... -def legint(c, m=..., k = ..., lbnd=..., scl=..., axis=...): ... -def legval(x, c, tensor=...): ... -def legval2d(x, y, c): ... -def leggrid2d(x, y, c): ... -def legval3d(x, y, z, c): ... -def leggrid3d(x, y, z, c): ... -def legvander(x, deg): ... -def legvander2d(x, y, deg): ... -def legvander3d(x, y, z, deg): ... -def legfit(x, y, deg, rcond=..., full=..., w=...): ... -def legcompanion(c): ... -def legroots(c): ... -def leggauss(deg): ... -def legweight(x): ... +legline: _FuncLine[L["legline"]] +legfromroots: _FuncFromRoots[L["legfromroots"]] +legadd: _FuncBinOp[L["legadd"]] +legsub: _FuncBinOp[L["legsub"]] +legmulx: _FuncUnOp[L["legmulx"]] +legmul: _FuncBinOp[L["legmul"]] +legdiv: _FuncBinOp[L["legdiv"]] +legpow: _FuncPow[L["legpow"]] +legder: _FuncDer[L["legder"]] +legint: _FuncInteg[L["legint"]] +legval: _FuncVal[L["legval"]] +legval2d: _FuncVal2D[L["legval2d"]] +legval3d: _FuncVal3D[L["legval3d"]] +legvalfromroots: _FuncValFromRoots[L["legvalfromroots"]] +leggrid2d: _FuncVal2D[L["leggrid2d"]] +leggrid3d: _FuncVal3D[L["leggrid3d"]] +legvander: _FuncVander[L["legvander"]] +legvander2d: _FuncVander2D[L["legvander2d"]] +legvander3d: _FuncVander3D[L["legvander3d"]] +legfit: _FuncFit[L["legfit"]] +legcompanion: _FuncCompanion[L["legcompanion"]] +legroots: _FuncRoots[L["legroots"]] +leggauss: _FuncGauss[L["leggauss"]] +legweight: _FuncWeight[L["legweight"]] -class Legendre(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +class Legendre(ABCPolyBase[L["P"]]): ... 
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 2241c49235a4..12ab1ba34f47 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -729,6 +729,7 @@ def polyval(x, c, tensor=True): Examples -------- + >>> import numpy as np >>> from numpy.polynomial.polynomial import polyval >>> polyval(1, [1,2,3]) 6.0 @@ -1207,6 +1208,8 @@ def polyvander2d(x, y, deg): Examples -------- + >>> import numpy as np + The 2-D pseudo-Vandermonde matrix of degree ``[1, 2]`` and sample points ``x = [-1, 2]`` and ``y = [1, 3]`` is as follows: @@ -1290,6 +1293,7 @@ def polyvander3d(x, y, z, deg): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polynomial as P >>> x = np.asarray([-1, 2, 1]) >>> y = np.asarray([1, -2, -3]) @@ -1441,27 +1445,32 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- - >>> np.random.seed(123) + >>> import numpy as np >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] - >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + Gaussian noise + >>> rng = np.random.default_rng() + >>> err = rng.normal(size=len(x)) + >>> y = x**3 - x + err # x^3 - x + Gaussian noise >>> c, stats = P.polyfit(x,y,3,full=True) - >>> np.random.seed(123) - >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1 - array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary + >>> c # c[0], c[1] approx. -1, c[2] should be approx. 0, c[3] approx. 
1 + array([ 0.23111996, -1.02785049, -0.2241444 , 1.08405657]) # may vary >>> stats # note the large SSR, explaining the rather poor results - [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary - 0.28853036]), 1.1324274851176597e-014] + [array([48.312088]), # may vary + 4, + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] Same thing without the added noise >>> y = x**3 - x >>> c, stats = P.polyfit(x,y,3,full=True) - >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 - array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00]) + >>> c # c[0], c[1] ~= -1, c[2] should be "very close to 0", c[3] ~= 1 + array([-6.73496154e-17, -1.00000000e+00, 0.00000000e+00, 1.00000000e+00]) >>> stats # note the minuscule SSR - [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary - 0.50443316, 0.28853036]), 1.1324274851176597e-014] + [array([8.79579319e-31]), + 4, + array([1.38446749, 1.32119158, 0.50443316, 0.28853036]), + 1.1324274851176597e-14] """ return pu._fit(polyvander, x, y, deg, rcond, full, w) @@ -1597,9 +1606,9 @@ class Polynomial(ABCPolyBase): domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. - The default value is [-1, 1]. + The default value is [-1., 1.]. window : (2,) array_like, optional - Window, see `domain` for its use. The default value is [-1, 1]. + Window, see `domain` for its use. The default value is [-1., 1.]. .. 
versionadded:: 1.6.0 symbol : str, optional diff --git a/numpy/polynomial/polynomial.pyi b/numpy/polynomial/polynomial.pyi index f8b62e529f23..89a8b57185f3 100644 --- a/numpy/polynomial/polynomial.pyi +++ b/numpy/polynomial/polynomial.pyi @@ -1,42 +1,87 @@ -from typing import Any +from typing import Final, Literal as L -from numpy import int_ -from numpy.typing import NDArray -from numpy.polynomial._polybase import ABCPolyBase -from numpy.polynomial.polyutils import trimcoef +import numpy as np +from ._polybase import ABCPolyBase +from ._polytypes import ( + _Array1, + _Array2, + _FuncVal2D, + _FuncVal3D, + _FuncBinOp, + _FuncCompanion, + _FuncDer, + _FuncFit, + _FuncFromRoots, + _FuncInteg, + _FuncLine, + _FuncPow, + _FuncRoots, + _FuncUnOp, + _FuncVal, + _FuncVander, + _FuncVander2D, + _FuncVander3D, + _FuncValFromRoots, +) +from .polyutils import trimcoef as polytrim -__all__: list[str] +__all__ = [ + "polyzero", + "polyone", + "polyx", + "polydomain", + "polyline", + "polyadd", + "polysub", + "polymulx", + "polymul", + "polydiv", + "polypow", + "polyval", + "polyvalfromroots", + "polyder", + "polyint", + "polyfromroots", + "polyvander", + "polyfit", + "polytrim", + "polyroots", + "Polynomial", + "polyval2d", + "polyval3d", + "polygrid2d", + "polygrid3d", + "polyvander2d", + "polyvander3d", + "polycompanion", +] -polytrim = trimcoef +polydomain: Final[_Array2[np.float64]] +polyzero: Final[_Array1[np.int_]] +polyone: Final[_Array1[np.int_]] +polyx: Final[_Array2[np.int_]] -polydomain: NDArray[int_] -polyzero: NDArray[int_] -polyone: NDArray[int_] -polyx: NDArray[int_] +polyline: _FuncLine[L["Polyline"]] +polyfromroots: _FuncFromRoots[L["polyfromroots"]] +polyadd: _FuncBinOp[L["polyadd"]] +polysub: _FuncBinOp[L["polysub"]] +polymulx: _FuncUnOp[L["polymulx"]] +polymul: _FuncBinOp[L["polymul"]] +polydiv: _FuncBinOp[L["polydiv"]] +polypow: _FuncPow[L["polypow"]] +polyder: _FuncDer[L["polyder"]] +polyint: _FuncInteg[L["polyint"]] +polyval: _FuncVal[L["polyval"]] 
+polyval2d: _FuncVal2D[L["polyval2d"]] +polyval3d: _FuncVal3D[L["polyval3d"]] +polyvalfromroots: _FuncValFromRoots[L["polyvalfromroots"]] +polygrid2d: _FuncVal2D[L["polygrid2d"]] +polygrid3d: _FuncVal3D[L["polygrid3d"]] +polyvander: _FuncVander[L["polyvander"]] +polyvander2d: _FuncVander2D[L["polyvander2d"]] +polyvander3d: _FuncVander3D[L["polyvander3d"]] +polyfit: _FuncFit[L["polyfit"]] +polycompanion: _FuncCompanion[L["polycompanion"]] +polyroots: _FuncRoots[L["polyroots"]] -def polyline(off, scl): ... -def polyfromroots(roots): ... -def polyadd(c1, c2): ... -def polysub(c1, c2): ... -def polymulx(c): ... -def polymul(c1, c2): ... -def polydiv(c1, c2): ... -def polypow(c, pow, maxpower=...): ... -def polyder(c, m=..., scl=..., axis=...): ... -def polyint(c, m=..., k=..., lbnd=..., scl=..., axis=...): ... -def polyval(x, c, tensor=...): ... -def polyvalfromroots(x, r, tensor=...): ... -def polyval2d(x, y, c): ... -def polygrid2d(x, y, c): ... -def polyval3d(x, y, z, c): ... -def polygrid3d(x, y, z, c): ... -def polyvander(x, deg): ... -def polyvander2d(x, y, deg): ... -def polyvander3d(x, y, z, deg): ... -def polyfit(x, y, deg, rcond=..., full=..., w=...): ... -def polyroots(c): ... - -class Polynomial(ABCPolyBase): - domain: Any - window: Any - basis_name: Any +class Polynomial(ABCPolyBase[None]): ... 
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index 54ffe5937e8c..b3987d0c623b 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -95,6 +95,7 @@ def as_series(alist, trim=True): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> a = np.arange(4) >>> pu.as_series(a) @@ -218,6 +219,7 @@ def getdomain(x): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> points = np.arange(4)**2 - 5; points array([-5, -4, -1, 4]) @@ -323,6 +325,7 @@ def mapdomain(x, old, new): Examples -------- + >>> import numpy as np >>> from numpy.polynomial import polyutils as pu >>> old_domain = (-1,1) >>> new_domain = (0,2*np.pi) @@ -346,7 +349,8 @@ def mapdomain(x, old, new): array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary """ - x = np.asanyarray(x) + if type(x) not in (int, float, complex) and not isinstance(x, np.generic): + x = np.asanyarray(x) off, scl = mapparms(old, new) return off + scl*x @@ -479,7 +483,7 @@ def _valnd(val_f, c, *args): """ args = [np.asanyarray(a) for a in args] shape0 = args[0].shape - if not all((a.shape == shape0 for a in args[1:])): + if not all(a.shape == shape0 for a in args[1:]): if len(args) == 3: raise ValueError('x, y, z are incompatible') elif len(args) == 2: diff --git a/numpy/polynomial/polyutils.pyi b/numpy/polynomial/polyutils.pyi index 0eccd6cdc2a4..9299b23975b1 100644 --- a/numpy/polynomial/polyutils.pyi +++ b/numpy/polynomial/polyutils.pyi @@ -1,9 +1,431 @@ -__all__: list[str] - -def trimseq(seq): ... -def as_series(alist, trim=...): ... -def trimcoef(c, tol=...): ... -def getdomain(x): ... -def mapparms(old, new): ... -def mapdomain(x, old, new): ... -def format_float(x, parens=...): ... 
+from collections.abc import Callable, Iterable, Sequence +from typing import ( + Any, + Final, + Literal, + SupportsIndex, + TypeAlias, + TypeVar, + overload, +) + +import numpy as np +import numpy.typing as npt +from numpy._typing import ( + _FloatLike_co, + _NumberLike_co, + + _ArrayLikeFloat_co, + _ArrayLikeComplex_co, +) + +from ._polytypes import ( + _AnyInt, + _CoefLike_co, + + _Array2, + _Tuple2, + + _FloatSeries, + _CoefSeries, + _ComplexSeries, + _ObjectSeries, + + _ComplexArray, + _FloatArray, + _CoefArray, + _ObjectArray, + + _SeriesLikeInt_co, + _SeriesLikeFloat_co, + _SeriesLikeComplex_co, + _SeriesLikeCoef_co, + + _ArrayLikeCoef_co, + + _FuncBinOp, + _FuncValND, + _FuncVanderND, +) + +__all__: Final[Sequence[str]] = [ + "as_series", + "format_float", + "getdomain", + "mapdomain", + "mapparms", + "trimcoef", + "trimseq", +] + +_AnyLineF: TypeAlias = Callable[ + [_CoefLike_co, _CoefLike_co], + _CoefArray, +] +_AnyMulF: TypeAlias = Callable[ + [npt.ArrayLike, npt.ArrayLike], + _CoefArray, +] +_AnyVanderF: TypeAlias = Callable[ + [npt.ArrayLike, SupportsIndex], + _CoefArray, +] + +@overload +def as_series( + alist: npt.NDArray[np.integer[Any]] | _FloatArray, + trim: bool = ..., +) -> list[_FloatSeries]: ... +@overload +def as_series( + alist: _ComplexArray, + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: _ObjectArray, + trim: bool = ..., +) -> list[_ObjectSeries]: ... +@overload +def as_series( # type: ignore[overload-overlap] + alist: Iterable[_FloatArray | npt.NDArray[np.integer[Any]]], + trim: bool = ..., +) -> list[_FloatSeries]: ... +@overload +def as_series( + alist: Iterable[_ComplexArray], + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: Iterable[_ObjectArray], + trim: bool = ..., +) -> list[_ObjectSeries]: ... +@overload +def as_series( # type: ignore[overload-overlap] + alist: Iterable[_SeriesLikeFloat_co | float], + trim: bool = ..., +) -> list[_FloatSeries]: ... 
+@overload +def as_series( + alist: Iterable[_SeriesLikeComplex_co | complex], + trim: bool = ..., +) -> list[_ComplexSeries]: ... +@overload +def as_series( + alist: Iterable[_SeriesLikeCoef_co | object], + trim: bool = ..., +) -> list[_ObjectSeries]: ... + +_T_seq = TypeVar("_T_seq", bound=_CoefArray | Sequence[_CoefLike_co]) +def trimseq(seq: _T_seq) -> _T_seq: ... + +@overload +def trimcoef( # type: ignore[overload-overlap] + c: npt.NDArray[np.integer[Any]] | _FloatArray, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... +@overload +def trimcoef( + c: _ComplexArray, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... +@overload +def trimcoef( + c: _ObjectArray, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... +@overload +def trimcoef( # type: ignore[overload-overlap] + c: _SeriesLikeFloat_co | float, + tol: _FloatLike_co = ..., +) -> _FloatSeries: ... +@overload +def trimcoef( + c: _SeriesLikeComplex_co | complex, + tol: _FloatLike_co = ..., +) -> _ComplexSeries: ... +@overload +def trimcoef( + c: _SeriesLikeCoef_co | object, + tol: _FloatLike_co = ..., +) -> _ObjectSeries: ... + +@overload +def getdomain( # type: ignore[overload-overlap] + x: _FloatArray | npt.NDArray[np.integer[Any]], +) -> _Array2[np.float64]: ... +@overload +def getdomain( + x: _ComplexArray, +) -> _Array2[np.complex128]: ... +@overload +def getdomain( + x: _ObjectArray, +) -> _Array2[np.object_]: ... +@overload +def getdomain( # type: ignore[overload-overlap] + x: _SeriesLikeFloat_co | float, +) -> _Array2[np.float64]: ... +@overload +def getdomain( + x: _SeriesLikeComplex_co | complex, +) -> _Array2[np.complex128]: ... +@overload +def getdomain( + x: _SeriesLikeCoef_co | object, +) -> _Array2[np.object_]: ... + +@overload +def mapparms( # type: ignore[overload-overlap] + old: npt.NDArray[np.floating[Any] | np.integer[Any]], + new: npt.NDArray[np.floating[Any] | np.integer[Any]], +) -> _Tuple2[np.floating[Any]]: ... 
+@overload +def mapparms( + old: npt.NDArray[np.number[Any]], + new: npt.NDArray[np.number[Any]], +) -> _Tuple2[np.complexfloating[Any, Any]]: ... +@overload +def mapparms( + old: npt.NDArray[np.object_ | np.number[Any]], + new: npt.NDArray[np.object_ | np.number[Any]], +) -> _Tuple2[object]: ... +@overload +def mapparms( # type: ignore[overload-overlap] + old: Sequence[float], + new: Sequence[float], +) -> _Tuple2[float]: ... +@overload +def mapparms( + old: Sequence[complex], + new: Sequence[complex], +) -> _Tuple2[complex]: ... +@overload +def mapparms( + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> _Tuple2[np.floating[Any]]: ... +@overload +def mapparms( + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> _Tuple2[np.complexfloating[Any, Any]]: ... +@overload +def mapparms( + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> _Tuple2[object]: ... + +@overload +def mapdomain( # type: ignore[overload-overlap] + x: _FloatLike_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> np.floating[Any]: ... +@overload +def mapdomain( + x: _NumberLike_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> np.complexfloating[Any, Any]: ... +@overload +def mapdomain( # type: ignore[overload-overlap] + x: npt.NDArray[np.floating[Any] | np.integer[Any]], + old: npt.NDArray[np.floating[Any] | np.integer[Any]], + new: npt.NDArray[np.floating[Any] | np.integer[Any]], +) -> _FloatSeries: ... +@overload +def mapdomain( + x: npt.NDArray[np.number[Any]], + old: npt.NDArray[np.number[Any]], + new: npt.NDArray[np.number[Any]], +) -> _ComplexSeries: ... +@overload +def mapdomain( + x: npt.NDArray[np.object_ | np.number[Any]], + old: npt.NDArray[np.object_ | np.number[Any]], + new: npt.NDArray[np.object_ | np.number[Any]], +) -> _ObjectSeries: ... +@overload +def mapdomain( # type: ignore[overload-overlap] + x: _SeriesLikeFloat_co, + old: _SeriesLikeFloat_co, + new: _SeriesLikeFloat_co, +) -> _FloatSeries: ... 
+@overload +def mapdomain( + x: _SeriesLikeComplex_co, + old: _SeriesLikeComplex_co, + new: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... +@overload +def mapdomain( + x: _SeriesLikeCoef_co, + old:_SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... +@overload +def mapdomain( + x: _CoefLike_co, + old: _SeriesLikeCoef_co, + new: _SeriesLikeCoef_co, +) -> object: ... + +def _nth_slice( + i: SupportsIndex, + ndim: SupportsIndex, +) -> tuple[None | slice, ...]: ... + +_vander_nd: _FuncVanderND[Literal["_vander_nd"]] +_vander_nd_flat: _FuncVanderND[Literal["_vander_nd_flat"]] + +# keep in sync with `._polytypes._FuncFromRoots` +@overload +def _fromroots( # type: ignore[overload-overlap] + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeFloat_co, +) -> _FloatSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeComplex_co, +) -> _ComplexSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeCoef_co, +) -> _ObjectSeries: ... +@overload +def _fromroots( + line_f: _AnyLineF, + mul_f: _AnyMulF, + roots: _SeriesLikeCoef_co, +) -> _CoefSeries: ... + +_valnd: _FuncValND[Literal["_valnd"]] +_gridnd: _FuncValND[Literal["_gridnd"]] + +# keep in sync with `_polytypes._FuncBinOp` +@overload +def _div( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c1: _SeriesLikeFloat_co, + c2: _SeriesLikeFloat_co, +) -> _Tuple2[_FloatSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeComplex_co, + c2: _SeriesLikeComplex_co, +) -> _Tuple2[_ComplexSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_ObjectSeries]: ... +@overload +def _div( + mul_f: _AnyMulF, + c1: _SeriesLikeCoef_co, + c2: _SeriesLikeCoef_co, +) -> _Tuple2[_CoefSeries]: ... 
+ +_add: Final[_FuncBinOp] +_sub: Final[_FuncBinOp] + +# keep in sync with `_polytypes._FuncPow` +@overload +def _pow( # type: ignore[overload-overlap] + mul_f: _AnyMulF, + c: _SeriesLikeFloat_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _FloatSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeComplex_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _ComplexSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _ObjectSeries: ... +@overload +def _pow( + mul_f: _AnyMulF, + c: _SeriesLikeCoef_co, + pow: _AnyInt, + maxpower: None | _AnyInt = ..., +) -> _CoefSeries: ... + +# keep in sync with `_polytypes._FuncFit` +@overload +def _fit( # type: ignore[overload-overlap] + vander_f: _AnyVanderF, + x: _SeriesLikeFloat_co, + y: _ArrayLikeFloat_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeFloat_co = ..., + rcond: None | _FloatLike_co = ..., + full: Literal[False] = ..., + w: None | _SeriesLikeFloat_co = ..., +) -> _FloatArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeComplex_co, + y: _ArrayLikeComplex_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeComplex_co = ..., + rcond: None | _FloatLike_co = ..., + full: Literal[False] = ..., + w: None | _SeriesLikeComplex_co = ..., +) -> _ComplexArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _ArrayLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: None | _FloatLike_co = ..., + full: Literal[False] = ..., + w: None | _SeriesLikeCoef_co = ..., +) -> _CoefArray: ... +@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co, + rcond: None | _FloatLike_co , + full: Literal[True], + /, + w: None | _SeriesLikeCoef_co = ..., +) -> tuple[_CoefSeries, Sequence[np.inexact[Any] | np.int32]]: ... 
+@overload +def _fit( + vander_f: _AnyVanderF, + x: _SeriesLikeCoef_co, + y: _SeriesLikeCoef_co, + deg: _SeriesLikeInt_co, + domain: None | _SeriesLikeCoef_co = ..., + rcond: None | _FloatLike_co = ..., + *, + full: Literal[True], + w: None | _SeriesLikeCoef_co = ..., +) -> tuple[_CoefSeries, Sequence[np.inexact[Any] | np.int32]]: ... + +def _as_int(x: SupportsIndex, desc: str) -> int: ... +def format_float(x: _FloatLike_co, parens: bool = ...) -> str: ... diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index b761668a3b82..162cb0a9bea0 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -5,11 +5,12 @@ from fractions import Fraction import numpy as np import numpy.polynomial.polynomial as poly +import numpy.polynomial.polyutils as pu import pickle from copy import deepcopy from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, - assert_array_equal, assert_raises_regex) + assert_array_equal, assert_raises_regex, assert_warns) def trim(x): @@ -627,3 +628,20 @@ def test_polyline(self): def test_polyline_zero(self): assert_equal(poly.polyline(3, 0), [3]) + + def test_fit_degenerate_domain(self): + p = poly.Polynomial.fit([1], [2], deg=0) + assert_equal(p.coef, [2.]) + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=0) + assert_almost_equal(p.coef, [2.05]) + with assert_warns(pu.RankWarning): + p = poly.Polynomial.fit([1, 1], [2, 2.1], deg=1) + + def test_result_type(self): + w = np.array([-1, 1], dtype=np.float32) + p = np.polynomial.Polynomial(w, domain=w, window=w) + v = p(2) + assert_equal(v.dtype, np.float32) + + arr = np.polydiv(1, np.float32(1)) + assert_equal(arr[0].dtype, np.float64) diff --git a/numpy/random/_bounded_integers.pxd.in b/numpy/random/_bounded_integers.pxd.in index 5ae5a806715c..bdcb32a7e212 100644 --- a/numpy/random/_bounded_integers.pxd.in +++ b/numpy/random/_bounded_integers.pxd.in @@ -6,7 +6,7 @@ ctypedef 
np.npy_bool bool_t from numpy.random cimport bitgen_t -cdef inline uint64_t _gen_mask(uint64_t max_val) nogil: +cdef inline uint64_t _gen_mask(uint64_t max_val) noexcept nogil: """Mask generator for use in bounded random numbers""" # Smallest bit mask >= max cdef uint64_t mask = max_val diff --git a/numpy/random/_examples/cython/meson.build b/numpy/random/_examples/cython/meson.build index 1ad754c53691..7aa367d13787 100644 --- a/numpy/random/_examples/cython/meson.build +++ b/numpy/random/_examples/cython/meson.build @@ -11,6 +11,11 @@ if not cy.version().version_compare('>=3.0.6') error('tests requires Cython >= 3.0.6') endif +base_cython_args = [] +if cy.version().version_compare('>=3.1.0') + base_cython_args += ['-Xfreethreading_compatible=True'] +endif + _numpy_abs = run_command(py3, ['-c', 'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include() + "../../.."))'], check: true).stdout().strip() @@ -27,6 +32,7 @@ py3.extension_module( install: false, include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, ) py3.extension_module( 'extending', @@ -34,13 +40,14 @@ py3.extension_module( install: false, include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], + cython_args: base_cython_args, ) py3.extension_module( 'extending_cpp', 'extending_distributions.pyx', install: false, override_options : ['cython_language=cpp'], - cython_args: ['--module-name', 'extending_cpp'], + cython_args: base_cython_args + ['--module-name', 'extending_cpp'], include_directories: [npy_include_path], dependencies: [npyrandom_lib, npymath_lib], ) diff --git a/numpy/random/_generator.pyi b/numpy/random/_generator.pyi index 5dc2ebf6c1ef..16a0e5e0ff8d 100644 --- a/numpy/random/_generator.pyi +++ b/numpy/random/_generator.pyi @@ -68,9 +68,12 @@ class Generator: def __init__(self, bit_generator: BitGenerator) -> None: ... def __repr__(self) -> str: ... 
def __str__(self) -> str: ... - def __getstate__(self) -> dict[str, Any]: ... - def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[str], Generator], tuple[str], dict[str, Any]]: ... + def __getstate__(self) -> None: ... + def __setstate__(self, state: dict[str, Any] | None) -> None: ... + def __reduce__(self) -> tuple[ + Callable[[BitGenerator], Generator], + tuple[BitGenerator], + None]: ... @property def bit_generator(self) -> BitGenerator: ... def spawn(self, n_children: int) -> list[Generator]: ... @@ -210,6 +213,7 @@ class Generator: self, low: int, high: None | int = ..., + size: None = ..., ) -> int: ... @overload def integers( # type: ignore[misc] @@ -221,6 +225,15 @@ class Generator: endpoint: bool = ..., ) -> bool: ... @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: type[np.bool] = ..., + endpoint: bool = ..., + ) -> np.bool: ... + @overload def integers( # type: ignore[misc] self, low: int, @@ -230,6 +243,96 @@ class Generator: endpoint: bool = ..., ) -> int: ... @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + endpoint: bool = ..., + ) -> uint8: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + endpoint: bool = ..., + ) -> uint16: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + endpoint: bool = ..., + ) -> uint32: ... 
+ @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + endpoint: bool = ..., + ) -> uint: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + endpoint: bool = ..., + ) -> uint64: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + endpoint: bool = ..., + ) -> int8: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + endpoint: bool = ..., + ) -> int16: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + endpoint: bool = ..., + ) -> int32: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int_] | type[int] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + endpoint: bool = ..., + ) -> int_: ... + @overload + def integers( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + endpoint: bool = ..., + ) -> int64: ... 
+ @overload def integers( # type: ignore[misc] self, low: _ArrayLikeInt_co, diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx index ab8a15555ae3..221ac817b783 100644 --- a/numpy/random/_generator.pyx +++ b/numpy/random/_generator.pyx @@ -146,7 +146,7 @@ cdef class Generator: Container for the BitGenerators. - ``Generator`` exposes a number of methods for generating random + `Generator` exposes a number of methods for generating random numbers drawn from a variety of probability distributions. In addition to the distribution-specific arguments, each method takes a keyword argument `size` that defaults to ``None``. If `size` is ``None``, then a single @@ -159,7 +159,7 @@ cdef class Generator: **No Compatibility Guarantee** - ``Generator`` does not provide a version compatibility guarantee. In + `Generator` does not provide a version compatibility guarantee. In particular, as better algorithms evolve the bit stream may change. Parameters @@ -169,10 +169,11 @@ cdef class Generator: Notes ----- - The Python stdlib module `random` contains pseudo-random number generator - with a number of methods that are similar to the ones available in - ``Generator``. It uses Mersenne Twister, and this bit generator can - be accessed using ``MT19937``. ``Generator``, besides being + The Python stdlib module :external+python:mod:`random` contains + pseudo-random number generator with a number of methods that are similar + to the ones available in `Generator`. + It uses Mersenne Twister, and this bit generator can + be accessed using `MT19937`. `Generator`, besides being NumPy-aware, has the advantage that it provides a much larger number of probability distributions to choose from. 
@@ -214,17 +215,19 @@ cdef class Generator: # Pickling support: def __getstate__(self): - return self.bit_generator.state + return None - def __setstate__(self, state): - self.bit_generator.state = state + def __setstate__(self, bit_gen): + if isinstance(bit_gen, dict): + # Legacy path + # Prior to 2.0.x only the state of the underlying bit generator + # was preserved and any seed sequence information was lost + self.bit_generator.state = bit_gen def __reduce__(self): - ctor, name_tpl, state = self._bit_generator.__reduce__() - from ._pickle import __generator_ctor - # Requirements of __generator_ctor are (name, ctor) - return __generator_ctor, (name_tpl[0], ctor), state + # Requirements of __generator_ctor are (bit_generator, ) + return __generator_ctor, (self._bit_generator, ), None @property def bit_generator(self): @@ -990,7 +993,7 @@ cdef class Generator: if a.ndim == 0: return idx - if not is_scalar and idx.ndim == 0: + if not is_scalar and idx.ndim == 0 and a.ndim == 1: # If size == () then the user requested a 0-d array as opposed to # a scalar object when size is None. However a[idx] is always a # scalar and not an array. So this makes sure the result is an @@ -1546,7 +1549,7 @@ cdef class Generator: and ``m = 20`` is: >>> import matplotlib.pyplot as plt - >>> from scipy import stats # doctest: +SKIP + >>> from scipy import stats >>> dfnum, dfden, size = 20, 20, 10000 >>> s = rng.f(dfnum=dfnum, dfden=dfden, size=size) >>> bins, density, _ = plt.hist(s, 30, density=True) @@ -5020,14 +5023,14 @@ def default_rng(seed=None): is instantiated. This function does not manage a default global instance. See :ref:`seeding_and_entropy` for more information about seeding. - + Examples -------- - ``default_rng`` is the recommended constructor for the random number class - ``Generator``. Here are several ways we can construct a random - number generator using ``default_rng`` and the ``Generator`` class. 
- - Here we use ``default_rng`` to generate a random float: + `default_rng` is the recommended constructor for the random number class + `Generator`. Here are several ways we can construct a random + number generator using `default_rng` and the `Generator` class. + + Here we use `default_rng` to generate a random float: >>> import numpy as np >>> rng = np.random.default_rng(12345) @@ -5039,7 +5042,7 @@ def default_rng(seed=None): >>> type(rfloat) - Here we use ``default_rng`` to generate 3 random integers between 0 + Here we use `default_rng` to generate 3 random integers between 0 (inclusive) and 10 (exclusive): >>> import numpy as np diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx index 1ebf43faa117..826cb8441ef1 100644 --- a/numpy/random/_mt19937.pyx +++ b/numpy/random/_mt19937.pyx @@ -67,9 +67,9 @@ cdef class MT19937(BitGenerator): Notes ----- - ``MT19937`` provides a capsule containing function pointers that produce + `MT19937` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers [1]_. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. The Python stdlib module "random" also contains a Mersenne Twister @@ -77,7 +77,7 @@ cdef class MT19937(BitGenerator): **State and Seeding** - The ``MT19937`` state vector consists of a 624-element array of + The `MT19937` state vector consists of a 624-element array of 32-bit unsigned integers plus a single integer value between 0 and 624 that indexes the current position within the main array. @@ -111,7 +111,7 @@ cdef class MT19937(BitGenerator): **Compatibility Guarantee** - ``MT19937`` makes a guarantee that a fixed seed will always produce + `MT19937` makes a guarantee that a fixed seed will always produce the same random integer stream. 
References diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx index 77e2090e72bf..250bf967bba2 100644 --- a/numpy/random/_pcg64.pyx +++ b/numpy/random/_pcg64.pyx @@ -73,9 +73,9 @@ cdef class PCG64(BitGenerator): The specific member of the PCG family that we use is PCG XSL RR 128/64 as described in the paper ([2]_). - ``PCG64`` provides a capsule containing function pointers that produce + `PCG64` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. Supports the method :meth:`advance` to advance the RNG an arbitrary number of @@ -84,7 +84,7 @@ cdef class PCG64(BitGenerator): **State and Seeding** - The ``PCG64`` state vector consists of 2 unsigned 128-bit values, + The `PCG64` state vector consists of 2 unsigned 128-bit values, which are represented externally as Python ints. One is the state of the PRNG, which is advanced by a linear congruential generator (LCG). The second is a fixed odd increment used in the LCG. @@ -104,7 +104,7 @@ cdef class PCG64(BitGenerator): **Compatibility Guarantee** - ``PCG64`` makes a guarantee that a fixed seed will always produce + `PCG64` makes a guarantee that a fixed seed will always produce the same random integer stream. References @@ -305,13 +305,13 @@ cdef class PCG64DXSM(BitGenerator): generator ([1]_, [2]_). PCG-64 DXSM has a period of :math:`2^{128}` and supports advancing an arbitrary number of steps as well as :math:`2^{127}` streams. The specific member of the PCG family that we use is PCG CM DXSM 128/64. 
It - differs from ``PCG64`` in that it uses the stronger DXSM output function, + differs from `PCG64` in that it uses the stronger DXSM output function, a 64-bit "cheap multiplier" in the LCG, and outputs from the state before advancing it rather than advance-then-output. - ``PCG64DXSM`` provides a capsule containing function pointers that produce + `PCG64DXSM` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. Supports the method :meth:`advance` to advance the RNG an arbitrary number of @@ -320,7 +320,7 @@ cdef class PCG64DXSM(BitGenerator): **State and Seeding** - The ``PCG64DXSM`` state vector consists of 2 unsigned 128-bit values, + The `PCG64DXSM` state vector consists of 2 unsigned 128-bit values, which are represented externally as Python ints. One is the state of the PRNG, which is advanced by a linear congruential generator (LCG). The second is a fixed odd increment used in the LCG. @@ -340,7 +340,7 @@ cdef class PCG64DXSM(BitGenerator): **Compatibility Guarantee** - ``PCG64DXSM`` makes a guarantee that a fixed seed will always produce + `PCG64DXSM` makes a guarantee that a fixed seed will always produce the same random integer stream. References diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx index d90da6a9b657..a046d9441fae 100644 --- a/numpy/random/_philox.pyx +++ b/numpy/random/_philox.pyx @@ -93,14 +93,14 @@ cdef class Philox(BitGenerator): the sequence in increments of :math:`2^{128}`. These features allow multiple non-overlapping sequences to be generated. - ``Philox`` provides a capsule containing function pointers that produce + `Philox` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. 
These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. **State and Seeding** - The ``Philox`` state vector consists of a 256-bit value encoded as + The `Philox` state vector consists of a 256-bit value encoded as a 4-element uint64 array and a 128-bit value encoded as a 2-element uint64 array. The former is a counter which is incremented by 1 for every 4 64-bit randoms produced. The second is a key which determined the sequence @@ -122,10 +122,10 @@ cdef class Philox(BitGenerator): >>> sg = SeedSequence(1234) >>> rg = [Generator(Philox(s)) for s in sg.spawn(10)] - ``Philox`` can be used in parallel applications by calling the ``jumped`` - method to advances the state as-if :math:`2^{128}` random numbers have - been generated. Alternatively, ``advance`` can be used to advance the - counter for any positive step in [0, 2**256). When using ``jumped``, all + `Philox` can be used in parallel applications by calling the :meth:`jumped` + method to advance the state as-if :math:`2^{128}` random numbers have + been generated. Alternatively, :meth:`advance` can be used to advance the + counter for any positive step in [0, 2**256). When using :meth:`jumped`, all generators should be chained to ensure that the segments come from the same sequence. @@ -136,7 +136,7 @@ cdef class Philox(BitGenerator): ... rg.append(Generator(bit_generator)) ... bit_generator = bit_generator.jumped() - Alternatively, ``Philox`` can be used in parallel applications by using + Alternatively, `Philox` can be used in parallel applications by using a sequence of distinct keys where each instance uses different key. 
>>> key = 2**96 + 2**33 + 2**17 + 2**9 @@ -144,7 +144,7 @@ cdef class Philox(BitGenerator): **Compatibility Guarantee** - ``Philox`` makes a guarantee that a fixed ``seed`` will always produce + `Philox` makes a guarantee that a fixed ``seed`` will always produce the same random integer stream. Examples diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py index 073993726eb3..842bd441a502 100644 --- a/numpy/random/_pickle.py +++ b/numpy/random/_pickle.py @@ -1,3 +1,4 @@ +from .bit_generator import BitGenerator from .mtrand import RandomState from ._philox import Philox from ._pcg64 import PCG64, PCG64DXSM @@ -14,27 +15,30 @@ } -def __bit_generator_ctor(bit_generator_name='MT19937'): +def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'): """ Pickling helper function that returns a bit generator object Parameters ---------- - bit_generator_name : str - String containing the name of the BitGenerator + bit_generator : type[BitGenerator] or str + BitGenerator class or string containing the name of the BitGenerator Returns ------- - bit_generator : BitGenerator + BitGenerator BitGenerator instance """ - if bit_generator_name in BitGenerators: - bit_generator = BitGenerators[bit_generator_name] + if isinstance(bit_generator, type): + bit_gen_class = bit_generator + elif bit_generator in BitGenerators: + bit_gen_class = BitGenerators[bit_generator] else: - raise ValueError(str(bit_generator_name) + ' is not a known ' - 'BitGenerator module.') + raise ValueError( + str(bit_generator) + ' is not a known BitGenerator module.' 
+ ) - return bit_generator() + return bit_gen_class() def __generator_ctor(bit_generator_name="MT19937", @@ -44,8 +48,9 @@ def __generator_ctor(bit_generator_name="MT19937", Parameters ---------- - bit_generator_name : str - String containing the core BitGenerator's name + bit_generator_name : str or BitGenerator + String containing the core BitGenerator's name or a + BitGenerator instance bit_generator_ctor : callable, optional Callable function that takes bit_generator_name as its only argument and returns an instantized bit generator. @@ -55,6 +60,9 @@ def __generator_ctor(bit_generator_name="MT19937", rg : Generator Generator using the named core BitGenerator """ + if isinstance(bit_generator_name, BitGenerator): + return Generator(bit_generator_name) + # Legacy path that uses a bit generator name and ctor return Generator(bit_generator_ctor(bit_generator_name)) @@ -76,5 +84,6 @@ def __randomstate_ctor(bit_generator_name="MT19937", rs : RandomState Legacy RandomState using the named core BitGenerator """ - + if isinstance(bit_generator_name, BitGenerator): + return RandomState(bit_generator_name) return RandomState(bit_generator_ctor(bit_generator_name)) diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx index 81a4bc764026..12b48059cef2 100644 --- a/numpy/random/_sfc64.pyx +++ b/numpy/random/_sfc64.pyx @@ -50,30 +50,30 @@ cdef class SFC64(BitGenerator): Notes ----- - ``SFC64`` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast - Chaotic PRNG ([1]_). ``SFC64`` has a few different cycles that one might be + `SFC64` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast + Chaotic PRNG ([1]_). `SFC64` has a few different cycles that one might be on, depending on the seed; the expected period will be about - :math:`2^{255}` ([2]_). ``SFC64`` incorporates a 64-bit counter which means + :math:`2^{255}` ([2]_). 
`SFC64` incorporates a 64-bit counter which means that the absolute minimum cycle length is :math:`2^{64}` and that distinct seeds will not run into each other for at least :math:`2^{64}` iterations. - ``SFC64`` provides a capsule containing function pointers that produce + `SFC64` provides a capsule containing function pointers that produce doubles, and unsigned 32 and 64- bit integers. These are not - directly consumable in Python and must be consumed by a ``Generator`` + directly consumable in Python and must be consumed by a `Generator` or similar object that supports low-level access. **State and Seeding** - The ``SFC64`` state vector consists of 4 unsigned 64-bit values. The last + The `SFC64` state vector consists of 4 unsigned 64-bit values. The last is a 64-bit counter that increments by 1 each iteration. The input seed is processed by `SeedSequence` to generate the first - 3 values, then the ``SFC64`` algorithm is iterated a small number of times + 3 values, then the `SFC64` algorithm is iterated a small number of times to mix. **Compatibility Guarantee** - ``SFC64`` makes a guarantee that a fixed seed will always produce the same + `SFC64` makes a guarantee that a fixed seed will always produce the same random integer stream. References diff --git a/numpy/random/bit_generator.pyi b/numpy/random/bit_generator.pyi index 4556658efff4..d99278e861ea 100644 --- a/numpy/random/bit_generator.pyi +++ b/numpy/random/bit_generator.pyi @@ -92,11 +92,17 @@ class SeedSequence(ISpawnableSeedSequence): class BitGenerator(abc.ABC): lock: Lock def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ... - def __getstate__(self) -> dict[str, Any]: ... - def __setstate__(self, state: dict[str, Any]) -> None: ... + def __getstate__(self) -> tuple[dict[str, Any], ISeedSequence]: ... + def __setstate__( + self, state_seed_seq: dict[str, Any] | tuple[dict[str, Any], ISeedSequence] + ) -> None: ... 
def __reduce__( self, - ) -> tuple[Callable[[str], BitGenerator], tuple[str], tuple[dict[str, Any]]]: ... + ) -> tuple[ + Callable[[str], BitGenerator], + tuple[str], + tuple[dict[str, Any], ISeedSequence] + ]: ... @abc.abstractmethod @property def state(self) -> Mapping[str, Any]: ... diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx index e49902f5c330..c999e6e32794 100644 --- a/numpy/random/bit_generator.pyx +++ b/numpy/random/bit_generator.pyx @@ -537,14 +537,27 @@ cdef class BitGenerator(): # Pickling support: def __getstate__(self): - return self.state + return self.state, self._seed_seq - def __setstate__(self, state): - self.state = state + def __setstate__(self, state_seed_seq): + + if isinstance(state_seed_seq, dict): + # Legacy path + # Prior to 2.0.x only the state of the underlying bit generator + # was preserved and any seed sequence information was lost + self.state = state_seed_seq + else: + self._seed_seq = state_seed_seq[1] + self.state = state_seed_seq[0] def __reduce__(self): from ._pickle import __bit_generator_ctor - return __bit_generator_ctor, (self.state['bit_generator'],), self.state + + return ( + __bit_generator_ctor, + (type(self), ), + (self.state, self._seed_seq) + ) @property def state(self): diff --git a/numpy/random/meson.build b/numpy/random/meson.build index 2da23a168b8a..f2f2e0ac755c 100644 --- a/numpy/random/meson.build +++ b/numpy/random/meson.build @@ -52,6 +52,11 @@ if host_machine.system() == 'cygwin' c_args_random += ['-Wl,--export-all-symbols'] endif +cython_args = [] +if cy.version().version_compare('>=3.1.0') + cython_args += ['-Xfreethreading_compatible=True'] +endif + # name, sources, extra c_args, extra static libs to link random_pyx_sources = [ ['_bounded_integers', _bounded_integers_pyx, [], [npyrandom_lib, npymath_lib]], @@ -83,6 +88,7 @@ foreach gen: random_pyx_sources link_with: gen[3], install: true, subdir: 'numpy/random', + cython_args: cython_args, ) endforeach @@ -123,7 +129,8 @@ 
py.install_sources( 'tests/test_seed_sequence.py', 'tests/test_smoke.py', ], - subdir: 'numpy/random/tests' + subdir: 'numpy/random/tests', + install_tag: 'tests' ) py.install_sources( @@ -139,8 +146,12 @@ py.install_sources( 'tests/data/philox-testset-2.csv', 'tests/data/sfc64-testset-1.csv', 'tests/data/sfc64-testset-2.csv', + 'tests/data/sfc64_np126.pkl.gz', + 'tests/data/generator_pcg64_np126.pkl.gz', + 'tests/data/generator_pcg64_np121.pkl.gz', ], - subdir: 'numpy/random/tests/data' + subdir: 'numpy/random/tests/data', + install_tag: 'tests' ) py.install_sources( diff --git a/numpy/random/mtrand.pyi b/numpy/random/mtrand.pyi index dcbc91292647..dbd3cd609495 100644 --- a/numpy/random/mtrand.pyi +++ b/numpy/random/mtrand.pyi @@ -11,12 +11,14 @@ from numpy import ( int16, int32, int64, + int_, long, - ulong, uint8, uint16, uint32, uint64, + uint, + ulong, ) from numpy.random.bit_generator import BitGenerator from numpy._typing import ( @@ -34,6 +36,7 @@ from numpy._typing import ( _Int16Codes, _Int32Codes, _Int64Codes, + _IntCodes, _LongCodes, _ShapeLike, _SingleCodes, @@ -42,6 +45,7 @@ from numpy._typing import ( _UInt16Codes, _UInt32Codes, _UInt64Codes, + _UIntCodes, _ULongCodes, ) @@ -69,7 +73,7 @@ class RandomState: def __str__(self) -> str: ... def __getstate__(self) -> dict[str, Any]: ... def __setstate__(self, state: dict[str, Any]) -> None: ... - def __reduce__(self) -> tuple[Callable[[str], RandomState], tuple[str], dict[str, Any]]: ... + def __reduce__(self) -> tuple[Callable[[BitGenerator], RandomState], tuple[BitGenerator], dict[str, Any]]: ... def seed(self, seed: None | _ArrayLikeFloat_co = ...) -> None: ... @overload def get_state(self, legacy: Literal[False] = ...) -> dict[str, Any]: ... @@ -114,6 +118,7 @@ class RandomState: self, low: int, high: None | int = ..., + size: None = ..., ) -> int: ... @overload def randint( # type: ignore[misc] @@ -124,6 +129,14 @@ class RandomState: dtype: type[bool] = ..., ) -> bool: ... 
@overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: type[np.bool] = ..., + ) -> np.bool: ... + @overload def randint( # type: ignore[misc] self, low: int, @@ -132,6 +145,102 @@ class RandomState: dtype: type[int] = ..., ) -> int: ... @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint8] | type[uint8] | _UInt8Codes | _SupportsDType[dtype[uint8]] = ..., + ) -> uint8: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint16] | type[uint16] | _UInt16Codes | _SupportsDType[dtype[uint16]] = ..., + ) -> uint16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint32] | type[uint32] | _UInt32Codes | _SupportsDType[dtype[uint32]] = ..., + ) -> uint32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint] | type[uint] | _UIntCodes | _SupportsDType[dtype[uint]] = ..., + ) -> uint: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[ulong] | type[ulong] | _ULongCodes | _SupportsDType[dtype[ulong]] = ..., + ) -> ulong: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[uint64] | type[uint64] | _UInt64Codes | _SupportsDType[dtype[uint64]] = ..., + ) -> uint64: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int8] | type[int8] | _Int8Codes | _SupportsDType[dtype[int8]] = ..., + ) -> int8: ... 
+ @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int16] | type[int16] | _Int16Codes | _SupportsDType[dtype[int16]] = ..., + ) -> int16: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int32] | type[int32] | _Int32Codes | _SupportsDType[dtype[int32]] = ..., + ) -> int32: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int_] | type[int_] | _IntCodes | _SupportsDType[dtype[int_]] = ..., + ) -> int_: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[long] | type[long] | _LongCodes | _SupportsDType[dtype[long]] = ..., + ) -> long: ... + @overload + def randint( # type: ignore[misc] + self, + low: int, + high: None | int = ..., + size: None = ..., + dtype: dtype[int64] | type[int64] | _Int64Codes | _SupportsDType[dtype[int64]] = ..., + ) -> int64: ... + @overload def randint( # type: ignore[misc] self, low: _ArrayLikeInt_co, diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx index d67e4533f663..b42b0a7764b8 100644 --- a/numpy/random/mtrand.pyx +++ b/numpy/random/mtrand.pyx @@ -205,10 +205,13 @@ cdef class RandomState: self.set_state(state) def __reduce__(self): - ctor, name_tpl, _ = self._bit_generator.__reduce__() - from ._pickle import __randomstate_ctor - return __randomstate_ctor, (name_tpl[0], ctor), self.get_state(legacy=False) + # The third argument containing the state is required here since + # RandomState contains state information in addition to the state + # contained in the bit generator that described the gaussian + # generator. This argument is passed to __setstate__ after the + # Generator is created. 
+ return __randomstate_ctor, (self._bit_generator, ), self.get_state(legacy=False) cdef _initialize_bit_generator(self, bit_generator): self._bit_generator = bit_generator diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c index 1241329151a9..9f988f857d61 100644 --- a/numpy/random/src/distributions/distributions.c +++ b/numpy/random/src/distributions/distributions.c @@ -436,16 +436,23 @@ double random_beta(bitgen_t *bitgen_state, double a, double b) { XpY = X + Y; /* Reject if both U and V are 0.0, which is approx 1 in 10^106 */ if ((XpY <= 1.0) && (U + V > 0.0)) { - if (XpY > 0) { + if ((X > 0) && (Y > 0)) { return X / XpY; } else { - double logX = log(U) / a; - double logY = log(V) / b; - double logM = logX > logY ? logX : logY; - logX -= logM; - logY -= logM; - - return exp(logX - log(exp(logX) + exp(logY))); + /* + * Either X or Y underflowed to 0, so we lost information in + * U**(1/a) or V**(1/b). We still compute X/(X+Y) here, but we + * work with logarithms as much as we can to avoid the underflow. + */ + double logX = log(U)/a; + double logY = log(V)/b; + double delta = logX - logY; + if (delta > 0) { + return exp(-log1p(exp(-delta))); + } + else { + return exp(delta - log1p(exp(delta))); + } } } } @@ -998,14 +1005,34 @@ int64_t random_geometric(bitgen_t *bitgen_state, double p) { } RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) { - double am1, b; + double am1, b, Umin; + if (a >= 1025) { + /* + * If a exceeds 1025, the calculation of b will overflow and the loop + * will not terminate. It is safe to simply return 1 here, because the + * probability of generating a value greater than 1 in this case is + * less than 3e-309. + */ + return (RAND_INT_TYPE) 1; + } am1 = a - 1.0; b = pow(2.0, am1); + /* + * In the while loop, X is generated from the uniform distribution (Umin, 1]. 
+ * Values below Umin would result in X being rejected because it is too + * large, so there is no point in including them in the distribution of U. + */ + Umin = pow(RAND_INT_MAX, -am1); while (1) { - double T, U, V, X; + double U01, T, U, V, X; - U = 1.0 - next_double(bitgen_state); + /* + * U is sampled from (Umin, 1]. Note that Umin might be 0, and we don't + * want U to be 0. + */ + U01 = next_double(bitgen_state); + U = U01*Umin + (1 - U01); V = next_double(bitgen_state); X = floor(pow(U, -1.0 / am1)); /* diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c index b518b8a03994..14d9ce25f255 100644 --- a/numpy/random/src/legacy/legacy-distributions.c +++ b/numpy/random/src/legacy/legacy-distributions.c @@ -388,7 +388,31 @@ int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) { } int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) { - return (int64_t)random_zipf(bitgen_state, a); + double am1, b; + + am1 = a - 1.0; + b = pow(2.0, am1); + while (1) { + double T, U, V, X; + + U = 1.0 - next_double(bitgen_state); + V = next_double(bitgen_state); + X = floor(pow(U, -1.0 / am1)); + /* + * The real result may be above what can be represented in a signed + * long. Since this is a straightforward rejection algorithm, we can + * just reject this value. This function then models a Zipf + * distribution truncated to sys.maxint. 
+ */ + if (X > (double)RAND_INT_MAX || X < 1.0) { + continue; + } + + T = pow(1.0 + 1.0 / X, am1); + if (V * X * (T - 1.0) / (b - 1.0) <= T / b) { + return (RAND_INT_TYPE)X; + } + } } diff --git a/numpy/random/tests/data/generator_pcg64_np121.pkl.gz b/numpy/random/tests/data/generator_pcg64_np121.pkl.gz new file mode 100644 index 000000000000..b7ad03d8e63b Binary files /dev/null and b/numpy/random/tests/data/generator_pcg64_np121.pkl.gz differ diff --git a/numpy/random/tests/data/generator_pcg64_np126.pkl.gz b/numpy/random/tests/data/generator_pcg64_np126.pkl.gz new file mode 100644 index 000000000000..6c5130b5e745 Binary files /dev/null and b/numpy/random/tests/data/generator_pcg64_np126.pkl.gz differ diff --git a/numpy/random/tests/data/sfc64_np126.pkl.gz b/numpy/random/tests/data/sfc64_np126.pkl.gz new file mode 100644 index 000000000000..94fbceb38f92 Binary files /dev/null and b/numpy/random/tests/data/sfc64_np126.pkl.gz differ diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index fa2ae866beeb..12c2f1d5ab57 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -298,6 +298,24 @@ def test_pickle(self): aa = pickle.loads(pickle.dumps(ss)) assert_equal(ss.state, aa.state) + def test_pickle_preserves_seed_sequence(self): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + bit_generator = self.bit_generator(*self.data1['seed']) + ss = bit_generator.seed_seq + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + bit_generator.seed_seq.spawn(10) + bg_plk = pickle.loads(pickle.dumps(bit_generator)) + ss_plk = bg_plk.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned) + def test_invalid_state_type(self): bit_generator = self.bit_generator(*self.data1['seed']) with pytest.raises(TypeError): @@ 
-349,8 +367,9 @@ def test_getstate(self): bit_generator = self.bit_generator(*self.data1['seed']) state = bit_generator.state alt_state = bit_generator.__getstate__() - assert_state_equal(state, alt_state) - + assert isinstance(alt_state, tuple) + assert_state_equal(state, alt_state[0]) + assert isinstance(alt_state[1], SeedSequence) class TestPhilox(Base): @classmethod @@ -413,6 +432,7 @@ def test_advange_large(self): assert state["state"] == advanced_state + class TestPCG64DXSM(Base): @classmethod def setup_class(cls): @@ -502,6 +522,29 @@ def setup_class(cls): cls.invalid_init_types = [(3.2,), ([None],), (1, None)] cls.invalid_init_values = [(-1,)] + def test_legacy_pickle(self): + # Pickling format was changed in 2.0.x + import gzip + import pickle + + expected_state = np.array( + [ + 9957867060933711493, + 532597980065565856, + 14769588338631205282, + 13 + ], + dtype=np.uint64 + ) + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join(base_path, "data", f"sfc64_np126.pkl.gz") + with gzip.open(pkl_file) as gz: + sfc = pickle.load(gz) + + assert isinstance(sfc, SFC64) + assert_equal(sfc.state["state"]["state"], expected_state) + class TestDefaultRNG: def test_seed(self): diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index a4a84de2ee7c..791fbaba9850 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -10,7 +10,7 @@ import warnings import numpy as np -from numpy.testing import IS_WASM +from numpy.testing import IS_WASM, IS_EDITABLE try: @@ -46,6 +46,10 @@ cython = None +@pytest.mark.skipif( + IS_EDITABLE, + reason='Editable install cannot find .pxd headers' +) @pytest.mark.skipif( sys.platform == "win32" and sys.maxsize < 2**32, reason="Failing in 32-bit Windows wheel build job, skip for now" diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index a0bee225d20b..514f9af2ce8c 100644 --- 
a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1,3 +1,4 @@ +import os.path import sys import hashlib @@ -932,6 +933,15 @@ def test_choice_large_sample(self): res = hashlib.sha256(actual.view(np.int8)).hexdigest() assert_(choice_hash == res) + def test_choice_array_size_empty_tuple(self): + random = Generator(MT19937(self.seed)) + assert_array_equal(random.choice([1, 2, 3], size=()), np.array(1), + strict=True) + assert_array_equal(random.choice([[1, 2, 3]], size=()), [1, 2, 3]) + assert_array_equal(random.choice([[1]], size=()), [1], strict=True) + assert_array_equal(random.choice([[1]], size=(), axis=1), [1], + strict=True) + def test_bytes(self): random = Generator(MT19937(self.seed)) actual = random.bytes(10) @@ -2738,10 +2748,50 @@ def test_generator_ctor_old_style_pickle(): rg = np.random.Generator(np.random.PCG64DXSM(0)) rg.standard_normal(1) # Directly call reduce which is used in pickling - ctor, args, state_a = rg.__reduce__() + ctor, (bit_gen, ), _ = rg.__reduce__() # Simulate unpickling an old pickle that only has the name - assert args[:1] == ("PCG64DXSM",) - b = ctor(*args[:1]) - b.bit_generator.state = state_a + assert bit_gen.__class__.__name__ == "PCG64DXSM" + print(ctor) + b = ctor(*("PCG64DXSM",)) + print(b) + b.bit_generator.state = bit_gen.state state_b = b.bit_generator.state - assert state_a == state_b + assert bit_gen.state == state_b + + +def test_pickle_preserves_seed_sequence(): + # GH 26234 + # Add explicit test that bit generators preserve seed sequences + import pickle + + rg = np.random.Generator(np.random.PCG64DXSM(20240411)) + ss = rg.bit_generator.seed_seq + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + assert_equal(ss.pool, ss_plk.pool) + + rg.bit_generator.seed_seq.spawn(10) + rg_plk = pickle.loads(pickle.dumps(rg)) + ss_plk = rg_plk.bit_generator.seed_seq + assert_equal(ss.state, ss_plk.state) + + 
+@pytest.mark.parametrize("version", [121, 126]) +def test_legacy_pickle(version): + # Pickling format was changes in 1.22.x and in 2.0.x + import pickle + import gzip + + base_path = os.path.split(os.path.abspath(__file__))[0] + pkl_file = os.path.join( + base_path, "data", f"generator_pcg64_np{version}.pkl.gz" + ) + with gzip.open(pkl_file) as gz: + rg = pickle.load(gz) + state = rg.bit_generator.state['state'] + + assert isinstance(rg, Generator) + assert isinstance(rg.bit_generator, np.random.PCG64) + assert state['state'] == 35399562948360463058890781895381311971 + assert state['inc'] == 87136372517582989555478159403783844777 diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py index d451c6acd16d..c34e6bb3ba74 100644 --- a/numpy/random/tests/test_generator_mt19937_regressions.py +++ b/numpy/random/tests/test_generator_mt19937_regressions.py @@ -86,6 +86,29 @@ def test_beta_ridiculously_small_parameters(self): x = self.mt19937.beta(tiny/32, tiny/40, size=50) assert not np.any(np.isnan(x)) + def test_beta_expected_zero_frequency(self): + # gh-24475: For small a and b (e.g. a=0.0025, b=0.0025), beta + # would generate too many zeros. + a = 0.0025 + b = 0.0025 + n = 1000000 + x = self.mt19937.beta(a, b, size=n) + nzeros = np.count_nonzero(x == 0) + # beta CDF at x = np.finfo(np.double).smallest_subnormal/2 + # is p = 0.0776169083131899, e.g, + # + # import numpy as np + # from mpmath import mp + # mp.dps = 160 + # x = mp.mpf(np.finfo(np.float64).smallest_subnormal)/2 + # # CDF of the beta distribution at x: + # p = mp.betainc(a, b, x1=0, x2=x, regularized=True) + # n = 1000000 + # exprected_freq = float(n*p) + # + expected_freq = 77616.90831318991 + assert 0.95*expected_freq < nzeros < 1.05*expected_freq + def test_choice_sum_of_probs_tolerance(self): # The sum of probs should be 1.0 with some tolerance. # For low precision dtypes the tolerance was too tight. 
@@ -163,3 +186,21 @@ def test_geometric_tiny_prob(self): # is 0.9999999999907766, so we expect the result to be all 2**63-1. assert_array_equal(self.mt19937.geometric(p=1e-30, size=3), np.iinfo(np.int64).max) + + def test_zipf_large_parameter(self): + # Regression test for part of gh-9829: a call such as rng.zipf(10000) + # would hang. + n = 8 + sample = self.mt19937.zipf(10000, size=n) + assert_array_equal(sample, np.ones(n, dtype=np.int64)) + + def test_zipf_a_near_1(self): + # Regression test for gh-9829: a call such as rng.zipf(1.0000000000001) + # would hang. + n = 100000 + sample = self.mt19937.zipf(1.0000000000001, size=n) + # Not much of a test, but let's do something more than verify that + # it doesn't hang. Certainly for a monotonically decreasing + # discrete distribution truncated to signed 64 bit integers, more + # than half should be less than 2**62. + assert np.count_nonzero(sample < 2**62) > n/2 diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py index aa24936bae2b..5121a684f693 100644 --- a/numpy/random/tests/test_randomstate.py +++ b/numpy/random/tests/test_randomstate.py @@ -2052,8 +2052,8 @@ def test_randomstate_ctor_old_style_pickle(): # Directly call reduce which is used in pickling ctor, args, state_a = rs.__reduce__() # Simulate unpickling an old pickle that only has the name - assert args[:1] == ("MT19937",) - b = ctor(*args[:1]) + assert args[0].__class__.__name__ == "MT19937" + b = ctor(*("MT19937",)) b.set_state(state_a) state_b = b.get_state(legacy=False) diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py index 7e12561962a9..b402e87384d6 100644 --- a/numpy/random/tests/test_smoke.py +++ b/numpy/random/tests/test_smoke.py @@ -434,13 +434,13 @@ def test_dirichlet(self): def test_pickle(self): pick = pickle.dumps(self.rg) unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) + assert_(type(self.rg) == type(unpick)) 
assert_(comp_state(self.rg.bit_generator.state, unpick.bit_generator.state)) pick = pickle.dumps(self.rg) unpick = pickle.loads(pick) - assert_((type(self.rg) == type(unpick))) + assert_(type(self.rg) == type(unpick)) assert_(comp_state(self.rg.bit_generator.state, unpick.bit_generator.state)) @@ -735,7 +735,7 @@ def test_numpy_state(self): self.rg.bit_generator.state = state state2 = self.rg.bit_generator.state assert_((state[1] == state2['state']['key']).all()) - assert_((state[2] == state2['state']['pos'])) + assert_(state[2] == state2['state']['pos']) class TestPhilox(RNG): diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index 65465ed19760..08cbb0564e67 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -54,7 +54,11 @@ def build_and_import_extension( >>> assert mod.test_bytes(b'abc') """ body = prologue + _make_methods(functions, modname) - init = """PyObject *mod = PyModule_Create(&moduledef); + init = """ + PyObject *mod = PyModule_Create(&moduledef); + #ifdef Py_GIL_DISABLED + PyUnstable_Module_SetGIL(mod, Py_MOD_GIL_NOT_USED); + #endif """ if not build_dir: build_dir = pathlib.Path('.') diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index bae98964f9d4..5133ef7b15e7 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -17,6 +17,7 @@ from warnings import WarningMessage import pprint import sysconfig +import concurrent.futures import numpy as np from numpy._core import ( @@ -39,7 +40,8 @@ 'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY', 'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare', 'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON', - '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE' + '_OLD_PROMOTION', 'IS_MUSL', 'check_support_sve', 'NOGIL_BUILD', + 'IS_EDITABLE', 'run_threaded', ] @@ -54,6 +56,7 @@ class KnownFailureException(Exception): IS_WASM = 
platform.machine() in ["wasm32", "wasm64"] IS_PYPY = sys.implementation.name == 'pypy' IS_PYSTON = hasattr(sys, "pyston_version_info") +IS_EDITABLE = not bool(np.__path__) or 'editable' in np.__path__[0] HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64 @@ -68,6 +71,7 @@ class KnownFailureException(Exception): if 'musl' in _v: IS_MUSL = True +NOGIL_BUILD = bool(sysconfig.get_config_var("Py_GIL_DISABLED")) def assert_(val, msg=''): """ @@ -1376,21 +1380,24 @@ def rundocs(filename=None, raise_on_error=True): raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg)) -def check_support_sve(): +def check_support_sve(__cache=[]): """ gh-22982 """ + if __cache: + return __cache[0] + import subprocess cmd = 'lscpu' try: output = subprocess.run(cmd, capture_output=True, text=True) - return 'sve' in output.stdout - except OSError: - return False - + result = 'sve' in output.stdout + except (OSError, subprocess.SubprocessError): + result = False + __cache.append(result) + return __cache[0] -_SUPPORTS_SVE = check_support_sve() # # assert_raises and assert_raises_regex are taken from unittest. @@ -1949,8 +1956,15 @@ def assert_warns(warning_class, *args, **kwargs): >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4) >>> assert ret == 16 """ - if not args: + if not args and not kwargs: return _assert_warns_context(warning_class) + elif len(args) < 1: + if "match" in kwargs: + raise RuntimeError( + "assert_warns does not use 'match' kwarg, " + "use pytest.warns instead" + ) + raise RuntimeError("assert_warns(...) 
needs at least one arg") func = args[0] args = args[1:] @@ -2687,3 +2701,14 @@ def _get_glibc_version(): _glibcver = _get_glibc_version() _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x) + + +def run_threaded(func, iters, pass_count=False): + """Runs a function many times in parallel""" + with concurrent.futures.ThreadPoolExecutor(max_workers=8) as tpe: + if pass_count: + futures = [tpe.submit(func, i) for i in range(iters)] + else: + futures = [tpe.submit(func) for _ in range(iters)] + for f in futures: + f.result() diff --git a/numpy/testing/_private/utils.pyi b/numpy/testing/_private/utils.pyi index e2272ad2f7d0..113457ae1c55 100644 --- a/numpy/testing/_private/utils.pyi +++ b/numpy/testing/_private/utils.pyi @@ -18,11 +18,8 @@ from typing import ( TypeVar, Final, SupportsIndex, + ParamSpec ) -if sys.version_info >= (3, 10): - from typing import ParamSpec -else: - from typing_extensions import ParamSpec import numpy as np from numpy import number, object_, _FloatValue diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py index 36f9c1617f44..3983ec902356 100644 --- a/numpy/testing/tests/test_utils.py +++ b/numpy/testing/tests/test_utils.py @@ -1035,6 +1035,27 @@ def no_warnings(): assert_equal(before_filters, after_filters, "assert_warns does not preserver warnings state") + def test_args(self): + def f(a=0, b=1): + warnings.warn("yo") + return a + b + + assert assert_warns(UserWarning, f, b=20) == 20 + + with pytest.raises(RuntimeError) as exc: + # assert_warns cannot do regexp matching, use pytest.warns + with assert_warns(UserWarning, match="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + assert "pytest.warns" in str(exc) + + with pytest.raises(RuntimeError) as exc: + # assert_warns cannot do regexp matching, use pytest.warns + with assert_warns(UserWarning, wrong="A"): + warnings.warn("B", UserWarning) + assert "assert_warns" in str(exc) + assert "pytest.warns" not in str(exc) + def 
test_warn_wrong_warning(self): def f(): warnings.warn("yo", DeprecationWarning) @@ -1902,8 +1923,7 @@ def test_xy_rename(assert_func): assert_func(1, y=1) type_message = '...got multiple values for argument' - # explicit linebreak to support Python 3.9 - with pytest.warns(DeprecationWarning, match=dep_message), \ - pytest.raises(TypeError, match=type_message): + with (pytest.warns(DeprecationWarning, match=dep_message), + pytest.raises(TypeError, match=type_message)): assert_func(1, x=1) assert_func(1, 2, y=2) diff --git a/numpy/tests/test_numpy_version.py b/numpy/tests/test_numpy_version.py index 61643426c8d7..d3abcb92c1c3 100644 --- a/numpy/tests/test_numpy_version.py +++ b/numpy/tests/test_numpy_version.py @@ -39,3 +39,16 @@ def test_short_version(): else: assert_(np.__version__.split("+")[0] == np.version.short_version, "short_version mismatch in development version") + + +def test_version_module(): + contents = set([s for s in dir(np.version) if not s.startswith('_')]) + expected = set([ + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', + ]) + + assert contents == expected diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py index 780e1fccb79e..eb96560b9c9a 100644 --- a/numpy/tests/test_public_api.py +++ b/numpy/tests/test_public_api.py @@ -114,14 +114,14 @@ def test_NPY_NO_EXPORT(): "f2py", "fft", "lib", - "lib.format", # was this meant to be public? 
+ "lib.array_utils", + "lib.format", + "lib.introspect", "lib.mixins", - "lib.recfunctions", + "lib.npyio", + "lib.recfunctions", # note: still needs cleaning, was forgotten for 2.0 "lib.scimath", "lib.stride_tricks", - "lib.npyio", - "lib.introspect", - "lib.array_utils", "linalg", "ma", "ma.extras", @@ -134,11 +134,12 @@ def test_NPY_NO_EXPORT(): "polynomial.legendre", "polynomial.polynomial", "random", + "strings", "testing", "testing.overrides", "typing", "typing.mypy_plugin", - "version" # Should be removed for NumPy 2.0 + "version", ]] if sys.version_info < (3, 12): PUBLIC_MODULES += [ @@ -158,7 +159,6 @@ def test_NPY_NO_EXPORT(): "numpy.char", "numpy.emath", "numpy.rec", - "numpy.strings", ] @@ -535,7 +535,7 @@ def test_core_shims_coherence(): # no need to add it to np.core if ( member_name.startswith("_") - or member_name == "tests" + or member_name in ["tests", "strings"] or f"numpy.{member_name}" in PUBLIC_ALIASED_MODULES ): continue @@ -614,8 +614,7 @@ def test_functions_single_location(): # else check if we got a function-like object elif ( inspect.isfunction(member) or - isinstance(member, dispatched_function) or - isinstance(member, np.ufunc) + isinstance(member, (dispatched_function, np.ufunc)) ): if member in visited_functions: diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py index df90fcef8c59..9304c1346cbf 100644 --- a/numpy/tests/test_warnings.py +++ b/numpy/tests/test_warnings.py @@ -67,6 +67,8 @@ def test_warning_calls(): continue if path == base / "random" / "__init__.py": continue + if path == base / "conftest.py": + continue # use tokenize to auto-detect encoding on systems where no # default encoding is defined (e.g. 
LANG='C') with tokenize.open(str(path)) as file: diff --git a/numpy/typing/tests/data/fail/shape.pyi b/numpy/typing/tests/data/fail/shape.pyi new file mode 100644 index 000000000000..3dd6d14f4222 --- /dev/null +++ b/numpy/typing/tests/data/fail/shape.pyi @@ -0,0 +1,6 @@ +from typing import Any +import numpy as np + +# test bounds of _ShapeType_co + +np.ndarray[tuple[str, str], Any] # E: Value of type variable diff --git a/numpy/typing/tests/data/fail/ufuncs.pyi b/numpy/typing/tests/data/fail/ufuncs.pyi index e827267c6072..bbab0dfe3fc2 100644 --- a/numpy/typing/tests/data/fail/ufuncs.pyi +++ b/numpy/typing/tests/data/fail/ufuncs.pyi @@ -15,27 +15,3 @@ np.matmul(AR_f8, AR_f8, where=True) # E: No overload variant np.frexp(AR_f8, out=None) # E: No overload variant np.frexp(AR_f8, out=AR_f8) # E: No overload variant - -np.absolute.outer() # E: "None" not callable -np.frexp.outer() # E: "None" not callable -np.divmod.outer() # E: "None" not callable -np.matmul.outer() # E: "None" not callable - -np.absolute.reduceat() # E: "None" not callable -np.frexp.reduceat() # E: "None" not callable -np.divmod.reduceat() # E: "None" not callable -np.matmul.reduceat() # E: "None" not callable - -np.absolute.reduce() # E: "None" not callable -np.frexp.reduce() # E: "None" not callable -np.divmod.reduce() # E: "None" not callable -np.matmul.reduce() # E: "None" not callable - -np.absolute.accumulate() # E: "None" not callable -np.frexp.accumulate() # E: "None" not callable -np.divmod.accumulate() # E: "None" not callable -np.matmul.accumulate() # E: "None" not callable - -np.frexp.at() # E: "None" not callable -np.divmod.at() # E: "None" not callable -np.matmul.at() # E: "None" not callable diff --git a/numpy/typing/tests/data/pass/arithmetic.py b/numpy/typing/tests/data/pass/arithmetic.py index 496586821582..4ac4e957445c 100644 --- a/numpy/typing/tests/data/pass/arithmetic.py +++ b/numpy/typing/tests/data/pass/arithmetic.py @@ -1,6 +1,6 @@ from __future__ import annotations -from 
typing import Any, Optional +from typing import Any import numpy as np import pytest @@ -26,8 +26,8 @@ class Object: - def __array__(self, dtype: Optional[np.typing.DTypeLike] = None, - copy: Optional[bool] = None) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self, dtype: np.typing.DTypeLike = None, + copy: bool | None = None) -> np.ndarray[Any, np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret diff --git a/numpy/typing/tests/data/pass/fromnumeric.py b/numpy/typing/tests/data/pass/fromnumeric.py index 3d7ef2938e20..7cc2bcfd8b50 100644 --- a/numpy/typing/tests/data/pass/fromnumeric.py +++ b/numpy/typing/tests/data/pass/fromnumeric.py @@ -159,6 +159,12 @@ np.cumsum(A) np.cumsum(B) +np.cumulative_sum(a) +np.cumulative_sum(b) +np.cumulative_sum(c) +np.cumulative_sum(A, axis=0) +np.cumulative_sum(B, axis=0) + np.ptp(b) np.ptp(c) np.ptp(B) @@ -205,6 +211,12 @@ np.cumprod(A) np.cumprod(B) +np.cumulative_prod(a) +np.cumulative_prod(b) +np.cumulative_prod(c) +np.cumulative_prod(A, axis=0) +np.cumulative_prod(B, axis=0) + np.ndim(a) np.ndim(b) np.ndim(c) diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py new file mode 100644 index 000000000000..6b3b138119bb --- /dev/null +++ b/numpy/typing/tests/data/pass/ma.py @@ -0,0 +1,8 @@ +from typing import Any + +import numpy as np +import numpy.ma + + +m : np.ma.MaskedArray[Any, np.dtype[np.float64]] = np.ma.masked_array([1.5, 2, 3], mask=[True, False, True]) + diff --git a/numpy/typing/tests/data/pass/random.py b/numpy/typing/tests/data/pass/random.py index 40b88ce4dfe4..69afb28c48ec 100644 --- a/numpy/typing/tests/data/pass/random.py +++ b/numpy/typing/tests/data/pass/random.py @@ -911,9 +911,7 @@ def_gen.__str__() def_gen.__repr__() -def_gen_state: dict[str, Any] -def_gen_state = def_gen.__getstate__() -def_gen.__setstate__(def_gen_state) +def_gen.__setstate__(dict(def_gen.bit_generator.state)) # RandomState random_st: np.random.RandomState = 
np.random.RandomState() diff --git a/numpy/typing/tests/data/pass/scalars.py b/numpy/typing/tests/data/pass/scalars.py index 7b8931f607eb..53caf7ff817d 100644 --- a/numpy/typing/tests/data/pass/scalars.py +++ b/numpy/typing/tests/data/pass/scalars.py @@ -5,6 +5,7 @@ import numpy as np b = np.bool() +b_ = np.bool_() u8 = np.uint64() i8 = np.int64() f8 = np.float64() @@ -121,7 +122,7 @@ def __float__(self) -> float: u8 = np.uint64() f8 = np.float64() c16 = np.complex128() -b_ = np.bool() +b = np.bool() td = np.timedelta64() U = np.str_("1") S = np.bytes_("1") @@ -130,7 +131,7 @@ def __float__(self) -> float: int(i8) int(u8) int(f8) -int(b_) +int(b) int(td) int(U) int(S) diff --git a/numpy/typing/tests/data/pass/shape.py b/numpy/typing/tests/data/pass/shape.py new file mode 100644 index 000000000000..8e2e2faad9a8 --- /dev/null +++ b/numpy/typing/tests/data/pass/shape.py @@ -0,0 +1,18 @@ +from typing import Any, NamedTuple + +import numpy as np +from typing_extensions import assert_type + + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +arr: np.ndarray[XYGrid, Any] = np.empty(XYGrid(2, 2)) + +# Test variance of _ShapeType_co +def accepts_2d(a: np.ndarray[tuple[int, int], Any]) -> None: + return None + +accepts_2d(arr) diff --git a/numpy/typing/tests/data/pass/ufunclike.py b/numpy/typing/tests/data/pass/ufunclike.py index 4baa0334a404..f993939ddba1 100644 --- a/numpy/typing/tests/data/pass/ufunclike.py +++ b/numpy/typing/tests/data/pass/ufunclike.py @@ -1,5 +1,5 @@ from __future__ import annotations -from typing import Any, Optional +from typing import Any import numpy as np @@ -13,8 +13,8 @@ def __floor__(self) -> Object: def __ge__(self, value: object) -> bool: return True - def __array__(self, dtype: Optional[np.typing.DTypeLike] = None, - copy: Optional[bool] = None) -> np.ndarray[Any, np.dtype[np.object_]]: + def __array__(self, dtype: np.typing.DTypeLike | None = None, + copy: bool | None = None) -> np.ndarray[Any, 
np.dtype[np.object_]]: ret = np.empty((), dtype=object) ret[()] = self return ret diff --git a/numpy/typing/tests/data/reveal/array_api_info.pyi b/numpy/typing/tests/data/reveal/array_api_info.pyi new file mode 100644 index 000000000000..b7dd2b934aec --- /dev/null +++ b/numpy/typing/tests/data/reveal/array_api_info.pyi @@ -0,0 +1,76 @@ +import sys +from typing import Literal + +import numpy as np + +if sys.version_info >= (3, 11): + from typing import Never, assert_type +else: + from typing_extensions import Never, assert_type + +info = np.__array_namespace_info__() + +assert_type(info.__module__, Literal["numpy"]) + +assert_type(info.default_device(), Literal["cpu"]) +assert_type(info.devices()[0], Literal["cpu"]) +assert_type(info.devices()[-1], Literal["cpu"]) + +assert_type(info.capabilities()["boolean indexing"], Literal[True]) +assert_type(info.capabilities()["data-dependent shapes"], Literal[True]) + +assert_type(info.default_dtypes()["real floating"], np.dtype[np.float64]) +assert_type(info.default_dtypes()["complex floating"], np.dtype[np.complex128]) +assert_type(info.default_dtypes()["integral"], np.dtype[np.int_]) +assert_type(info.default_dtypes()["indexing"], np.dtype[np.intp]) + +assert_type(info.dtypes()["bool"], np.dtype[np.bool]) +assert_type(info.dtypes()["int8"], np.dtype[np.int8]) +assert_type(info.dtypes()["uint8"], np.dtype[np.uint8]) +assert_type(info.dtypes()["float32"], np.dtype[np.float32]) +assert_type(info.dtypes()["complex64"], np.dtype[np.complex64]) + +assert_type(info.dtypes(kind="bool")["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind="signed integer")["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind="unsigned integer")["uint64"], np.dtype[np.uint64]) +assert_type(info.dtypes(kind="integral")["int32"], np.dtype[np.int32]) +assert_type(info.dtypes(kind="integral")["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind="real floating")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="complex 
floating")["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind="numeric")["int16"], np.dtype[np.int16]) +assert_type(info.dtypes(kind="numeric")["uint16"], np.dtype[np.uint16]) +assert_type(info.dtypes(kind="numeric")["float64"], np.dtype[np.float64]) +assert_type(info.dtypes(kind="numeric")["complex128"], np.dtype[np.complex128]) + +assert_type(info.dtypes(kind=()), dict[Never, Never]) + +assert_type(info.dtypes(kind=("bool",))["bool"], np.dtype[np.bool]) +assert_type(info.dtypes(kind=("signed integer",))["int64"], np.dtype[np.int64]) +assert_type(info.dtypes(kind=("integral",))["uint32"], np.dtype[np.uint32]) +assert_type(info.dtypes(kind=("complex floating",))["complex128"], np.dtype[np.complex128]) +assert_type(info.dtypes(kind=("numeric",))["float64"], np.dtype[np.float64]) + +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["int8"], + np.dtype[np.int8], +) +assert_type( + info.dtypes(kind=("signed integer", "unsigned integer"))["uint8"], + np.dtype[np.uint8], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["int16"], + np.dtype[np.int16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["uint16"], + np.dtype[np.uint16], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["float32"], + np.dtype[np.float32], +) +assert_type( + info.dtypes(kind=("integral", "real floating", "complex floating"))["complex64"], + np.dtype[np.complex64], +) diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 814da1b9d639..2559acbd0e94 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -20,6 +20,7 @@ i8: np.int64 A: npt.NDArray[np.float64] B: SubClass[np.float64] C: list[int] +D: SubClass[np.float64 | np.int64] def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ... 
@@ -31,12 +32,16 @@ assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any]) assert_type(np.array(A), npt.NDArray[np.float64]) assert_type(np.array(B), npt.NDArray[np.float64]) -assert_type(np.array(B, subok=True), SubClass[np.float64]) assert_type(np.array([1, 1.0]), npt.NDArray[Any]) assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any]) assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.array(A, dtype='c16'), npt.NDArray[Any]) assert_type(np.array(A, like=A), npt.NDArray[np.float64]) +assert_type(np.array(A, subok=True), npt.NDArray[np.float64]) +assert_type(np.array(B, subok=True), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=0), SubClass[np.float64]) +assert_type(np.array(B, subok=True, ndmin=1), SubClass[np.float64]) +assert_type(np.array(D), npt.NDArray[np.float64 | np.int64]) assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64]) assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/comparisons.pyi b/numpy/typing/tests/data/reveal/comparisons.pyi index 78c6a8e207fe..034efbef377e 100644 --- a/numpy/typing/tests/data/reveal/comparisons.pyi +++ b/numpy/typing/tests/data/reveal/comparisons.pyi @@ -38,10 +38,10 @@ SEQ = (0, 1, 2, 3, 4) # object-like comparisons -assert_type(i8 > fractions.Fraction(1, 5), Any) -assert_type(i8 > [fractions.Fraction(1, 5)], Any) -assert_type(i8 > decimal.Decimal("1.5"), Any) -assert_type(i8 > [decimal.Decimal("1.5")], Any) +assert_type(i8 > fractions.Fraction(1, 5), np.bool) +assert_type(i8 > [fractions.Fraction(1, 5)], npt.NDArray[np.bool]) +assert_type(i8 > decimal.Decimal("1.5"), np.bool) +assert_type(i8 > [decimal.Decimal("1.5")], npt.NDArray[np.bool]) # Time structures diff --git a/numpy/typing/tests/data/reveal/flatiter.pyi b/numpy/typing/tests/data/reveal/flatiter.pyi index 84d3b03b7d37..efbe75cee26a 100644 --- a/numpy/typing/tests/data/reveal/flatiter.pyi +++ b/numpy/typing/tests/data/reveal/flatiter.pyi 
@@ -1,5 +1,5 @@ import sys -from typing import Any +from typing import Any, Literal, TypeAlias import numpy as np import numpy.typing as npt @@ -10,6 +10,10 @@ else: from typing_extensions import assert_type a: np.flatiter[npt.NDArray[np.str_]] +a_1d: np.flatiter[np.ndarray[tuple[int], np.dtype[np.bytes_]]] + +Size: TypeAlias = Literal[42] +a_1d_fixed: np.flatiter[np.ndarray[tuple[Size], np.dtype[np.object_]]] assert_type(a.base, npt.NDArray[np.str_]) assert_type(a.copy(), npt.NDArray[np.str_]) @@ -23,8 +27,26 @@ assert_type(a[...], npt.NDArray[np.str_]) assert_type(a[:], npt.NDArray[np.str_]) assert_type(a[(...,)], npt.NDArray[np.str_]) assert_type(a[(0,)], np.str_) + assert_type(a.__array__(), npt.NDArray[np.str_]) assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64]) +assert_type( + a_1d.__array__(), + np.ndarray[tuple[int], np.dtype[np.bytes_]], +) +assert_type( + a_1d.__array__(np.dtype(np.float64)), + np.ndarray[tuple[int], np.dtype[np.float64]], +) +assert_type( + a_1d_fixed.__array__(), + np.ndarray[tuple[Size], np.dtype[np.object_]], +) +assert_type( + a_1d_fixed.__array__(np.dtype(np.float64)), + np.ndarray[tuple[Size], np.dtype[np.float64]], +) + a[0] = "a" a[:5] = "a" a[...] 
= "a" diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index 7fa2260bc312..94b3f5e5496d 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -193,6 +193,15 @@ assert_type(np.cumsum(f, dtype=float), npt.NDArray[Any]) assert_type(np.cumsum(f, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumsum(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.cumulative_sum(b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f), npt.NDArray[Any]) +assert_type(np.cumulative_sum(AR_b), npt.NDArray[np.bool]) +assert_type(np.cumulative_sum(AR_f4), npt.NDArray[np.float32]) +assert_type(np.cumulative_sum(f, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_sum(f, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_sum(AR_f4, out=AR_subclass), NDArraySubclass) + assert_type(np.ptp(b), np.bool) assert_type(np.ptp(f4), np.float32) assert_type(np.ptp(f), Any) @@ -249,6 +258,17 @@ assert_type(np.cumprod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) assert_type(np.cumprod(AR_f4, dtype=float), npt.NDArray[Any]) assert_type(np.cumprod(AR_f4, out=AR_subclass), NDArraySubclass) +assert_type(np.cumulative_prod(AR_b), npt.NDArray[np.int_]) +assert_type(np.cumulative_prod(AR_u8), npt.NDArray[np.uint64]) +assert_type(np.cumulative_prod(AR_i8), npt.NDArray[np.int64]) +assert_type(np.cumulative_prod(AR_f4), npt.NDArray[np.floating[Any]]) +assert_type(np.cumulative_prod(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(np.cumulative_prod(AR_O), npt.NDArray[np.object_]) +assert_type(np.cumulative_prod(AR_f4, axis=0), npt.NDArray[np.floating[Any]]) +assert_type(np.cumulative_prod(AR_f4, dtype=np.float64), npt.NDArray[np.float64]) +assert_type(np.cumulative_prod(AR_f4, dtype=float), npt.NDArray[Any]) +assert_type(np.cumulative_prod(AR_f4, 
out=AR_subclass), NDArraySubclass) + assert_type(np.ndim(b), int) assert_type(np.ndim(f4), int) assert_type(np.ndim(f), int) diff --git a/numpy/typing/tests/data/reveal/getlimits.pyi b/numpy/typing/tests/data/reveal/getlimits.pyi index f53fdf48824e..57af90cccb8a 100644 --- a/numpy/typing/tests/data/reveal/getlimits.pyi +++ b/numpy/typing/tests/data/reveal/getlimits.pyi @@ -4,9 +4,9 @@ from typing import Any import numpy as np if sys.version_info >= (3, 11): - from typing import assert_type + from typing import assert_type, LiteralString else: - from typing_extensions import assert_type + from typing_extensions import assert_type, LiteralString f: float f8: np.float64 @@ -49,8 +49,8 @@ assert_type(np.iinfo(u4), np.iinfo[np.uint32]) assert_type(np.iinfo('i2'), np.iinfo[Any]) assert_type(iinfo_i8.dtype, np.dtype[np.int64]) -assert_type(iinfo_i8.kind, str) +assert_type(iinfo_i8.kind, LiteralString) assert_type(iinfo_i8.bits, int) -assert_type(iinfo_i8.key, str) +assert_type(iinfo_i8.key, LiteralString) assert_type(iinfo_i8.min, int) assert_type(iinfo_i8.max, int) diff --git a/numpy/typing/tests/data/reveal/index_tricks.pyi b/numpy/typing/tests/data/reveal/index_tricks.pyi index 029c8228cae7..ad8be765fbc1 100644 --- a/numpy/typing/tests/data/reveal/index_tricks.pyi +++ b/numpy/typing/tests/data/reveal/index_tricks.pyi @@ -13,24 +13,31 @@ AR_LIKE_b: list[bool] AR_LIKE_i: list[int] AR_LIKE_f: list[float] AR_LIKE_U: list[str] +AR_LIKE_O: list[object] AR_i8: npt.NDArray[np.int64] +AR_O: npt.NDArray[np.object_] assert_type(np.ndenumerate(AR_i8), np.ndenumerate[np.int64]) assert_type(np.ndenumerate(AR_LIKE_f), np.ndenumerate[np.float64]) assert_type(np.ndenumerate(AR_LIKE_U), np.ndenumerate[np.str_]) +assert_type(np.ndenumerate(AR_LIKE_O), np.ndenumerate[np.object_]) assert_type(np.ndenumerate(AR_i8).iter, np.flatiter[npt.NDArray[np.int64]]) assert_type(np.ndenumerate(AR_LIKE_f).iter, np.flatiter[npt.NDArray[np.float64]]) assert_type(np.ndenumerate(AR_LIKE_U).iter, 
np.flatiter[npt.NDArray[np.str_]]) +assert_type(np.ndenumerate(AR_LIKE_O).iter, np.flatiter[npt.NDArray[np.object_]]) assert_type(next(np.ndenumerate(AR_i8)), tuple[tuple[int, ...], np.int64]) assert_type(next(np.ndenumerate(AR_LIKE_f)), tuple[tuple[int, ...], np.float64]) assert_type(next(np.ndenumerate(AR_LIKE_U)), tuple[tuple[int, ...], np.str_]) +# this fails due to an unknown mypy bug +# assert_type(next(np.ndenumerate(AR_LIKE_O)), tuple[tuple[int, ...], Any]) assert_type(iter(np.ndenumerate(AR_i8)), np.ndenumerate[np.int64]) assert_type(iter(np.ndenumerate(AR_LIKE_f)), np.ndenumerate[np.float64]) assert_type(iter(np.ndenumerate(AR_LIKE_U)), np.ndenumerate[np.str_]) +assert_type(iter(np.ndenumerate(AR_LIKE_O)), np.ndenumerate[np.object_]) assert_type(np.ndindex(1, 2, 3), np.ndindex) assert_type(np.ndindex((1, 2, 3)), np.ndindex) diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi index 72974dce64bf..b630a130633a 100644 --- a/numpy/typing/tests/data/reveal/lib_function_base.pyi +++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi @@ -1,4 +1,5 @@ import sys +from fractions import Fraction from typing import Any from collections.abc import Callable @@ -14,6 +15,8 @@ vectorized_func: np.vectorize f8: np.float64 AR_LIKE_f8: list[float] +AR_LIKE_c16: list[complex] +AR_LIKE_O: list[Fraction] AR_i8: npt.NDArray[np.int64] AR_f8: npt.NDArray[np.float64] @@ -159,6 +162,21 @@ assert_type(np.quantile(AR_f8, [0.5], keepdims=True), Any) assert_type(np.quantile(AR_f8, [0.5], axis=[1]), Any) assert_type(np.quantile(AR_f8, [0.5], out=AR_c16), npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_f8), np.float64) +assert_type(np.trapezoid(AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_c16, AR_LIKE_f8), np.complex128) +assert_type(np.trapezoid(AR_LIKE_f8, AR_LIKE_c16), np.complex128) +assert_type(np.trapezoid(AR_LIKE_O), 
float) +assert_type(np.trapezoid(AR_LIKE_O, AR_LIKE_f8), float) +assert_type(np.trapezoid(AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_f8, AR_f8), np.float64 | npt.NDArray[np.float64]) +assert_type(np.trapezoid(AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_c16, AR_c16), np.complex128 | npt.NDArray[np.complex128]) +assert_type(np.trapezoid(AR_m), np.timedelta64 | npt.NDArray[np.timedelta64]) +assert_type(np.trapezoid(AR_O), float | npt.NDArray[np.object_]) +assert_type(np.trapezoid(AR_O, AR_LIKE_f8), float | npt.NDArray[np.object_]) + assert_type(np.meshgrid(AR_f8, AR_i8, copy=False), tuple[npt.NDArray[Any], ...]) assert_type(np.meshgrid(AR_f8, AR_i8, AR_c16, indexing="ij"), tuple[npt.NDArray[Any], ...]) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 5f3526a72d45..783e18f5c632 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -22,9 +22,13 @@ else: class SubClass(npt.NDArray[np.object_]): ... 
f8: np.float64 +i8: np.int64 B: SubClass AR_f8: npt.NDArray[np.float64] AR_i8: npt.NDArray[np.int64] +AR_u1: npt.NDArray[np.uint8] +AR_c8: npt.NDArray[np.complex64] +AR_m: npt.NDArray[np.timedelta64] AR_U: npt.NDArray[np.str_] AR_V: npt.NDArray[np.void] @@ -213,3 +217,17 @@ with open("test_file", "wb") as f: assert_type(AR_f8.__array_finalize__(None), None) assert_type(AR_f8.__array_finalize__(B), None) assert_type(AR_f8.__array_finalize__(AR_f8), None) + +assert_type(f8.device, Literal["cpu"]) +assert_type(AR_f8.device, Literal["cpu"]) + +assert_type(f8.to_device("cpu"), np.float64) +assert_type(i8.to_device("cpu"), np.int64) +assert_type(AR_f8.to_device("cpu"), npt.NDArray[np.float64]) +assert_type(AR_i8.to_device("cpu"), npt.NDArray[np.int64]) +assert_type(AR_u1.to_device("cpu"), npt.NDArray[np.uint8]) +assert_type(AR_c8.to_device("cpu"), npt.NDArray[np.complex64]) +assert_type(AR_m.to_device("cpu"), npt.NDArray[np.timedelta64]) + +assert_type(f8.__array_namespace__(), Any) +assert_type(AR_f8.__array_namespace__(), Any) diff --git a/numpy/typing/tests/data/reveal/numeric.pyi b/numpy/typing/tests/data/reveal/numeric.pyi index 8f21ce405b89..1f0a8b36fff8 100644 --- a/numpy/typing/tests/data/reveal/numeric.pyi +++ b/numpy/typing/tests/data/reveal/numeric.pyi @@ -91,12 +91,6 @@ assert_type(np.tensordot(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any assert_type(np.tensordot(AR_i8, AR_m), npt.NDArray[np.timedelta64]) assert_type(np.tensordot(AR_O, AR_O), npt.NDArray[np.object_]) -assert_type(np.vecdot(AR_i8, AR_i8), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vecdot(AR_b, AR_b), npt.NDArray[np.bool]) -assert_type(np.vecdot(AR_b, AR_u8), npt.NDArray[np.unsignedinteger[Any]]) -assert_type(np.vecdot(AR_i8, AR_b), npt.NDArray[np.signedinteger[Any]]) -assert_type(np.vecdot(AR_i8, AR_f8), npt.NDArray[np.floating[Any]]) - assert_type(np.isscalar(i8), bool) assert_type(np.isscalar(AR_i8), bool) assert_type(np.isscalar(B), bool) diff --git 
a/numpy/typing/tests/data/reveal/numerictypes.pyi b/numpy/typing/tests/data/reveal/numerictypes.pyi index 091aa7e5ab06..cf558ddc9718 100644 --- a/numpy/typing/tests/data/reveal/numerictypes.pyi +++ b/numpy/typing/tests/data/reveal/numerictypes.pyi @@ -48,7 +48,8 @@ assert_type(np.ScalarType[0], type[int]) assert_type(np.ScalarType[3], type[bool]) assert_type(np.ScalarType[8], type[np.csingle]) assert_type(np.ScalarType[10], type[np.clongdouble]) +assert_type(np.bool_, type[np.bool]) assert_type(np.typecodes["Character"], Literal["c"]) assert_type(np.typecodes["Complex"], Literal["FDG"]) -assert_type(np.typecodes["All"], Literal["?bhilqpBHILQPefdgFDGSUVOMm"]) +assert_type(np.typecodes["All"], Literal["?bhilqnpBHILQNPefdgFDGSUVOMm"]) diff --git a/numpy/typing/tests/data/reveal/polynomial_polybase.pyi b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi new file mode 100644 index 000000000000..60e92709a2e6 --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_polybase.pyi @@ -0,0 +1,225 @@ +from fractions import Fraction +import sys +from collections.abc import Sequence +from decimal import Decimal +from typing import Any, Literal as L, TypeAlias, TypeVar + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import LiteralString, assert_type +else: + from typing_extensions import LiteralString, assert_type + +_Ar_x: TypeAlias = npt.NDArray[np.inexact[Any] | np.object_] +_Ar_f: TypeAlias = npt.NDArray[np.floating[Any]] +_Ar_c: TypeAlias = npt.NDArray[np.complexfloating[Any, Any]] +_Ar_O: TypeAlias = npt.NDArray[np.object_] + +_Ar_x_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.inexact[Any] | np.object_]] +_Ar_f_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_Ar_c_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_Ar_O_n: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_Ar_x_2: TypeAlias = np.ndarray[tuple[L[2]], 
np.dtype[np.inexact[Any] | np.object_]] +_Ar_f_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.floating[Any]]] +_Ar_c_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complexfloating[Any, Any]]] +_Ar_O_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +_SCT = TypeVar("_SCT", bound=np.generic) +_Ar_1d: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]] + +_BasisName: TypeAlias = L["X"] + +SC_i: np.int_ +SC_i_co: int | np.int_ +SC_f: np.float64 +SC_f_co: float | np.float64 | np.int_ +SC_c: np.complex128 +SC_c_co: complex | np.complex128 +SC_O: Decimal + +AR_i: npt.NDArray[np.int_] +AR_f: npt.NDArray[np.float64] +AR_f_co: npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_c: npt.NDArray[np.complex128] +AR_c_co: npt.NDArray[np.complex128] |npt.NDArray[np.float64] | npt.NDArray[np.int_] +AR_O: npt.NDArray[np.object_] +AR_O_co: npt.NDArray[np.object_ | np.number[Any]] + +SQ_i: Sequence[int] +SQ_f: Sequence[float] +SQ_c: Sequence[complex] +SQ_O: Sequence[Decimal] + +PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev +PS_herm: npp.Hermite +PS_herme: npp.HermiteE +PS_lag: npp.Laguerre +PS_leg: npp.Legendre +PS_all: ( + npp.Polynomial + | npp.Chebyshev + | npp.Hermite + | npp.HermiteE + | npp.Laguerre + | npp.Legendre +) + +# static- and classmethods + +assert_type(type(PS_poly).basis_name, None) +assert_type(type(PS_cheb).basis_name, L['T']) +assert_type(type(PS_herm).basis_name, L['H']) +assert_type(type(PS_herme).basis_name, L['He']) +assert_type(type(PS_lag).basis_name, L['L']) +assert_type(type(PS_leg).basis_name, L['P']) + +assert_type(type(PS_all).__hash__, None) +assert_type(type(PS_all).__array_ufunc__, None) +assert_type(type(PS_all).maxpower, L[100]) + +assert_type(type(PS_poly).fromroots(SC_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(SQ_i), npp.Polynomial) +assert_type(type(PS_poly).fromroots(AR_i), npp.Polynomial) +assert_type(type(PS_cheb).fromroots(SC_f), npp.Chebyshev) +assert_type(type(PS_cheb).fromroots(SQ_f), npp.Chebyshev) 
+assert_type(type(PS_cheb).fromroots(AR_f_co), npp.Chebyshev) +assert_type(type(PS_herm).fromroots(SC_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(SQ_c), npp.Hermite) +assert_type(type(PS_herm).fromroots(AR_c_co), npp.Hermite) +assert_type(type(PS_leg).fromroots(SC_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(SQ_O), npp.Legendre) +assert_type(type(PS_leg).fromroots(AR_O_co), npp.Legendre) + +assert_type(type(PS_poly).identity(), npp.Polynomial) +assert_type(type(PS_cheb).identity(symbol='z'), npp.Chebyshev) + +assert_type(type(PS_lag).basis(SC_i), npp.Laguerre) +assert_type(type(PS_leg).basis(32, symbol='u'), npp.Legendre) + +assert_type(type(PS_herm).cast(PS_poly), npp.Hermite) +assert_type(type(PS_herme).cast(PS_leg), npp.HermiteE) + +# attributes / properties + +assert_type(PS_all.coef, _Ar_x_n) +assert_type(PS_all.domain, _Ar_x_2) +assert_type(PS_all.window, _Ar_x_2) +assert_type(PS_all.symbol, LiteralString) + +# instance methods + +assert_type(PS_all.has_samecoef(PS_all), bool) +assert_type(PS_all.has_samedomain(PS_all), bool) +assert_type(PS_all.has_samewindow(PS_all), bool) +assert_type(PS_all.has_sametype(PS_all), bool) +assert_type(PS_poly.has_sametype(PS_poly), bool) +assert_type(PS_poly.has_sametype(PS_leg), bool) +assert_type(PS_poly.has_sametype(NotADirectoryError), L[False]) + +assert_type(PS_poly.copy(), npp.Polynomial) +assert_type(PS_cheb.copy(), npp.Chebyshev) +assert_type(PS_herm.copy(), npp.Hermite) +assert_type(PS_herme.copy(), npp.HermiteE) +assert_type(PS_lag.copy(), npp.Laguerre) +assert_type(PS_leg.copy(), npp.Legendre) + +assert_type(PS_leg.cutdeg(), npp.Legendre) +assert_type(PS_leg.trim(), npp.Legendre) +assert_type(PS_leg.trim(tol=SC_f_co), npp.Legendre) +assert_type(PS_leg.truncate(SC_i_co), npp.Legendre) + +assert_type(PS_all.convert(None, npp.Chebyshev), npp.Chebyshev) +assert_type(PS_all.convert((0, 1), npp.Laguerre), npp.Laguerre) +assert_type(PS_all.convert([0, 1], npp.Hermite, [-1, 1]), npp.Hermite) + 
+assert_type(PS_all.degree(), int) +assert_type(PS_all.mapparms(), tuple[Any, Any]) + +assert_type(PS_poly.integ(), npp.Polynomial) +assert_type(PS_herme.integ(SC_i_co), npp.HermiteE) +assert_type(PS_lag.integ(SC_i_co, SC_f_co), npp.Laguerre) +assert_type(PS_poly.deriv(), npp.Polynomial) +assert_type(PS_herm.deriv(SC_i_co), npp.Hermite) + +assert_type(PS_poly.roots(), _Ar_x_n) + +assert_type( + PS_poly.linspace(), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type( + PS_poly.linspace(9), + tuple[_Ar_1d[np.float64 | np.complex128], _Ar_1d[np.float64 | np.complex128]], +) + +assert_type(PS_cheb.fit(AR_c_co, AR_c_co, SC_i_co), npp.Chebyshev) +assert_type(PS_leg.fit(AR_c_co, AR_c_co, AR_i), npp.Legendre) +assert_type(PS_herm.fit(AR_c_co, AR_c_co, SQ_i), npp.Hermite) +assert_type(PS_poly.fit(AR_c_co, SQ_c, SQ_i), npp.Polynomial) +assert_type(PS_lag.fit(SQ_c, SQ_c, SQ_i, full=False), npp.Laguerre) +assert_type( + PS_herme.fit(SQ_c, AR_c_co, SC_i_co, full=True), + tuple[npp.HermiteE, Sequence[np.inexact[Any] | np.int32]], +) + +# custom operations + +assert_type(PS_all.__hash__, None) +assert_type(PS_all.__array_ufunc__, None) + +assert_type(str(PS_all), str) +assert_type(repr(PS_all), str) +assert_type(format(PS_all), str) + +assert_type(len(PS_all), int) +assert_type(next(iter(PS_all)), np.inexact[Any] | object) + +assert_type(PS_all(SC_f_co), np.float64 | np.complex128) +assert_type(PS_all(SC_c_co), np.complex128) +assert_type(PS_all(Decimal()), np.float64 | np.complex128) +assert_type(PS_all(Fraction()), np.float64 | np.complex128) +assert_type(PS_poly(SQ_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_c), npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(SQ_O), npt.NDArray[np.object_]) +assert_type(PS_poly(AR_f), npt.NDArray[np.float64] | npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_c), 
npt.NDArray[np.complex128] | npt.NDArray[np.object_]) +assert_type(PS_poly(AR_O), npt.NDArray[np.object_]) +assert_type(PS_all(PS_poly), npp.Polynomial) + +assert_type(PS_poly == PS_poly, bool) +assert_type(PS_poly != PS_poly, bool) + +assert_type(-PS_poly, npp.Polynomial) +assert_type(+PS_poly, npp.Polynomial) + +assert_type(PS_poly + 5, npp.Polynomial) +assert_type(PS_poly - 5, npp.Polynomial) +assert_type(PS_poly * 5, npp.Polynomial) +assert_type(PS_poly / 5, npp.Polynomial) +assert_type(PS_poly // 5, npp.Polynomial) +assert_type(PS_poly % 5, npp.Polynomial) + +assert_type(PS_poly + PS_leg, npp.Polynomial) +assert_type(PS_poly - PS_leg, npp.Polynomial) +assert_type(PS_poly * PS_leg, npp.Polynomial) +assert_type(PS_poly / PS_leg, npp.Polynomial) +assert_type(PS_poly // PS_leg, npp.Polynomial) +assert_type(PS_poly % PS_leg, npp.Polynomial) + +assert_type(5 + PS_poly, npp.Polynomial) +assert_type(5 - PS_poly, npp.Polynomial) +assert_type(5 * PS_poly, npp.Polynomial) +assert_type(5 / PS_poly, npp.Polynomial) +assert_type(5 // PS_poly, npp.Polynomial) +assert_type(5 % PS_poly, npp.Polynomial) +assert_type(divmod(PS_poly, 5), tuple[npp.Polynomial, npp.Polynomial]) +assert_type(divmod(5, PS_poly), tuple[npp.Polynomial, npp.Polynomial]) + +assert_type(PS_poly**1, npp.Polynomial) +assert_type(PS_poly**1.0, npp.Polynomial) diff --git a/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi new file mode 100644 index 000000000000..eecdb14e1c3c --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_polyutils.pyi @@ -0,0 +1,224 @@ +import sys +from collections.abc import Sequence +from decimal import Decimal +from fractions import Fraction +from typing import Any, Literal as L, TypeAlias + +import numpy as np +import numpy.typing as npt +import numpy.polynomial.polyutils as pu +from numpy.polynomial._polytypes import _Tuple2 + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from 
typing_extensions import assert_type + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +_ArrFloat1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.float64]] +_ArrComplex1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.complex128]] +_ArrObject1D_2: TypeAlias = np.ndarray[tuple[L[2]], np.dtype[np.object_]] + +num_int: int +num_float: float +num_complex: complex +# will result in an `object_` dtype +num_object: Decimal | Fraction + +sct_int: np.int_ +sct_float: np.float64 +sct_complex: np.complex128 +sct_object: np.object_ # doesn't exist at runtime + +arr_int: npt.NDArray[np.int_] +arr_float: npt.NDArray[np.float64] +arr_complex: npt.NDArray[np.complex128] +arr_object: npt.NDArray[np.object_] + +seq_num_int: Sequence[int] +seq_num_float: Sequence[float] +seq_num_complex: Sequence[complex] +seq_num_object: Sequence[Decimal | Fraction] + +seq_sct_int: Sequence[np.int_] +seq_sct_float: Sequence[np.float64] +seq_sct_complex: Sequence[np.complex128] +seq_sct_object: Sequence[np.object_] + +seq_arr_int: Sequence[npt.NDArray[np.int_]] +seq_arr_float: Sequence[npt.NDArray[np.float64]] +seq_arr_complex: Sequence[npt.NDArray[np.complex128]] +seq_arr_object: Sequence[npt.NDArray[np.object_]] + +seq_seq_num_int: Sequence[Sequence[int]] +seq_seq_num_float: Sequence[Sequence[float]] +seq_seq_num_complex: Sequence[Sequence[complex]] +seq_seq_num_object: Sequence[Sequence[Decimal | Fraction]] + +seq_seq_sct_int: Sequence[Sequence[np.int_]] +seq_seq_sct_float: Sequence[Sequence[np.float64]] +seq_seq_sct_complex: Sequence[Sequence[np.complex128]] +seq_seq_sct_object: Sequence[Sequence[np.object_]] # doesn't exist at runtime + +# as_series + +assert_type(pu.as_series(arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(arr_float), list[_ArrFloat1D]) 
+assert_type(pu.as_series(arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_sct_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_arr_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_arr_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_arr_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_num_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_num_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_num_object), list[_ArrObject1D]) + +assert_type(pu.as_series(seq_seq_sct_int), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_float), list[_ArrFloat1D]) +assert_type(pu.as_series(seq_seq_sct_complex), list[_ArrComplex1D]) +assert_type(pu.as_series(seq_seq_sct_object), list[_ArrObject1D]) + +# trimcoef + +assert_type(pu.trimcoef(num_int), _ArrFloat1D) +assert_type(pu.trimcoef(num_float), _ArrFloat1D) +assert_type(pu.trimcoef(num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) +assert_type(pu.trimcoef(num_object), _ArrObject1D) + +assert_type(pu.trimcoef(sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(sct_object), _ArrObject1D) + +assert_type(pu.trimcoef(arr_int), _ArrFloat1D) +assert_type(pu.trimcoef(arr_float), _ArrFloat1D) 
+assert_type(pu.trimcoef(arr_complex), _ArrComplex1D) +assert_type(pu.trimcoef(arr_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_num_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_num_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_num_object), _ArrObject1D) + +assert_type(pu.trimcoef(seq_sct_int), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_float), _ArrFloat1D) +assert_type(pu.trimcoef(seq_sct_complex), _ArrComplex1D) +assert_type(pu.trimcoef(seq_sct_object), _ArrObject1D) + +# getdomain + +assert_type(pu.getdomain(num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) +assert_type(pu.getdomain(num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(sct_object), _ArrObject1D_2) + +assert_type(pu.getdomain(arr_int), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_float), _ArrFloat1D_2) +assert_type(pu.getdomain(arr_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(arr_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_num_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_num_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_num_object), _ArrObject1D_2) + +assert_type(pu.getdomain(seq_sct_int), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_float), _ArrFloat1D_2) +assert_type(pu.getdomain(seq_sct_complex), _ArrComplex1D_2) +assert_type(pu.getdomain(seq_sct_object), _ArrObject1D_2) + +# mapparms + +assert_type(pu.mapparms(seq_num_int, seq_num_int), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_int, seq_num_float), _Tuple2[float]) +assert_type(pu.mapparms(seq_num_float, seq_num_float), _Tuple2[float]) 
+assert_type(pu.mapparms(seq_num_float, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_complex), _Tuple2[complex]) +assert_type(pu.mapparms(seq_num_complex, seq_num_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_num_object, seq_num_object), _Tuple2[object]) + +assert_type(pu.mapparms(seq_sct_int, seq_sct_int), _Tuple2[np.floating[Any]]) +assert_type(pu.mapparms(seq_sct_int, seq_sct_float), _Tuple2[np.floating[Any]]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_float), _Tuple2[np.floating[Any]]) +assert_type(pu.mapparms(seq_sct_float, seq_sct_complex), _Tuple2[np.complexfloating[Any, Any]]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_complex), _Tuple2[np.complexfloating[Any, Any]]) +assert_type(pu.mapparms(seq_sct_complex, seq_sct_object), _Tuple2[object]) +assert_type(pu.mapparms(seq_sct_object, seq_sct_object), _Tuple2[object]) + +assert_type(pu.mapparms(arr_int, arr_int), _Tuple2[np.floating[Any]]) +assert_type(pu.mapparms(arr_int, arr_float), _Tuple2[np.floating[Any]]) +assert_type(pu.mapparms(arr_float, arr_float), _Tuple2[np.floating[Any]]) +assert_type(pu.mapparms(arr_float, arr_complex), _Tuple2[np.complexfloating[Any, Any]]) +assert_type(pu.mapparms(arr_complex, arr_complex), _Tuple2[np.complexfloating[Any, Any]]) +assert_type(pu.mapparms(arr_complex, arr_object), _Tuple2[object]) +assert_type(pu.mapparms(arr_object, arr_object), _Tuple2[object]) + +# mapdomain + +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_int), np.floating[Any]) +assert_type(pu.mapdomain(num_int, seq_num_int, seq_num_float), np.floating[Any]) +assert_type(pu.mapdomain(num_int, seq_num_float, seq_num_float), np.floating[Any]) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_float), np.floating[Any]) +assert_type(pu.mapdomain(num_float, seq_num_float, seq_num_complex), np.complexfloating[Any, Any]) +assert_type(pu.mapdomain(num_float, seq_num_complex, seq_num_complex), np.complexfloating[Any, Any]) 
+assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_complex), np.complexfloating[Any, Any]) +assert_type(pu.mapdomain(num_complex, seq_num_complex, seq_num_object), object) +assert_type(pu.mapdomain(num_complex, seq_num_object, seq_num_object), object) +assert_type(pu.mapdomain(num_object, seq_num_object, seq_num_object), object) + +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_int, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_int, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_float, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_float, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_complex, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_complex, seq_num_object, seq_num_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_num_object, seq_num_object, seq_num_object), _ArrObject1D) + +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_int), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_int, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_int, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_float), _ArrFloat1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_float, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_float, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_complex), _ArrComplex1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_complex, seq_sct_object), _ArrObject1D) +assert_type(pu.mapdomain(seq_sct_complex, seq_sct_object, seq_sct_object), 
_ArrObject1D) +assert_type(pu.mapdomain(seq_sct_object, seq_sct_object, seq_sct_object), _ArrObject1D) + +assert_type(pu.mapdomain(arr_int, arr_int, arr_int), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_int, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_int, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_float), _ArrFloat1D) +assert_type(pu.mapdomain(arr_float, arr_float, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_float, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_complex), _ArrComplex1D) +assert_type(pu.mapdomain(arr_complex, arr_complex, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_complex, arr_object, arr_object), _ArrObject1D) +assert_type(pu.mapdomain(arr_object, arr_object, arr_object), _ArrObject1D) diff --git a/numpy/typing/tests/data/reveal/polynomial_series.pyi b/numpy/typing/tests/data/reveal/polynomial_series.pyi new file mode 100644 index 000000000000..a60d05afd01d --- /dev/null +++ b/numpy/typing/tests/data/reveal/polynomial_series.pyi @@ -0,0 +1,144 @@ +from collections.abc import Sequence +import sys +from typing import Any, TypeAlias + +import numpy as np +import numpy.polynomial as npp +import numpy.typing as npt + +if sys.version_info >= (3, 11): + from typing import assert_type +else: + from typing_extensions import assert_type + +_ArrFloat1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.floating[Any]]] +_ArrFloat1D64: TypeAlias = np.ndarray[tuple[int], np.dtype[np.float64]] +_ArrComplex1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complexfloating[Any, Any]]] +_ArrComplex1D128: TypeAlias = np.ndarray[tuple[int], np.dtype[np.complex128]] +_ArrObject1D: TypeAlias = np.ndarray[tuple[int], np.dtype[np.object_]] + +AR_b: npt.NDArray[np.bool] +AR_u4: npt.NDArray[np.uint32] +AR_i8: npt.NDArray[np.int64] +AR_f8: npt.NDArray[np.float64] +AR_c16: npt.NDArray[np.complex128] +AR_O: npt.NDArray[np.object_] + 
+PS_poly: npp.Polynomial +PS_cheb: npp.Chebyshev + +assert_type(npp.polynomial.polyroots(AR_f8), _ArrFloat1D64) +assert_type(npp.polynomial.polyroots(AR_c16), _ArrComplex1D128) +assert_type(npp.polynomial.polyroots(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polyfromroots(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polyfromroots(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyfromroots(AR_O), _ArrObject1D) + +# assert_type(npp.polynomial.polyadd(AR_b, AR_b), NoReturn) +assert_type(npp.polynomial.polyadd(AR_u4, AR_b), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_f8, AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polyadd(AR_i8, AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polyadd(AR_O, AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polymulx(AR_u4), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_i8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_f8), _ArrFloat1D) +assert_type(npp.polynomial.polymulx(AR_c16), _ArrComplex1D) +assert_type(npp.polynomial.polymulx(AR_O), _ArrObject1D) + +assert_type(npp.polynomial.polypow(AR_u4, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_i8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_f8, 2), _ArrFloat1D) +assert_type(npp.polynomial.polypow(AR_c16, 2), _ArrComplex1D) +assert_type(npp.polynomial.polypow(AR_O, 2), _ArrObject1D) + +# assert_type(npp.polynomial.polyder(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyder(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyder(AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyder(AR_O, m=2), npt.NDArray[np.object_]) + +# assert_type(npp.polynomial.polyint(PS_poly), npt.NDArray[np.object_]) +assert_type(npp.polynomial.polyint(AR_f8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyint(AR_f8, k=AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) 
+assert_type(npp.polynomial.polyint(AR_O, m=2), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval(AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval2d(AR_b, AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_u4, AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval2d(AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval2d(AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyval3d(AR_b, AR_b, AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_u4, AR_u4, AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_f8, AR_f8, AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyval3d(AR_i8, AR_i8, AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyval3d(AR_O, AR_O, AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvalfromroots(AR_b, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_u4, AR_b), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_i8), npt.NDArray[np.floating[Any]]) 
+assert_type(npp.polynomial.polyvalfromroots(AR_f8, AR_i8), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_i8, AR_c16), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvalfromroots(AR_O, AR_O), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander(AR_f8, 3), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvander(AR_c16, 3), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander(AR_O, 3), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander2d(AR_f8, AR_f8, [4, 2]), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvander2d(AR_c16, AR_c16, [4, 2]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander2d(AR_O, AR_O, [4, 2]), npt.NDArray[np.object_]) + +assert_type(npp.polynomial.polyvander3d(AR_f8, AR_f8, AR_f8, [4, 3, 2]), npt.NDArray[np.floating[Any]]) +assert_type(npp.polynomial.polyvander3d(AR_c16, AR_c16, AR_c16, [4, 3, 2]), npt.NDArray[np.complexfloating[Any, Any]]) +assert_type(npp.polynomial.polyvander3d(AR_O, AR_O, AR_O, [4, 3, 2]), npt.NDArray[np.object_]) + +assert_type( + npp.polynomial.polyfit(AR_f8, AR_f8, 2), + npt.NDArray[np.floating[Any]], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_i8, 1, full=True), + tuple[npt.NDArray[np.floating[Any]], Sequence[np.inexact[Any] | np.int32]], +) +assert_type( + npp.polynomial.polyfit(AR_c16, AR_f8, 2), + npt.NDArray[np.complexfloating[Any, Any]], +) +assert_type( + npp.polynomial.polyfit(AR_f8, AR_c16, 1, full=True)[0], + npt.NDArray[np.complexfloating[Any, Any]], +) + +assert_type(npp.chebyshev.chebgauss(2), tuple[_ArrFloat1D64, _ArrFloat1D64]) + +assert_type(npp.chebyshev.chebweight(AR_f8), npt.NDArray[np.float64]) +assert_type(npp.chebyshev.chebweight(AR_c16), npt.NDArray[np.complex128]) +assert_type(npp.chebyshev.chebweight(AR_O), npt.NDArray[np.object_]) + +assert_type(npp.chebyshev.poly2cheb(AR_f8), _ArrFloat1D) 
+assert_type(npp.chebyshev.poly2cheb(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.poly2cheb(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.cheb2poly(AR_f8), _ArrFloat1D) +assert_type(npp.chebyshev.cheb2poly(AR_c16), _ArrComplex1D) +assert_type(npp.chebyshev.cheb2poly(AR_O), _ArrObject1D) + +assert_type(npp.chebyshev.chebpts1(6), _ArrFloat1D64) +assert_type(npp.chebyshev.chebpts2(6), _ArrFloat1D64) + +assert_type( + npp.chebyshev.chebinterpolate(np.tanh, 3), + npt.NDArray[np.float64 | np.complex128 | np.object_], +) diff --git a/numpy/typing/tests/data/reveal/random.pyi b/numpy/typing/tests/data/reveal/random.pyi index 42a24936b903..b31b4b56f870 100644 --- a/numpy/typing/tests/data/reveal/random.pyi +++ b/numpy/typing/tests/data/reveal/random.pyi @@ -530,12 +530,10 @@ assert_type(def_gen.integers(I_bool_high_closed, dtype=bool, endpoint=True), npt assert_type(def_gen.integers(I_bool_low, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) assert_type(def_gen.integers(0, I_bool_high_closed, dtype=bool, endpoint=True), npt.NDArray[np.bool]) -# TODO: Commented out tests are currently incorrectly typed as arrays rather -# than scalars. 
-#assert_type(def_gen.integers(2, dtype=np.bool), np.bool) -#assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) -#assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) -#assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(0, 2, dtype=np.bool), np.bool) +assert_type(def_gen.integers(1, dtype=np.bool, endpoint=True), np.bool) +assert_type(def_gen.integers(0, 1, dtype=np.bool, endpoint=True), np.bool) assert_type(def_gen.integers(I_bool_low_like, 1, dtype=np.bool, endpoint=True), npt.NDArray[np.bool]) assert_type(def_gen.integers(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(def_gen.integers(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) @@ -549,10 +547,10 @@ I_u1_low_like: list[int] = [0] I_u1_high_open: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) I_u1_high_closed: npt.NDArray[np.uint8] = np.array([255], dtype=np.uint8) -# assert_type(def_gen.integers(256, dtype="u1"), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) -# assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="u1"), np.uint8) +assert_type(def_gen.integers(255, dtype="u1", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="u1", endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) @@ -561,10 +559,10 @@ assert_type(def_gen.integers(I_u1_high_closed, dtype="u1", endpoint=True), npt.N assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, 
dtype="u1", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(0, I_u1_high_closed, dtype="u1", endpoint=True), npt.NDArray[np.uint8]) -# assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) -# assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(0, 256, dtype="uint8"), np.uint8) +assert_type(def_gen.integers(255, dtype="uint8", endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype="uint8", endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) @@ -573,10 +571,10 @@ assert_type(def_gen.integers(I_u1_high_closed, dtype="uint8", endpoint=True), np assert_type(def_gen.integers(I_u1_low, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) assert_type(def_gen.integers(0, I_u1_high_closed, dtype="uint8", endpoint=True), npt.NDArray[np.uint8]) -# assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) -# assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) -# assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) -# assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(0, 256, dtype=np.uint8), np.uint8) +assert_type(def_gen.integers(255, dtype=np.uint8, endpoint=True), np.uint8) +assert_type(def_gen.integers(0, 255, dtype=np.uint8, endpoint=True), np.uint8) assert_type(def_gen.integers(I_u1_low_like, 255, dtype=np.uint8, endpoint=True), npt.NDArray[np.uint8]) 
assert_type(def_gen.integers(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(def_gen.integers(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) @@ -590,10 +588,10 @@ I_u2_low_like: list[int] = [0] I_u2_high_open: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) I_u2_high_closed: npt.NDArray[np.uint16] = np.array([65535], dtype=np.uint16) -# assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) -# assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="u2"), np.uint16) +assert_type(def_gen.integers(65535, dtype="u2", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="u2", endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) @@ -602,10 +600,10 @@ assert_type(def_gen.integers(I_u2_high_closed, dtype="u2", endpoint=True), npt.N assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(0, I_u2_high_closed, dtype="u2", endpoint=True), npt.NDArray[np.uint16]) -# assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) -# assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype="uint16"), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype="uint16"), np.uint16) 
+assert_type(def_gen.integers(65535, dtype="uint16", endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype="uint16", endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) @@ -614,10 +612,10 @@ assert_type(def_gen.integers(I_u2_high_closed, dtype="uint16", endpoint=True), n assert_type(def_gen.integers(I_u2_low, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(0, I_u2_high_closed, dtype="uint16", endpoint=True), npt.NDArray[np.uint16]) -# assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) -# assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) -# assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) -# assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(0, 65536, dtype=np.uint16), np.uint16) +assert_type(def_gen.integers(65535, dtype=np.uint16, endpoint=True), np.uint16) +assert_type(def_gen.integers(0, 65535, dtype=np.uint16, endpoint=True), np.uint16) assert_type(def_gen.integers(I_u2_low_like, 65535, dtype=np.uint16, endpoint=True), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(def_gen.integers(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) @@ -631,10 +629,10 @@ I_u4_low_like: list[int] = [0] I_u4_high_open: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) I_u4_high_closed: npt.NDArray[np.uint32] = np.array([4294967295], dtype=np.uint32) -# assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) -# assert_type(def_gen.integers(0, 
4294967296, dtype=np.int_), np.int_) -# assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(0, 4294967296, dtype=np.int_), np.int_) +assert_type(def_gen.integers(4294967295, dtype=np.int_, endpoint=True), np.int_) +assert_type(def_gen.integers(0, 4294967295, dtype=np.int_, endpoint=True), np.int_) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.int_), npt.NDArray[np.int_]) @@ -644,10 +642,10 @@ assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.int_, endpoint assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.int_, endpoint=True), npt.NDArray[np.int_]) -# assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="u4"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="u4", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="u4", endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) @@ -656,10 +654,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype="u4", endpoint=True), npt.N 
assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype="u4", endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(def_gen.integers(4294967295, dtype="uint32", endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype="uint32", endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) @@ -668,10 +666,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype="uint32", endpoint=True), n assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype="uint32", endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) -# assert_type(def_gen.integers(4294967295, dtype=np.uint32, endpoint=True), np.uint32) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) +assert_type(def_gen.integers(4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(def_gen.integers(4294967295, dtype=np.uint32, 
endpoint=True), np.uint32) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint32, endpoint=True), np.uint32) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) @@ -680,10 +678,10 @@ assert_type(def_gen.integers(I_u4_high_closed, dtype=np.uint32, endpoint=True), assert_type(def_gen.integers(I_u4_low, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) assert_type(def_gen.integers(0, I_u4_high_closed, dtype=np.uint32, endpoint=True), npt.NDArray[np.uint32]) -# assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) -# assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) -# assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) -# assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(0, 4294967296, dtype=np.uint), np.uint) +assert_type(def_gen.integers(4294967295, dtype=np.uint, endpoint=True), np.uint) +assert_type(def_gen.integers(0, 4294967295, dtype=np.uint, endpoint=True), np.uint) assert_type(def_gen.integers(I_u4_low_like, 4294967295, dtype=np.uint, endpoint=True), npt.NDArray[np.uint]) assert_type(def_gen.integers(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(def_gen.integers(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) @@ -697,10 +695,10 @@ I_u8_low_like: list[int] = [0] I_u8_high_open: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) I_u8_high_closed: npt.NDArray[np.uint64] = np.array([18446744073709551615], dtype=np.uint64) -# assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) -# assert_type(def_gen.integers(0, 
18446744073709551616, dtype="u8"), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="u8", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="u8", endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) @@ -709,10 +707,10 @@ assert_type(def_gen.integers(I_u8_high_closed, dtype="u8", endpoint=True), npt.N assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(0, I_u8_high_closed, dtype="u8", endpoint=True), npt.NDArray[np.uint64]) -# assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype="uint64", endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype="uint64", endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype="uint64", endpoint=True), 
npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) @@ -721,10 +719,10 @@ assert_type(def_gen.integers(I_u8_high_closed, dtype="uint64", endpoint=True), n assert_type(def_gen.integers(I_u8_low, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(0, I_u8_high_closed, dtype="uint64", endpoint=True), npt.NDArray[np.uint64]) -# assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) -# assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(def_gen.integers(18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) +assert_type(def_gen.integers(0, 18446744073709551615, dtype=np.uint64, endpoint=True), np.uint64) assert_type(def_gen.integers(I_u8_low_like, 18446744073709551615, dtype=np.uint64, endpoint=True), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(def_gen.integers(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) @@ -738,10 +736,10 @@ I_i1_low_like: list[int] = [-128] I_i1_high_open: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) I_i1_high_closed: npt.NDArray[np.int8] = np.array([127], dtype=np.int8) -# assert_type(def_gen.integers(128, dtype="i1"), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) -# assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 
127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype="i1"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="i1"), np.int8) +assert_type(def_gen.integers(127, dtype="i1", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="i1", endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype="i1", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) @@ -750,10 +748,10 @@ assert_type(def_gen.integers(I_i1_high_closed, dtype="i1", endpoint=True), npt.N assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(-128, I_i1_high_closed, dtype="i1", endpoint=True), npt.NDArray[np.int8]) -# assert_type(def_gen.integers(128, dtype="int8"), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) -# assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype="int8"), np.int8) +assert_type(def_gen.integers(-128, 128, dtype="int8"), np.int8) +assert_type(def_gen.integers(127, dtype="int8", endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype="int8", endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype="int8", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) @@ -762,10 +760,10 @@ assert_type(def_gen.integers(I_i1_high_closed, dtype="int8", endpoint=True), npt assert_type(def_gen.integers(I_i1_low, I_i1_high_closed, dtype="int8", endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(-128, I_i1_high_closed, 
dtype="int8", endpoint=True), npt.NDArray[np.int8]) -# assert_type(def_gen.integers(128, dtype=np.int8), np.int8) -# assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) -# assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) -# assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(-128, 128, dtype=np.int8), np.int8) +assert_type(def_gen.integers(127, dtype=np.int8, endpoint=True), np.int8) +assert_type(def_gen.integers(-128, 127, dtype=np.int8, endpoint=True), np.int8) assert_type(def_gen.integers(I_i1_low_like, 127, dtype=np.int8, endpoint=True), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(def_gen.integers(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) @@ -779,10 +777,10 @@ I_i2_low_like: list[int] = [-32768] I_i2_high_open: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) I_i2_high_closed: npt.NDArray[np.int16] = np.array([32767], dtype=np.int16) -# assert_type(def_gen.integers(32768, dtype="i2"), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) -# assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="i2"), np.int16) +assert_type(def_gen.integers(32767, dtype="i2", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="i2", endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="i2", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) @@ -791,10 +789,10 @@ 
assert_type(def_gen.integers(I_i2_high_closed, dtype="i2", endpoint=True), npt.N assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="i2", endpoint=True), npt.NDArray[np.int16]) -# assert_type(def_gen.integers(32768, dtype="int16"), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) -# assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype="int16"), np.int16) +assert_type(def_gen.integers(32767, dtype="int16", endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype="int16", endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype="int16", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) @@ -803,10 +801,10 @@ assert_type(def_gen.integers(I_i2_high_closed, dtype="int16", endpoint=True), np assert_type(def_gen.integers(I_i2_low, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(-32768, I_i2_high_closed, dtype="int16", endpoint=True), npt.NDArray[np.int16]) -# assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) -# assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) -# assert_type(def_gen.integers(32767, dtype=np.int16, endpoint=True), np.int16) -# assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(-32768, 32768, dtype=np.int16), np.int16) +assert_type(def_gen.integers(32767, 
dtype=np.int16, endpoint=True), np.int16) +assert_type(def_gen.integers(-32768, 32767, dtype=np.int16, endpoint=True), np.int16) assert_type(def_gen.integers(I_i2_low_like, 32767, dtype=np.int16, endpoint=True), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(def_gen.integers(I_i2_low, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) @@ -820,10 +818,10 @@ I_i4_low_like: list[int] = [-2147483648] I_i4_high_open: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) I_i4_high_closed: npt.NDArray[np.int32] = np.array([2147483647], dtype=np.int32) -# assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) -# assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="i4", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="i4", endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="i4", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) @@ -832,10 +830,10 @@ assert_type(def_gen.integers(I_i4_high_closed, dtype="i4", endpoint=True), npt.N assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="i4", endpoint=True), npt.NDArray[np.int32]) -# assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) -# assert_type(def_gen.integers(-2147483648, 
2147483648, dtype="int32"), np.int32) -# assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(def_gen.integers(2147483647, dtype="int32", endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype="int32", endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype="int32", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) @@ -844,10 +842,10 @@ assert_type(def_gen.integers(I_i4_high_closed, dtype="int32", endpoint=True), np assert_type(def_gen.integers(I_i4_low, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(-2147483648, I_i4_high_closed, dtype="int32", endpoint=True), npt.NDArray[np.int32]) -# assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) -# assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) -# assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(def_gen.integers(2147483647, dtype=np.int32, endpoint=True), np.int32) +assert_type(def_gen.integers(-2147483648, 2147483647, dtype=np.int32, endpoint=True), np.int32) assert_type(def_gen.integers(I_i4_low_like, 2147483647, dtype=np.int32, endpoint=True), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_high_open, 
dtype=np.int32), npt.NDArray[np.int32]) assert_type(def_gen.integers(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) @@ -861,10 +859,10 @@ I_i8_low_like: list[int] = [-9223372036854775808] I_i8_high_open: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) I_i8_high_closed: npt.NDArray[np.int64] = np.array([9223372036854775807], dtype=np.int64) -# assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="i8", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="i8", endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="i8", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) @@ -873,10 +871,10 @@ assert_type(def_gen.integers(I_i8_high_closed, dtype="i8", endpoint=True), npt.N assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="i8", endpoint=True), npt.NDArray[np.int64]) -# assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype="int64", 
endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype="int64", endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype="int64", endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 9223372036854775807, dtype="int64", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) @@ -885,10 +883,10 @@ assert_type(def_gen.integers(I_i8_high_closed, dtype="int64", endpoint=True), np assert_type(def_gen.integers(I_i8_low, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(-9223372036854775808, I_i8_high_closed, dtype="int64", endpoint=True), npt.NDArray[np.int64]) -# assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) -# assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) -# assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(def_gen.integers(9223372036854775807, dtype=np.int64, endpoint=True), np.int64) +assert_type(def_gen.integers(-9223372036854775808, 9223372036854775807, dtype=np.int64, endpoint=True), np.int64) assert_type(def_gen.integers(I_i8_low_like, 
9223372036854775807, dtype=np.int64, endpoint=True), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) assert_type(def_gen.integers(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) @@ -955,9 +953,7 @@ assert_type(def_gen.shuffle(D_2D, axis=1), None) assert_type(np.random.Generator(pcg64), np.random.Generator) assert_type(def_gen.__str__(), str) assert_type(def_gen.__repr__(), str) -def_gen_state = def_gen.__getstate__() -assert_type(def_gen_state, dict[str, Any]) -assert_type(def_gen.__setstate__(def_gen_state), None) +assert_type(def_gen.__setstate__(dict(def_gen.bit_generator.state)), None) # RandomState random_st: np.random.RandomState = np.random.RandomState() @@ -1324,163 +1320,164 @@ assert_type(random_st.randint(I_bool_high_open, dtype=bool), npt.NDArray[np.bool assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) assert_type(random_st.randint(0, I_bool_high_open, dtype=bool), npt.NDArray[np.bool]) -# TODO: Commented out type incorrectly indicates an array return: -# assert_type(random_st.randint(2, dtype=np.bool), np.bool) -# assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) +assert_type(random_st.randint(2, dtype=np.bool), np.bool) +assert_type(random_st.randint(0, 2, dtype=np.bool), np.bool) assert_type(random_st.randint(I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(random_st.randint(I_bool_low, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) assert_type(random_st.randint(0, I_bool_high_open, dtype=np.bool), npt.NDArray[np.bool]) -# assert_type(random_st.randint(256, dtype="u1"), np.uint16) -# assert_type(random_st.randint(0, 256, dtype="u1"), np.uint16) +assert_type(random_st.randint(256, dtype="u1"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="u1"), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, 
I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype="u1"), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(256, dtype="uint8"), np.uint16) -# assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint16) +assert_type(random_st.randint(256, dtype="uint8"), np.uint8) +assert_type(random_st.randint(0, 256, dtype="uint8"), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype="uint8"), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(256, dtype=np.uint8), np.uint16) -# assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint16) +assert_type(random_st.randint(256, dtype=np.uint8), np.uint8) +assert_type(random_st.randint(0, 256, dtype=np.uint8), np.uint8) assert_type(random_st.randint(I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(random_st.randint(I_u1_low, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) assert_type(random_st.randint(0, I_u1_high_open, dtype=np.uint8), npt.NDArray[np.uint8]) -# assert_type(random_st.randint(65536, dtype="u2"), np.uint16) -# assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(65536, dtype="u2"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="u2"), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype="u2"), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) -# assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(65536, dtype="uint16"), np.uint16) +assert_type(random_st.randint(0, 65536, dtype="uint16"), np.uint16) 
assert_type(random_st.randint(I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype="uint16"), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) -# assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(65536, dtype=np.uint16), np.uint16) +assert_type(random_st.randint(0, 65536, dtype=np.uint16), np.uint16) assert_type(random_st.randint(I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(random_st.randint(I_u2_low, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) assert_type(random_st.randint(0, I_u2_high_open, dtype=np.uint16), npt.NDArray[np.uint16]) -# assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(4294967296, dtype="u4"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="u4"), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype="u4"), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(4294967296, dtype="uint32"), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype="uint32"), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype="uint32"), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype=np.uint32), 
np.uint32) -# assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(4294967296, dtype=np.uint32), np.uint32) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint32), np.uint32) assert_type(random_st.randint(I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint32), npt.NDArray[np.uint32]) -# assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) -# assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(4294967296, dtype=np.uint), np.uint) +assert_type(random_st.randint(0, 4294967296, dtype=np.uint), np.uint) assert_type(random_st.randint(I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(random_st.randint(I_u4_low, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) assert_type(random_st.randint(0, I_u4_high_open, dtype=np.uint), npt.NDArray[np.uint]) -# assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype="u8"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="u8"), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype="u8"), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype="uint64"), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype="uint64"), np.uint64) 
assert_type(random_st.randint(I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype="uint64"), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) -# assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(18446744073709551616, dtype=np.uint64), np.uint64) +assert_type(random_st.randint(0, 18446744073709551616, dtype=np.uint64), np.uint64) assert_type(random_st.randint(I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(random_st.randint(I_u8_low, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) assert_type(random_st.randint(0, I_u8_high_open, dtype=np.uint64), npt.NDArray[np.uint64]) -# assert_type(random_st.randint(128, dtype="i1"), np.int8) -# assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) +assert_type(random_st.randint(128, dtype="i1"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="i1"), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype="i1"), npt.NDArray[np.int8]) -# assert_type(random_st.randint(128, dtype="int8"), np.int8) -# assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) +assert_type(random_st.randint(128, dtype="int8"), np.int8) +assert_type(random_st.randint(-128, 128, dtype="int8"), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype="int8"), npt.NDArray[np.int8]) -# assert_type(random_st.randint(128, dtype=np.int8), np.int8) -# 
assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) +assert_type(random_st.randint(128, dtype=np.int8), np.int8) +assert_type(random_st.randint(-128, 128, dtype=np.int8), np.int8) assert_type(random_st.randint(I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(random_st.randint(I_i1_low, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) assert_type(random_st.randint(-128, I_i1_high_open, dtype=np.int8), npt.NDArray[np.int8]) -# assert_type(random_st.randint(32768, dtype="i2"), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) +assert_type(random_st.randint(32768, dtype="i2"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="i2"), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype="i2"), npt.NDArray[np.int16]) -# assert_type(random_st.randint(32768, dtype="int16"), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) + +assert_type(random_st.randint(32768, dtype="int16"), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype="int16"), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype="int16"), npt.NDArray[np.int16]) -# assert_type(random_st.randint(32768, dtype=np.int16), np.int16) -# assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) + +assert_type(random_st.randint(32768, dtype=np.int16), np.int16) +assert_type(random_st.randint(-32768, 32768, dtype=np.int16), np.int16) assert_type(random_st.randint(I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) assert_type(random_st.randint(I_i2_low, I_i2_high_open, dtype=np.int16), 
npt.NDArray[np.int16]) assert_type(random_st.randint(-32768, I_i2_high_open, dtype=np.int16), npt.NDArray[np.int16]) -# assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(2147483648, dtype="i4"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="i4"), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="i4"), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(2147483648, dtype="int32"), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype="int32"), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype="int32"), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(2147483648, dtype=np.int32), np.int32) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int32), np.int32) assert_type(random_st.randint(I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int32), npt.NDArray[np.int32]) -# assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) -# assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), 
np.int_) +assert_type(random_st.randint(2147483648, dtype=np.int_), np.int_) +assert_type(random_st.randint(-2147483648, 2147483648, dtype=np.int_), np.int_) assert_type(random_st.randint(I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) assert_type(random_st.randint(I_i4_low, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) assert_type(random_st.randint(-2147483648, I_i4_high_open, dtype=np.int_), npt.NDArray[np.int_]) -# assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) -# assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(9223372036854775808, dtype="i8"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="i8"), np.int64) assert_type(random_st.randint(I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="i8"), npt.NDArray[np.int64]) -# assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) -# assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(9223372036854775808, dtype="int64"), np.int64) +assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype="int64"), np.int64) assert_type(random_st.randint(I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype="int64"), npt.NDArray[np.int64]) -# assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) -# assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) +assert_type(random_st.randint(9223372036854775808, dtype=np.int64), np.int64) 
+assert_type(random_st.randint(-9223372036854775808, 9223372036854775808, dtype=np.int64), np.int64) assert_type(random_st.randint(I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) assert_type(random_st.randint(I_i8_low, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) assert_type(random_st.randint(-9223372036854775808, I_i8_high_open, dtype=np.int64), npt.NDArray[np.int64]) diff --git a/numpy/typing/tests/data/reveal/scalars.pyi b/numpy/typing/tests/data/reveal/scalars.pyi index 47c08997a0e3..95775e9a8dbe 100644 --- a/numpy/typing/tests/data/reveal/scalars.pyi +++ b/numpy/typing/tests/data/reveal/scalars.pyi @@ -50,6 +50,7 @@ assert_type(V[["field1", "field2"]], np.void) V[0] = 5 # Aliases +assert_type(np.bool_(), np.bool) assert_type(np.byte(), np.byte) assert_type(np.short(), np.short) assert_type(np.intc(), np.intc) diff --git a/numpy/typing/tests/data/reveal/shape.pyi b/numpy/typing/tests/data/reveal/shape.pyi new file mode 100644 index 000000000000..8f8d819cbcea --- /dev/null +++ b/numpy/typing/tests/data/reveal/shape.pyi @@ -0,0 +1,15 @@ +from typing import Any, NamedTuple + +import numpy as np +from typing_extensions import assert_type + + +# Subtype of tuple[int, int] +class XYGrid(NamedTuple): + x_axis: int + y_axis: int + +arr: np.ndarray[XYGrid, Any] + +# Test shape property matches shape typevar +assert_type(arr.shape, XYGrid) diff --git a/numpy/typing/tests/data/reveal/shape_base.pyi b/numpy/typing/tests/data/reveal/shape_base.pyi index 69940cc1ac2c..526f3abf161c 100644 --- a/numpy/typing/tests/data/reveal/shape_base.pyi +++ b/numpy/typing/tests/data/reveal/shape_base.pyi @@ -53,3 +53,6 @@ assert_type(np.kron(AR_f8, AR_f8), npt.NDArray[np.floating[Any]]) assert_type(np.tile(AR_i8, 5), npt.NDArray[np.int64]) assert_type(np.tile(AR_LIKE_f8, [2, 2]), npt.NDArray[Any]) + +assert_type(np.unstack(AR_i8, axis=0), tuple[npt.NDArray[np.int64], ...]) +assert_type(np.unstack(AR_LIKE_f8, axis=0), tuple[npt.NDArray[Any], ...]) diff --git 
a/numpy/typing/tests/data/reveal/twodim_base.pyi b/numpy/typing/tests/data/reveal/twodim_base.pyi index 9d808dbb1e0d..f52ad3a41b69 100644 --- a/numpy/typing/tests/data/reveal/twodim_base.pyi +++ b/numpy/typing/tests/data/reveal/twodim_base.pyi @@ -28,6 +28,7 @@ AR_c: npt.NDArray[np.complex128] AR_O: npt.NDArray[np.object_] AR_LIKE_b: list[bool] +AR_LIKE_c: list[complex] assert_type(np.fliplr(AR_b), npt.NDArray[np.bool]) assert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any]) @@ -62,28 +63,84 @@ assert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating[Any]]) assert_type(np.vander(AR_c), npt.NDArray[np.complexfloating[Any, Any]]) assert_type(np.vander(AR_O), npt.NDArray[np.object_]) +assert_type( + np.histogram2d(AR_LIKE_c, AR_LIKE_c), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128 | np.float64], + npt.NDArray[np.complex128 | np.float64], + ], +) assert_type( np.histogram2d(AR_i, AR_b), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + npt.NDArray[np.float64], + npt.NDArray[np.float64], ], ) assert_type( - np.histogram2d(AR_f, AR_f), + np.histogram2d(AR_f, AR_i), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.floating[Any]], - npt.NDArray[np.floating[Any]], + npt.NDArray[np.float64], + npt.NDArray[np.float64], + ], +) +assert_type( + np.histogram2d(AR_i, AR_f), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.float64], + npt.NDArray[np.float64], ], ) assert_type( np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b), tuple[ npt.NDArray[np.float64], - npt.NDArray[np.complexfloating[Any, Any]], - npt.NDArray[np.complexfloating[Any, Any]], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_f, AR_c, bins=8), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_f, bins=(8, 5)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.complex128], + 
npt.NDArray[np.complex128], + ], +) +assert_type( + np.histogram2d(AR_c, AR_i, bins=AR_u), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.uint64], + npt.NDArray[np.uint64], + ], +) +assert_type( + np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)), + tuple[ + npt.NDArray[np.float64], + npt.NDArray[np.bool | np.complex128], + npt.NDArray[np.bool | np.complex128], ], ) diff --git a/numpy/typing/tests/data/reveal/ufuncs.pyi b/numpy/typing/tests/data/reveal/ufuncs.pyi index 28e189411802..39a796bf6845 100644 --- a/numpy/typing/tests/data/reveal/ufuncs.pyi +++ b/numpy/typing/tests/data/reveal/ufuncs.pyi @@ -1,5 +1,5 @@ import sys -from typing import Literal, Any +from typing import Literal, Any, NoReturn import numpy as np import numpy.typing as npt @@ -76,6 +76,16 @@ assert_type(np.matmul.identity, None) assert_type(np.matmul(AR_f8, AR_f8), Any) assert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any) +assert_type(np.vecdot.__name__, Literal["vecdot"]) +assert_type(np.vecdot.ntypes, Literal[19]) +assert_type(np.vecdot.identity, None) +assert_type(np.vecdot.nin, Literal[2]) +assert_type(np.vecdot.nout, Literal[1]) +assert_type(np.vecdot.nargs, Literal[3]) +assert_type(np.vecdot.signature, Literal["(n),(n)->()"]) +assert_type(np.vecdot.identity, None) +assert_type(np.vecdot(AR_f8, AR_f8), Any) + assert_type(np.bitwise_count.__name__, Literal['bitwise_count']) assert_type(np.bitwise_count.ntypes, Literal[11]) assert_type(np.bitwise_count.identity, None) @@ -86,3 +96,27 @@ assert_type(np.bitwise_count.signature, None) assert_type(np.bitwise_count.identity, None) assert_type(np.bitwise_count(i8), Any) assert_type(np.bitwise_count(AR_i8), npt.NDArray[Any]) + +assert_type(np.absolute.outer(), NoReturn) +assert_type(np.frexp.outer(), NoReturn) +assert_type(np.divmod.outer(), NoReturn) 
+assert_type(np.matmul.outer(), NoReturn) + +assert_type(np.absolute.reduceat(), NoReturn) +assert_type(np.frexp.reduceat(), NoReturn) +assert_type(np.divmod.reduceat(), NoReturn) +assert_type(np.matmul.reduceat(), NoReturn) + +assert_type(np.absolute.reduce(), NoReturn) +assert_type(np.frexp.reduce(), NoReturn) +assert_type(np.divmod.reduce(), NoReturn) +assert_type(np.matmul.reduce(), NoReturn) + +assert_type(np.absolute.accumulate(), NoReturn) +assert_type(np.frexp.accumulate(), NoReturn) +assert_type(np.divmod.accumulate(), NoReturn) +assert_type(np.matmul.accumulate(), NoReturn) + +assert_type(np.frexp.at(), NoReturn) +assert_type(np.divmod.at(), NoReturn) +assert_type(np.matmul.at(), NoReturn) diff --git a/numpy/version.pyi b/numpy/version.pyi index 2c305466a7e0..1262189f2f38 100644 --- a/numpy/version.pyi +++ b/numpy/version.pyi @@ -1,7 +1,24 @@ -version: str -__version__: str -full_version: str +import sys +from typing import Final, TypeAlias -git_revision: str -release: bool -short_version: str +if sys.version_info >= (3, 11): + from typing import LiteralString +else: + LiteralString: TypeAlias = str + +__all__ = ( + '__version__', + 'full_version', + 'git_revision', + 'release', + 'short_version', + 'version', +) + +version: Final[LiteralString] +__version__: Final[LiteralString] +full_version: Final[LiteralString] + +git_revision: Final[LiteralString] +release: Final[bool] +short_version: Final[LiteralString] diff --git a/pavement.py b/pavement.py index 3a52db2e6555..4149f571ef28 100644 --- a/pavement.py +++ b/pavement.py @@ -38,7 +38,7 @@ #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/source/release/2.0.0-notes.rst' +RELEASE_NOTES = 'doc/source/release/2.1.3-notes.rst' #------------------------------------------------------- @@ -50,70 +50,6 @@ installersdir=os.path.join("release", "installers")),) -#------------------------ -# Get the release version -#------------------------ - -sys.path.insert(0, 
os.path.dirname(__file__)) -try: - from setup import FULLVERSION -finally: - sys.path.pop(0) - - -#-------------------------- -# Source distribution stuff -#-------------------------- -def tarball_name(ftype='gztar'): - """Generate source distribution name - - Parameters - ---------- - ftype : {'zip', 'gztar'} - Type of archive, default is 'gztar'. - - """ - root = f'numpy-{FULLVERSION}' - if ftype == 'gztar': - return root + '.tar.gz' - elif ftype == 'zip': - return root + '.zip' - raise ValueError(f"Unknown type {type}") - - -@task -def sdist(options): - """Make source distributions. - - Parameters - ---------- - options : - Set by ``task`` decorator. - - """ - # First clean the repo and update submodules (for up-to-date doc html theme - # and Sphinx extensions) - sh('git clean -xdf') - sh('git submodule init') - sh('git submodule update') - - # To be sure to bypass paver when building sdist... paver + numpy.distutils - # do not play well together. - # Cython is run over all Cython files in setup.py, so generated C files - # will be included. - sh('python3 setup.py sdist --formats=gztar,zip') - - # Copy the superpack into installers dir - idirs = options.installers.installersdir - if not os.path.exists(idirs): - os.makedirs(idirs) - - for ftype in ['gztar', 'zip']: - source = os.path.join('dist', tarball_name(ftype)) - target = os.path.join(idirs, tarball_name(ftype)) - shutil.copy(source, target) - - #------------- # README stuff #------------- diff --git a/pyproject.toml b/pyproject.toml index 036137c36da7..b5782a7e4258 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,7 +7,7 @@ requires = [ [project] name = "numpy" -version = "2.0.0.dev0" +version = "2.1.3" # TODO: add `license-files` once PEP 639 is accepted (see meson-python#88) license = {file = "LICENSE.txt"} @@ -16,7 +16,7 @@ authors = [{name = "Travis E. 
Oliphant et al."}] maintainers = [ {name = "NumPy Developers", email="numpy-discussion@python.org"}, ] -requires-python = ">=3.9" +requires-python = ">=3.10" readme = "README.md" classifiers = [ 'Development Status :: 5 - Production/Stable', @@ -26,10 +26,10 @@ classifiers = [ 'Programming Language :: C', 'Programming Language :: Python', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', + 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: 3 :: Only', 'Programming Language :: Python :: Implementation :: CPython', 'Topic :: Software Development', @@ -184,6 +184,9 @@ repair-wheel-command = "" [tool.meson-python] meson = 'vendored-meson/meson/meson.py' +[tool.meson-python.args] +install = ['--tags=runtime,python-runtime,tests,devel'] + [tool.spin] package = 'numpy' @@ -199,13 +202,17 @@ cli = 'vendored-meson/meson/meson.py' ".spin/cmds.py:lint", ] "Environments" = [ - "spin.cmds.meson.run", ".spin/cmds.py:ipython", - ".spin/cmds.py:python", "spin.cmds.meson.gdb", + "spin.cmds.meson.run", + ".spin/cmds.py:ipython", + ".spin/cmds.py:python", + "spin.cmds.meson.gdb", "spin.cmds.meson.lldb" ] "Documentation" = [ ".spin/cmds.py:docs", ".spin/cmds.py:changelog", ".spin/cmds.py:notes", + ".spin/cmds.py:check_docs", + ".spin/cmds.py:check_tutorials", ] "Metrics" = [".spin/cmds.py:bench"] diff --git a/pytest.ini b/pytest.ini index 33bb8bd1e5b1..71542643e170 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,6 +1,6 @@ [pytest] addopts = -l -norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators +norecursedirs = doc tools numpy/linalg/lapack_lite numpy/_core/code_generators numpy/_core/src/common/pythoncapi-compat doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES junit_family=xunit2 diff --git a/requirements/ci32_requirements.txt 
b/requirements/ci32_requirements.txt index 0484e5084474..215bc1229930 100644 --- a/requirements/ci32_requirements.txt +++ b/requirements/ci32_requirements.txt @@ -1,4 +1,3 @@ spin # Keep this in sync with ci_requirements.txt -scipy-openblas32==0.3.26.0.4 - +scipy-openblas32==0.3.27.44.6 diff --git a/requirements/ci_requirements.txt b/requirements/ci_requirements.txt index 9ac795a626a6..5bed94385819 100644 --- a/requirements/ci_requirements.txt +++ b/requirements/ci_requirements.txt @@ -1,4 +1,4 @@ spin # Keep this in sync with ci32_requirements.txt -scipy-openblas32==0.3.26.0.4 -scipy-openblas64==0.3.26.0.4 +scipy-openblas32==0.3.27.44.6 +scipy-openblas64==0.3.27.44.6 diff --git a/requirements/doc_requirements.txt b/requirements/doc_requirements.txt index a642de83b4e3..79de7a9f0802 100644 --- a/requirements/doc_requirements.txt +++ b/requirements/doc_requirements.txt @@ -1,7 +1,8 @@ # doxygen required, use apt-get or dnf -sphinx>=4.5.0 +sphinx==7.2.6 numpydoc==1.4 -pydata-sphinx-theme==0.13.3 +pydata-sphinx-theme>=0.15.2 +sphinx-copybutton sphinx-design scipy matplotlib diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 7352f230bb3a..5c19c3a914ec 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -1,9 +1,8 @@ Cython wheel==0.38.1 -#setuptools==65.5.1 ; python_version < '3.12' -#setuptools ; python_version >= '3.12' -setuptools -hypothesis==6.81.1 +setuptools==65.5.1 ; python_version < '3.12' +setuptools ; python_version >= '3.12' +hypothesis==6.104.1 pytest==7.4.0 pytz==2023.3.post1 pytest-cov==4.1.0 @@ -15,7 +14,7 @@ cffi; python_version < '3.10' # For testing types. 
Notes on the restrictions: # - Mypy relies on C API features not present in PyPy # NOTE: Keep mypy in sync with environment.yml -mypy==1.7.1; platform_python_implementation != "PyPy" +mypy==1.10.0; platform_python_implementation != "PyPy" typing_extensions>=4.2.0 # for optional f2py encoding detection charset-normalizer diff --git a/tools/c_coverage/c_coverage_report.py b/tools/c_coverage/c_coverage_report.py index bd3eeaee9776..2e5a4c270376 100755 --- a/tools/c_coverage/c_coverage_report.py +++ b/tools/c_coverage/c_coverage_report.py @@ -54,25 +54,23 @@ def mark_line(self, lineno, as_func=None): line.add(as_func) def write_text(self, fd): - source = open(self.path, "r") - for i, line in enumerate(source): - if i + 1 in self.lines: - fd.write("> ") - else: - fd.write("! ") - fd.write(line) - source.close() + with open(self.path, "r") as source: + for i, line in enumerate(source): + if i + 1 in self.lines: + fd.write("> ") + else: + fd.write("! ") + fd.write(line) def write_html(self, fd): - source = open(self.path, 'r') - code = source.read() - lexer = CLexer() - formatter = FunctionHtmlFormatter( - self.lines, - full=True, - linenos='inline') - fd.write(highlight(code, lexer, formatter)) - source.close() + with open(self.path, 'r') as source: + code = source.read() + lexer = CLexer() + formatter = FunctionHtmlFormatter( + self.lines, + full=True, + linenos='inline') + fd.write(highlight(code, lexer, formatter)) class SourceFiles: @@ -95,24 +93,24 @@ def clean_path(self, path): def write_text(self, root): for path, source in self.files.items(): - fd = open(os.path.join(root, self.clean_path(path)), "w") - source.write_text(fd) - fd.close() + with open(os.path.join(root, self.clean_path(path)), "w") as fd: + source.write_text(fd) def write_html(self, root): for path, source in self.files.items(): - fd = open(os.path.join(root, self.clean_path(path) + ".html"), "w") - source.write_html(fd) - fd.close() + with open( + os.path.join(root, self.clean_path(path) + 
".html"), "w" + ) as fd: + source.write_html(fd) - fd = open(os.path.join(root, 'index.html'), 'w') - fd.write("") - paths = sorted(self.files.keys()) - for path in paths: - fd.write('

%s

' % - (self.clean_path(path), escape(path[len(self.prefix):]))) - fd.write("") - fd.close() + with open(os.path.join(root, 'index.html'), 'w') as fd: + fd.write("") + paths = sorted(self.files.keys()) + for path in paths: + fd.write('

%s

' % + (self.clean_path(path), + escape(path[len(self.prefix):]))) + fd.write("") def collect_stats(files, fd, pattern): @@ -164,9 +162,8 @@ def collect_stats(files, fd, pattern): files = SourceFiles() for log_file in args.callgrind_file: - log_fd = open(log_file, 'r') - collect_stats(files, log_fd, args.pattern) - log_fd.close() + with open(log_file, 'r') as log_fd: + collect_stats(files, log_fd, args.pattern) if not os.path.exists(args.directory): os.makedirs(args.directory) diff --git a/tools/check_installed_files.py b/tools/check_installed_files.py index 9e97f903d65a..c45a046b1ca2 100644 --- a/tools/check_installed_files.py +++ b/tools/check_installed_files.py @@ -21,6 +21,7 @@ import os import glob import sys +import json CUR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__))) @@ -34,7 +35,7 @@ } -def main(install_dir): +def main(install_dir, tests_check): INSTALLED_DIR = os.path.join(ROOT_DIR, install_dir) if not os.path.exists(INSTALLED_DIR): raise ValueError( @@ -44,14 +45,20 @@ def main(install_dir): numpy_test_files = get_files(NUMPY_DIR, kind='test') installed_test_files = get_files(INSTALLED_DIR, kind='test') - # Check test files detected in repo are installed - for test_file in numpy_test_files.keys(): - if test_file not in installed_test_files.keys(): - raise Exception( - "%s is not installed" % numpy_test_files[test_file] - ) - - print("----------- All the test files were installed --------------") + if tests_check == "--no-tests": + if len(installed_test_files) > 0: + raise Exception("Test files aren't expected to be installed in %s" + ", found %s" % (INSTALLED_DIR, installed_test_files)) + print("----------- No test files were installed --------------") + else: + # Check test files detected in repo are installed + for test_file in numpy_test_files.keys(): + if test_file not in installed_test_files.keys(): + raise Exception( + "%s is not installed" % numpy_test_files[test_file] + ) + + print("----------- All the test files were installed 
--------------") numpy_pyi_files = get_files(NUMPY_DIR, kind='stub') installed_pyi_files = get_files(INSTALLED_DIR, kind='stub') @@ -59,9 +66,13 @@ def main(install_dir): # Check *.pyi files detected in repo are installed for pyi_file in numpy_pyi_files.keys(): if pyi_file not in installed_pyi_files.keys(): + if (tests_check == "--no-tests" and + "tests" in numpy_pyi_files[pyi_file]): + continue raise Exception("%s is not installed" % numpy_pyi_files[pyi_file]) - print("----------- All the .pyi files were installed --------------") + print("----------- All the necessary .pyi files " + "were installed --------------") def get_files(dir_to_check, kind='test'): @@ -79,13 +90,35 @@ def get_files(dir_to_check, kind='test'): k: v for k, v in files.items() if not k.startswith('distutils') } + # ignore python files in vendored pythoncapi-compat submodule + files = { + k: v for k, v in files.items() if 'pythoncapi-compat' not in k + } + return files if __name__ == '__main__': - if not len(sys.argv) == 2: + if len(sys.argv) < 2: raise ValueError("Incorrect number of input arguments, need " "check_installation.py relpath/to/installed/numpy") install_dir = sys.argv[1] - main(install_dir) + tests_check = "" + if len(sys.argv) >= 3: + tests_check = sys.argv[2] + main(install_dir, tests_check) + + all_tags = set() + + with open(os.path.join('build', 'meson-info', + 'intro-install_plan.json'), 'r') as f: + targets = json.load(f) + + for key in targets.keys(): + for values in list(targets[key].values()): + if not values['tag'] in all_tags: + all_tags.add(values['tag']) + + if all_tags != set(['runtime', 'python-runtime', 'devel', 'tests']): + raise AssertionError(f"Found unexpected install tag: {all_tags}") diff --git a/tools/ci/array-api-skips.txt b/tools/ci/array-api-skips.txt deleted file mode 100644 index fec7750098c5..000000000000 --- a/tools/ci/array-api-skips.txt +++ /dev/null @@ -1,50 +0,0 @@ -# 'unique_inverse' output array is 1-D for 0-D input 
-array_api_tests/test_set_functions.py::test_unique_all -array_api_tests/test_set_functions.py::test_unique_inverse - -# https://github.com/numpy/numpy/issues/21213 -array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -infinity and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +infinity] -array_api_tests/test_special_cases.py::test_iop[__ipow__(x1_i is -0 and x2_i > 0 and not (x2_i.is_integer() and x2_i % 2 == 1)) -> +0] -# noted diversions from spec -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] -array_api_tests/test_special_cases.py::test_binary[floor_divide(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] -array_api_tests/test_special_cases.py::test_binary[__floordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is 
+infinity) -> -0] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i > 0) -> +infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is +infinity and isfinite(x2_i) and x2_i < 0) -> -infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i > 0) -> -infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(x1_i is -infinity and isfinite(x2_i) and x2_i < 0) -> +infinity] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i > 0 and x2_i is -infinity) -> -0] -array_api_tests/test_special_cases.py::test_iop[__ifloordiv__(isfinite(x1_i) and x1_i < 0 and x2_i is +infinity) -> -0] - -# fft test suite is buggy as of 83f0bcdc -array_api_tests/test_fft.py - -# finfo return type misalignment -array_api_tests/test_data_type_functions.py::test_finfo[float32] - -# a few misalignments -array_api_tests/test_operators_and_elementwise_functions.py -array_api_tests/test_signatures.py::test_func_signature[std] -array_api_tests/test_signatures.py::test_func_signature[var] -array_api_tests/test_signatures.py::test_func_signature[asarray] -array_api_tests/test_signatures.py::test_func_signature[reshape] -array_api_tests/test_signatures.py::test_array_method_signature[__array_namespace__] - -# missing 'copy' keyword argument, 'newshape' should be named 'shape' -array_api_tests/test_signatures.py::test_func_signature[reshape] - -# missing 'descending' keyword arguments -array_api_tests/test_signatures.py::test_func_signature[argsort] -array_api_tests/test_signatures.py::test_func_signature[sort] - -# assertionError: out.dtype=float32, but should be float64 [sum(float32)] -array_api_tests/test_statistical_functions.py::test_sum diff --git a/tools/ci/array-api-xfails.txt b/tools/ci/array-api-xfails.txt new file mode 100644 index 000000000000..c81b61c5740e --- /dev/null +++ b/tools/ci/array-api-xfails.txt 
@@ -0,0 +1,23 @@ +# finfo return type misalignment +array_api_tests/test_data_type_functions.py::test_finfo[float32] + +# 'shape' arg is present. 'newshape' is retained for backward compat. +array_api_tests/test_signatures.py::test_func_signature[reshape] + +# 'min/max' args are present. 'a_min/a_max' are retained for backward compat. +array_api_tests/test_signatures.py::test_func_signature[clip] + +# missing 'descending' keyword argument +array_api_tests/test_signatures.py::test_func_signature[argsort] +array_api_tests/test_signatures.py::test_func_signature[sort] + +# missing 'descending' keyword argument +array_api_tests/test_sorting_functions.py::test_argsort +array_api_tests/test_sorting_functions.py::test_sort + +# ufuncs signature on linux is always +# np.vecdot is the only ufunc with a keyword argument which causes a failure +array_api_tests/test_signatures.py::test_func_signature[vecdot] + +# input is cast to min/max's dtype if they're different +array_api_tests/test_operators_and_elementwise_functions.py::test_clip diff --git a/tools/ci/cirrus_arm.yml b/tools/ci/cirrus_arm.yml index cbf99c9dace6..46fed5bbf0c4 100644 --- a/tools/ci/cirrus_arm.yml +++ b/tools/ci/cirrus_arm.yml @@ -67,13 +67,14 @@ freebsd_test_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' compute_engine_instance: image_project: freebsd-org-cloud-dev - image: family/freebsd-13-2 + image: family/freebsd-14-0 platform: freebsd cpu: 1 memory: 4G install_devtools_script: | pkg install -y git bash ninja ccache blas cblas lapack pkgconf + pkg install -y python311 <<: *MODIFIED_CLONE @@ -86,22 +87,22 @@ freebsd_test_task: prepare_env_script: | # Create a venv (the `source` command needs bash, not the default sh shell) chsh -s /usr/local/bin/bash - python -m venv .venv + python3.11 -m venv .venv source .venv/bin/activate # Minimal build and test requirements - python -m pip install -U pip - python -m pip install meson-python Cython pytest hypothesis + python3.11 -m pip install -U pip 
+ python3.11 -m pip install meson-python Cython pytest hypothesis build_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate - python -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" + python3.11 -m pip install . --no-build-isolation -v -Csetup-args="-Dallow-noblas=false" test_script: | chsh -s /usr/local/bin/bash source .venv/bin/activate cd tools - python -m pytest --pyargs numpy -m "not slow" + python3.11 -m pytest --pyargs numpy -m "not slow" ccache -s on_failure: diff --git a/tools/ci/cirrus_wheels.yml b/tools/ci/cirrus_wheels.yml index bf44a8b72704..99aa6ee2b50f 100644 --- a/tools/ci/cirrus_wheels.yml +++ b/tools/ci/cirrus_wheels.yml @@ -12,6 +12,8 @@ build_and_store_wheels: &BUILD_AND_STORE_WHEELS linux_aarch64_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' + env: + CIRRUS_CLONE_SUBMODULES: true compute_engine_instance: image_project: cirrus-images image: family/docker-builder-arm64 @@ -24,19 +26,17 @@ linux_aarch64_task: # single task takes longer than 60 mins (the default time limit for a # cirrus-ci task). 
- env: - CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp39-* - EXPECT_CPU_FEATURES: NEON NEON_FP16 NEON_VFPV4 ASIMD ASIMDHP ASIMDDP ASIMDFHM - - env: - CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp310-* - env: - CIRRUS_CLONE_SUBMODULES: true CIBW_BUILD: cp311-* - env: - CIRRUS_CLONE_SUBMODULES: true - CIBW_PRERELEASE_PYTHONS: True CIBW_BUILD: cp312-* + - env: + CIBW_BUILD: cp313-* + - env: + CIBW_BUILD: cp313t-* + CIBW_FREE_THREADED_SUPPORT: 1 + CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation" initial_setup_script: | apt update @@ -57,17 +57,21 @@ linux_aarch64_task: macosx_arm64_task: use_compute_credits: $CIRRUS_USER_COLLABORATOR == 'true' + env: + CIRRUS_CLONE_SUBMODULES: true macos_instance: matrix: image: ghcr.io/cirruslabs/macos-monterey-xcode matrix: - env: - CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp39-* cp310-* + CIBW_BUILD: cp310-* cp311-* + - env: + CIBW_BUILD: cp312-* cp313-* - env: - CIRRUS_CLONE_SUBMODULES: true - CIBW_BUILD: cp311-* cp312-* + CIBW_BUILD: cp313t-* + CIBW_FREE_THREADED_SUPPORT: 1 + CIBW_BUILD_FRONTEND: "pip; args: --no-build-isolation" env: PATH: /usr/local/lib:/usr/local/include:$PATH CIBW_ARCHS: arm64 diff --git a/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch b/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch deleted file mode 100644 index f06ea4eead19..000000000000 --- a/tools/ci/emscripten/0001-do-not-set-meson-environment-variable-pyodide-gh-4502.patch +++ /dev/null @@ -1,55 +0,0 @@ -From e08ebf0e90f632547c8ff5b396ec0c4ddd65aad4 Mon Sep 17 00:00:00 2001 -From: Gyeongjae Choi -Date: Sat, 10 Feb 2024 03:28:01 +0900 -Subject: [PATCH] Update numpy to 1.26.4 and don't set MESON env variable - (#4502) - -From meson-python 0.15, $MESON env variable is used to overwrite the meson binary -path. We don't want that behavior. 
---- - pypabuild.py | 22 +++++++++++++++------- - 1 file changed, 15 insertions(+), 7 deletions(-) - -diff --git a/pypabuild.py b/pypabuild.py -index 9d0107a8..6961b14e 100644 ---- a/pypabuild.py -+++ b/pypabuild.py -@@ -40,6 +40,19 @@ AVOIDED_REQUIREMENTS = [ - "patchelf", - ] - -+# corresponding env variables for symlinks -+SYMLINK_ENV_VARS = { -+ "cc": "CC", -+ "c++": "CXX", -+ "ld": "LD", -+ "lld": "LLD", -+ "ar": "AR", -+ "gcc": "GCC", -+ "ranlib": "RANLIB", -+ "strip": "STRIP", -+ "gfortran": "FC", # https://mesonbuild.com/Reference-tables.html#compiler-and-linker-selection-variables -+} -+ - - def _gen_runner( - cross_build_env: Mapping[str, str], -@@ -207,13 +220,8 @@ def make_command_wrapper_symlinks(symlink_dir: Path) -> dict[str, str]: - symlink_path.unlink() - - symlink_path.symlink_to(pywasmcross_exe) -- if symlink == "c++": -- var = "CXX" -- elif symlink == "gfortran": -- var = "FC" # https://mesonbuild.com/Reference-tables.html#compiler-and-linker-selection-variables -- else: -- var = symlink.upper() -- env[var] = str(symlink_path) -+ if symlink in SYMLINK_ENV_VARS: -+ env[SYMLINK_ENV_VARS[symlink]] = str(symlink_path) - - return env - --- -2.39.3 (Apple Git-145) - diff --git a/tools/ci/run_32_bit_linux_docker.sh b/tools/ci/run_32_bit_linux_docker.sh index b1cf4391e550..5e5e8bae4f96 100644 --- a/tools/ci/run_32_bit_linux_docker.sh +++ b/tools/ci/run_32_bit_linux_docker.sh @@ -2,7 +2,7 @@ set -xe git config --global --add safe.directory /numpy cd /numpy -/opt/python/cp39-cp39/bin/python -mvenv venv +/opt/python/cp310-cp310/bin/python -mvenv venv source venv/bin/activate pip install -r requirements/ci32_requirements.txt python3 -m pip install -r requirements/test_requirements.txt diff --git a/tools/download-wheels.py b/tools/download-wheels.py index e5753eb2148c..54dbdf1200a8 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -56,15 +56,20 @@ def get_wheel_names(version): The release version. For instance, "1.18.3". 
""" + ret = [] http = urllib3.PoolManager(cert_reqs="CERT_REQUIRED") tmpl = re.compile(rf"^.*{PREFIX}-{version}{SUFFIX}") - index_url = f"{STAGING_URL}/files" - index_html = http.request("GET", index_url) - soup = BeautifulSoup(index_html.data, "html.parser") - return soup.find_all(string=tmpl) + # TODO: generalize this by searching for `showing 1 of N` and + # looping over N pages, starting from 1 + for i in range(1, 3): + index_url = f"{STAGING_URL}/files?page={i}" + index_html = http.request("GET", index_url) + soup = BeautifulSoup(index_html.data, "html.parser") + ret += soup.find_all(string=tmpl) + return ret -def download_wheels(version, wheelhouse): +def download_wheels(version, wheelhouse, test=False): """Download release wheels. The release wheels for the given NumPy version are downloaded @@ -86,8 +91,15 @@ def download_wheels(version, wheelhouse): wheel_path = os.path.join(wheelhouse, wheel_name) with open(wheel_path, "wb") as f: with http.request("GET", wheel_url, preload_content=False,) as r: - print(f"{i + 1:<4}{wheel_name}") - shutil.copyfileobj(r, f) + info = r.info() + length = int(info.get('Content-Length', '0')) + if length == 0: + length = 'unknown size' + else: + length = f"{(length / 1024 / 1024):.2f}MB" + print(f"{i + 1:<4}{wheel_name} {length}") + if not test: + shutil.copyfileobj(r, f) print(f"\nTotal files downloaded: {len(wheel_names)}") @@ -101,6 +113,10 @@ def download_wheels(version, wheelhouse): default=os.path.join(os.getcwd(), "release", "installers"), help="Directory in which to store downloaded wheels\n" "[defaults to /release/installers]") + parser.add_argument( + "-t", "--test", + action = 'store_true', + help="only list available wheels, do not download") args = parser.parse_args() @@ -110,4 +126,4 @@ def download_wheels(version, wheelhouse): f"{wheelhouse} wheelhouse directory is not present." 
" Perhaps you need to use the '-w' flag to specify one.") - download_wheels(args.version, wheelhouse) + download_wheels(args.version, wheelhouse, test=args.test) diff --git a/tools/lint_diff.ini b/tools/lint_diff.ini index dbebe483b4ab..810e265d4dec 100644 --- a/tools/lint_diff.ini +++ b/tools/lint_diff.ini @@ -1,5 +1,5 @@ [pycodestyle] -max_line_length = 79 +max_line_length = 88 statistics = True ignore = E121,E122,E123,E125,E126,E127,E128,E226,E241,E251,E265,E266,E302,E402,E704,E712,E721,E731,E741,W291,W293,W391,W503,W504 exclude = numpy/__config__.py,numpy/typing/tests/data,.spin/cmds.py diff --git a/tools/linter.py b/tools/linter.py index 0031ff83a479..c5746b518b8e 100644 --- a/tools/linter.py +++ b/tools/linter.py @@ -4,10 +4,8 @@ from argparse import ArgumentParser from git import Repo, exc -CONFIG = os.path.join( - os.path.abspath(os.path.dirname(__file__)), - 'lint_diff.ini', -) +CWD = os.path.abspath(os.path.dirname(__file__)) +CONFIG = os.path.join(CWD, 'lint_diff.ini') # NOTE: The `diff` and `exclude` options of pycodestyle seem to be # incompatible, so instead just exclude the necessary files when @@ -23,7 +21,7 @@ class DiffLinter: def __init__(self, branch): self.branch = branch - self.repo = Repo('.') + self.repo = Repo(os.path.join(CWD, '..')) self.head = self.repo.head.commit def get_branch_diff(self, uncommitted = False): diff --git a/tools/refguide_check.py b/tools/refguide_check.py index 6e63ffccf7cc..8de816715bdb 100644 --- a/tools/refguide_check.py +++ b/tools/refguide_check.py @@ -27,7 +27,6 @@ """ import copy -import doctest import inspect import io import os @@ -39,7 +38,6 @@ import docutils.core from argparse import ArgumentParser from contextlib import contextmanager, redirect_stderr -from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL from docutils.parsers.rst import directives @@ -49,8 +47,6 @@ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext')) from numpydoc.docscrape_sphinx 
import get_doc_object -SKIPBLOCK = doctest.register_optionflag('SKIPBLOCK') - # Enable specific Sphinx directives from sphinx.directives.other import SeeAlso, Only directives.register_directive('seealso', SeeAlso) @@ -97,52 +93,6 @@ 'io.arff': 'io', } -# these names are known to fail doctesting and we like to keep it that way -# e.g. sometimes pseudocode is acceptable etc -# -# Optionally, a subset of methods can be skipped by setting dict-values -# to a container of method-names -DOCTEST_SKIPDICT = { - # cases where NumPy docstrings import things from SciPy: - 'numpy.lib.vectorize': None, - 'numpy.random.standard_gamma': None, - 'numpy.random.gamma': None, - 'numpy.random.vonmises': None, - 'numpy.random.power': None, - 'numpy.random.zipf': None, - # cases where NumPy docstrings import things from other 3'rd party libs: - 'numpy._core.from_dlpack': None, - # remote / local file IO with DataSource is problematic in doctest: - 'numpy.lib.npyio.DataSource': None, - 'numpy.lib.Repository': None, -} - -# Skip non-numpy RST files, historical release notes -# Any single-directory exact match will skip the directory and all subdirs. -# Any exact match (like 'doc/release') will scan subdirs but skip files in -# the matched directory. 
-# Any filename will skip that file -RST_SKIPLIST = [ - 'scipy-sphinx-theme', - 'sphinxext', - 'neps', - 'changelog', - 'doc/release', - 'doc/source/release', - 'doc/release/upcoming_changes', - 'c-info.ufunc-tutorial.rst', - 'c-info.python-as-glue.rst', - 'f2py.getting-started.rst', - 'f2py-examples.rst', - 'arrays.nditer.cython.rst', - 'how-to-verify-bug.rst', - # See PR 17222, these should be fixed - 'basics.dispatch.rst', - 'basics.subclassing.rst', - 'basics.interoperability.rst', - 'misc.rst', - 'TESTS.rst' -] # these names are not required to be present in ALL despite being in # autosummary:: listing @@ -161,14 +111,6 @@ # priority -- focus on just getting docstrings executed / correct r'numpy\.*', ] -# deprecated windows in scipy.signal namespace -for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman', - 'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop', - 'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning', - 'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'): - REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' 
+ name) - -HAVE_MATPLOTLIB = False def short_path(path, cwd=None): @@ -609,520 +551,6 @@ def check_rest(module, names, dots=True): return results -### Doctest helpers #### - -# the namespace to run examples in -DEFAULT_NAMESPACE = {'np': np} - -# the namespace to do checks in -CHECK_NAMESPACE = { - 'np': np, - 'numpy': np, - 'assert_allclose': np.testing.assert_allclose, - 'assert_equal': np.testing.assert_equal, - # recognize numpy repr's - 'array': np.array, - 'matrix': np.matrix, - 'int64': np.int64, - 'uint64': np.uint64, - 'int8': np.int8, - 'int32': np.int32, - 'float32': np.float32, - 'float64': np.float64, - 'dtype': np.dtype, - 'nan': np.nan, - 'inf': np.inf, - 'StringIO': io.StringIO, -} - - -class DTRunner(doctest.DocTestRunner): - """ - The doctest runner - """ - DIVIDER = "\n" - - def __init__(self, item_name, checker=None, verbose=None, optionflags=0): - self._item_name = item_name - doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose, - optionflags=optionflags) - - def _report_item_name(self, out, new_line=False): - if self._item_name is not None: - if new_line: - out("\n") - self._item_name = None - - def report_start(self, out, test, example): - self._checker._source = example.source - return doctest.DocTestRunner.report_start(self, out, test, example) - - def report_success(self, out, test, example, got): - if self._verbose: - self._report_item_name(out, new_line=True) - return doctest.DocTestRunner.report_success(self, out, test, example, got) - - def report_unexpected_exception(self, out, test, example, exc_info): - self._report_item_name(out) - return doctest.DocTestRunner.report_unexpected_exception( - self, out, test, example, exc_info) - - def report_failure(self, out, test, example, got): - self._report_item_name(out) - return doctest.DocTestRunner.report_failure(self, out, test, - example, got) - -class Checker(doctest.OutputChecker): - """ - Check the docstrings - """ - obj_pattern = re.compile('at 0x[0-9a-fA-F]+>') - 
vanilla = doctest.OutputChecker() - rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary", - "# uninitialized", "#uninitialized", "# uninit"} - stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(', - 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(', - '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim', - '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(', - '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='} - - def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2): - self.parse_namedtuples = parse_namedtuples - self.atol, self.rtol = atol, rtol - if ns is None: - self.ns = CHECK_NAMESPACE - else: - self.ns = ns - - def check_output(self, want, got, optionflags): - # cut it short if they are equal - if want == got: - return True - - # skip stopwords in source - if any(word in self._source for word in self.stopwords): - return True - - # skip random stuff - if any(word in want for word in self.rndm_markers): - return True - - # skip function/object addresses - if self.obj_pattern.search(got): - return True - - # ignore comments (e.g. signal.freqresp) - if want.lstrip().startswith("#"): - return True - - # try the standard doctest - try: - if self.vanilla.check_output(want, got, optionflags): - return True - except Exception: - pass - - # OK then, convert strings to objects - try: - a_want = eval(want, dict(self.ns)) - a_got = eval(got, dict(self.ns)) - except Exception: - # Maybe we're printing a numpy array? This produces invalid python - # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between - # values. So, reinsert commas and retry. 
- # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and - # (2) n-dim arrays with n > 1 - s_want = want.strip() - s_got = got.strip() - cond = (s_want.startswith("[") and s_want.endswith("]") and - s_got.startswith("[") and s_got.endswith("]")) - if cond: - s_want = ", ".join(s_want[1:-1].split()) - s_got = ", ".join(s_got[1:-1].split()) - return self.check_output(s_want, s_got, optionflags) - - if not self.parse_namedtuples: - return False - # suppose that "want" is a tuple, and "got" is smth like - # MoodResult(statistic=10, pvalue=0.1). - # Then convert the latter to the tuple (10, 0.1), - # and then compare the tuples. - try: - num = len(a_want) - regex = (r'[\w\d_]+\(' + - ', '.join([r'[\w\d_]+=(.+)']*num) + - r'\)') - grp = re.findall(regex, got.replace('\n', ' ')) - if len(grp) > 1: # no more than one for now - return False - # fold it back to a tuple - got_again = '(' + ', '.join(grp[0]) + ')' - return self.check_output(want, got_again, optionflags) - except Exception: - return False - - # ... and defer to numpy - try: - return self._do_check(a_want, a_got) - except Exception: - # heterog tuple, eg (1, np.array([1., 2.])) - try: - return all(self._do_check(w, g) for w, g in zip(a_want, a_got)) - except (TypeError, ValueError): - return False - - def _do_check(self, want, got): - # This should be done exactly as written to correctly handle all of - # numpy-comparable objects, strings, and heterogeneous tuples - try: - if want == got: - return True - except Exception: - pass - return np.allclose(want, got, atol=self.atol, rtol=self.rtol) - - -def _run_doctests(tests, full_name, verbose, doctest_warnings): - """ - Run modified doctests for the set of `tests`. 
- - Parameters - ---------- - tests : list - - full_name : str - - verbose : bool - doctest_warnings : bool - - Returns - ------- - tuple(bool, list) - Tuple of (success, output) - """ - flags = NORMALIZE_WHITESPACE | ELLIPSIS - runner = DTRunner(full_name, checker=Checker(), optionflags=flags, - verbose=verbose) - - output = io.StringIO(newline='') - success = True - - # Redirect stderr to the stdout or output - tmp_stderr = sys.stdout if doctest_warnings else output - - @contextmanager - def temp_cwd(): - cwd = os.getcwd() - tmpdir = tempfile.mkdtemp() - try: - os.chdir(tmpdir) - yield tmpdir - finally: - os.chdir(cwd) - shutil.rmtree(tmpdir) - - # Run tests, trying to restore global state afterward - cwd = os.getcwd() - with np.errstate(), np.printoptions(), temp_cwd() as tmpdir, \ - redirect_stderr(tmp_stderr): - # try to ensure random seed is NOT reproducible - np.random.seed(None) - - ns = {} - for t in tests: - # We broke the tests up into chunks to try to avoid PSEUDOCODE - # This has the unfortunate side effect of restarting the global - # namespace for each test chunk, so variables will be "lost" after - # a chunk. Chain the globals to avoid this - t.globs.update(ns) - t.filename = short_path(t.filename, cwd) - # Process our options - if any([SKIPBLOCK in ex.options for ex in t.examples]): - continue - fails, successes = runner.run(t, out=output.write, clear_globs=False) - if fails > 0: - success = False - ns = t.globs - - output.seek(0) - return success, output.read() - - -def check_doctests(module, verbose, ns=None, - dots=True, doctest_warnings=False): - """ - Check code in docstrings of the module's public symbols. - - Parameters - ---------- - module : ModuleType - Name of module - verbose : bool - Should the result be verbose - ns : dict - Name space of module - dots : bool - - doctest_warnings : bool - - Returns - ------- - results : list - List of [(item_name, success_flag, output), ...] 
- """ - if ns is None: - ns = dict(DEFAULT_NAMESPACE) - - # Loop over non-deprecated items - results = [] - - for name in get_all_dict(module)[0]: - full_name = module.__name__ + '.' + name - - if full_name in DOCTEST_SKIPDICT: - skip_methods = DOCTEST_SKIPDICT[full_name] - if skip_methods is None: - continue - else: - skip_methods = None - - try: - obj = getattr(module, name) - except AttributeError: - import traceback - results.append((full_name, False, - "Missing item!\n" + - traceback.format_exc())) - continue - - finder = doctest.DocTestFinder() - try: - tests = finder.find(obj, name, globs=dict(ns)) - except Exception: - import traceback - results.append((full_name, False, - "Failed to get doctests!\n" + - traceback.format_exc())) - continue - - if skip_methods is not None: - tests = [i for i in tests if - i.name.partition(".")[2] not in skip_methods] - - success, output = _run_doctests(tests, full_name, verbose, - doctest_warnings) - - if dots: - output_dot('.' if success else 'F') - - results.append((full_name, success, output)) - - if HAVE_MATPLOTLIB: - import matplotlib.pyplot as plt - plt.close('all') - - return results - - -def check_doctests_testfile(fname, verbose, ns=None, - dots=True, doctest_warnings=False): - """ - Check code in a text file. - - Mimic `check_doctests` above, differing mostly in test discovery. - (which is borrowed from stdlib's doctest.testfile here, - https://github.com/python-git/python/blob/master/Lib/doctest.py) - - Parameters - ---------- - fname : str - File name - verbose : bool - - ns : dict - Name space - - dots : bool - - doctest_warnings : bool - - Returns - ------- - list - List of [(item_name, success_flag, output), ...] - - Notes - ----- - - refguide can be signalled to skip testing code by adding - ``#doctest: +SKIP`` to the end of the line. If the output varies or is - random, add ``# may vary`` or ``# random`` to the comment. for example - - >>> plt.plot(...) 
# doctest: +SKIP - >>> random.randint(0,10) - 5 # random - - We also try to weed out pseudocode: - * We maintain a list of exceptions which signal pseudocode, - * We split the text file into "blocks" of code separated by empty lines - and/or intervening text. - * If a block contains a marker, the whole block is then assumed to be - pseudocode. It is then not being doctested. - - The rationale is that typically, the text looks like this: - - blah - - >>> from numpy import some_module # pseudocode! - >>> func = some_module.some_function - >>> func(42) # still pseudocode - 146 - - blah - - >>> 2 + 3 # real code, doctest it - 5 - - """ - if ns is None: - ns = CHECK_NAMESPACE - results = [] - - _, short_name = os.path.split(fname) - if short_name in DOCTEST_SKIPDICT: - return results - - full_name = fname - with open(fname, encoding='utf-8') as f: - text = f.read() - - PSEUDOCODE = set(['some_function', 'some_module', 'import example', - 'ctypes.CDLL', # likely need compiling, skip it - 'integrate.nquad(func,' # ctypes integrate tutotial - ]) - - # split the text into "blocks" and try to detect and omit pseudocode blocks. 
- parser = doctest.DocTestParser() - good_parts = [] - base_line_no = 0 - for part in text.split('\n\n'): - try: - tests = parser.get_doctest(part, ns, fname, fname, base_line_no) - except ValueError as e: - if e.args[0].startswith('line '): - # fix line number since `parser.get_doctest` does not increment - # the reported line number by base_line_no in the error message - parts = e.args[0].split() - parts[1] = str(int(parts[1]) + base_line_no) - e.args = (' '.join(parts),) + e.args[1:] - raise - if any(word in ex.source for word in PSEUDOCODE - for ex in tests.examples): - # omit it - pass - else: - # `part` looks like a good code, let's doctest it - good_parts.append((part, base_line_no)) - base_line_no += part.count('\n') + 2 - - # Reassemble the good bits and doctest them: - tests = [] - for good_text, line_no in good_parts: - tests.append(parser.get_doctest(good_text, ns, fname, fname, line_no)) - success, output = _run_doctests(tests, full_name, verbose, - doctest_warnings) - - if dots: - output_dot('.' 
if success else 'F') - - results.append((full_name, success, output)) - - if HAVE_MATPLOTLIB: - import matplotlib.pyplot as plt - plt.close('all') - - return results - - -def iter_included_files(base_path, verbose=0, suffixes=('.rst',)): - """ - Generator function to walk `base_path` and its subdirectories, skipping - files or directories in RST_SKIPLIST, and yield each file with a suffix in - `suffixes` - - Parameters - ---------- - base_path : str - Base path of the directory to be processed - verbose : int - - suffixes : tuple - - Yields - ------ - path - Path of the directory and its sub directories - """ - if os.path.exists(base_path) and os.path.isfile(base_path): - yield base_path - for dir_name, subdirs, files in os.walk(base_path, topdown=True): - if dir_name in RST_SKIPLIST: - if verbose > 0: - sys.stderr.write('skipping files in %s' % dir_name) - files = [] - for p in RST_SKIPLIST: - if p in subdirs: - if verbose > 0: - sys.stderr.write('skipping %s and subdirs' % p) - subdirs.remove(p) - for f in files: - if (os.path.splitext(f)[1] in suffixes and - f not in RST_SKIPLIST): - yield os.path.join(dir_name, f) - - -def check_documentation(base_path, results, args, dots): - """ - Check examples in any *.rst located inside `base_path`. - Add the output to `results`. - - See Also - -------- - check_doctests_testfile - """ - for filename in iter_included_files(base_path, args.verbose): - if dots: - sys.stderr.write(filename + ' ') - sys.stderr.flush() - - tut_results = check_doctests_testfile( - filename, - (args.verbose >= 2), dots=dots, - doctest_warnings=args.doctest_warnings) - - # stub out a "module" which is needed when reporting the result - def scratch(): - pass - scratch.__name__ = filename - results.append((scratch, tut_results)) - if dots: - sys.stderr.write('\n') - sys.stderr.flush() - - -def init_matplotlib(): - """ - Check feasibility of matplotlib initialization. 
- """ - global HAVE_MATPLOTLIB - - try: - import matplotlib - matplotlib.use('Agg') - HAVE_MATPLOTLIB = True - except ImportError: - HAVE_MATPLOTLIB = False - def main(argv): """ @@ -1132,15 +560,7 @@ def main(argv): parser = ArgumentParser(usage=__doc__.lstrip()) parser.add_argument("module_names", metavar="SUBMODULES", default=[], nargs='*', help="Submodules to check (default: all public)") - parser.add_argument("--doctests", action="store_true", - help="Run also doctests on ") parser.add_argument("-v", "--verbose", action="count", default=0) - parser.add_argument("--doctest-warnings", action="store_true", - help="Enforce warning checking for doctests") - parser.add_argument("--rst", nargs='?', const='doc', default=None, - help=("Run also examples from *rst files " - "discovered walking the directory(s) specified, " - "defaults to 'doc'")) args = parser.parse_args(argv) modules = [] @@ -1149,8 +569,6 @@ def main(argv): if not args.module_names: args.module_names = list(PUBLIC_SUBMODULES) + [BASE_MODULE] - os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true' - module_names = list(args.module_names) for name in module_names: if name in OTHER_MODULE_DOCS: @@ -1164,9 +582,6 @@ def main(argv): errormsgs = [] - if args.doctests or args.rst: - init_matplotlib() - for submodule_name in module_names: prefix = BASE_MODULE + '.' 
if not ( @@ -1186,7 +601,7 @@ def main(argv): if submodule_name in args.module_names: modules.append(module) - if args.doctests or not args.rst: + if modules: print("Running checks for %d modules:" % (len(modules),)) for module in modules: if dots: @@ -1201,9 +616,6 @@ def main(argv): module.__name__) mod_results += check_rest(module, set(names).difference(deprecated), dots=dots) - if args.doctests: - mod_results += check_doctests(module, (args.verbose >= 2), dots=dots, - doctest_warnings=args.doctest_warnings) for v in mod_results: assert isinstance(v, tuple), v @@ -1214,18 +626,6 @@ def main(argv): sys.stderr.write('\n') sys.stderr.flush() - if args.rst: - base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..') - rst_path = os.path.relpath(os.path.join(base_dir, args.rst)) - if os.path.exists(rst_path): - print('\nChecking files in %s:' % rst_path) - check_documentation(rst_path, results, args, dots) - else: - sys.stderr.write(f'\ninvalid --rst argument "{args.rst}"') - errormsgs.append('invalid directory argument to --rst') - if dots: - sys.stderr.write("\n") - sys.stderr.flush() # Report results for module, mod_results in results: diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i index c8c26cbcd3d6..747446648c8b 100644 --- a/tools/swig/numpy.i +++ b/tools/swig/numpy.i @@ -1989,7 +1989,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY1[ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) @@ -2018,7 +2018,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE* ARGOUT_ARRAY1, DIM_TYPE DIM1) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) @@ -2047,7 +2047,7 @@ void free_cap(PyObject * cap) %typemap(argout) 
(DIM_TYPE DIM1, DATA_TYPE* ARGOUT_ARRAY1) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) @@ -2065,7 +2065,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY2[ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) @@ -2083,7 +2083,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY3[ANY][ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /* Typemap suite for (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY]) @@ -2101,7 +2101,7 @@ void free_cap(PyObject * cap) %typemap(argout) (DATA_TYPE ARGOUT_ARRAY4[ANY][ANY][ANY][ANY]) { - $result = SWIG_Python_AppendOutput($result,(PyObject*)array$argnum); + $result = SWIG_AppendOutput($result,(PyObject*)array$argnum); } /*****************************/ @@ -2126,7 +2126,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEW_ARRAY1) @@ -2147,7 +2147,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2169,7 +2169,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* 
DIM2, DATA_TYPE** ARGOUTVIEW_ARRAY2) @@ -2191,7 +2191,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2213,7 +2213,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEW_FARRAY2) @@ -2235,7 +2235,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2259,7 +2259,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2283,7 +2283,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2307,7 +2307,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2331,7 +2331,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; 
if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2356,7 +2356,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2381,7 +2381,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEW_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2406,7 +2406,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2431,7 +2431,7 @@ void free_cap(PyObject * cap) PyArrayObject* array = (PyArrayObject*) obj; if (!array || !require_fortran(array)) SWIG_fail; - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /*************************************/ @@ -2465,7 +2465,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DATA_TYPE** ARGOUTVIEWM_ARRAY1) @@ -2495,7 +2495,7 @@ PyObject* cap = PyCapsule_New((void*)(*$2), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* 
Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2526,7 +2526,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_ARRAY2) @@ -2557,7 +2557,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY2, DIM_TYPE* DIM1, DIM_TYPE* DIM2) @@ -2588,7 +2588,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DATA_TYPE** ARGOUTVIEWM_FARRAY2) @@ -2619,7 +2619,7 @@ PyObject* cap = PyCapsule_New((void*)(*$3), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY3, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2652,7 +2652,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2685,7 +2685,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY3, DIM_TYPE* DIM1, 
DIM_TYPE* DIM2, @@ -2718,7 +2718,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, @@ -2751,7 +2751,7 @@ PyObject* cap = PyCapsule_New((void*)(*$4), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_ARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2785,7 +2785,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2819,7 +2819,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DATA_TYPE** ARGOUTVIEWM_FARRAY4, DIM_TYPE* DIM1, DIM_TYPE* DIM2, @@ -2853,7 +2853,7 @@ PyObject* cap = PyCapsule_New((void*)(*$1), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /* Typemap suite for (DIM_TYPE* DIM1, DIM_TYPE* DIM2, DIM_TYPE* DIM3, DIM_TYPE* DIM4, @@ -2887,7 +2887,7 @@ PyObject* cap = PyCapsule_New((void*)(*$5), SWIGPY_CAPSULE_NAME, free_cap); PyArray_SetBaseObject(array,cap); %#endif - $result = SWIG_Python_AppendOutput($result,obj); + $result = SWIG_AppendOutput($result,obj); } /**************************************/ diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index 
a5b5ae5c22e6..021b4b0289e7 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy.libs/libopenblas*.so +Files: numpy.libs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 1ebd5663d02c..81889131cfa7 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -4,7 +4,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -40,7 +40,7 @@ License: BSD-3-Clause Name: LAPACK -Files: numpy/.dylibs/libopenblas*.so +Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index f8eaaf1cae25..a2ccce66fbe5 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -5,7 +5,7 @@ This binary distribution of NumPy also bundles the following software: Name: OpenBLAS -Files: numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled as a dynamically linked library Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause @@ -41,7 +41,7 @@ License: BSD-3-Clause Name: LAPACK -Files: 
numpy.libs\libopenblas*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: bundled in OpenBLAS Availability: https://github.com/OpenMathLib/OpenBLAS/ License: BSD-3-Clause-Attribution @@ -96,7 +96,7 @@ License: BSD-3-Clause-Attribution Name: GCC runtime library -Files: numpy.libs\libgfortran*.dll +Files: numpy.libs\libscipy_openblas*.dll Description: statically linked to files compiled with gcc Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran License: GPL-3.0-with-GCC-exception @@ -879,24 +879,3 @@ the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read . -Name: libquadmath -Files: numpy.libs\libopenb*.dll -Description: statically linked to files compiled with gcc -Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath -License: LGPL-2.1-or-later - - GCC Quad-Precision Math Library - Copyright (C) 2010-2019 Free Software Foundation, Inc. - Written by Francois-Xavier Coudert - - This file is part of the libquadmath library. - Libquadmath is free software; you can redistribute it and/or - modify it under the terms of the GNU Library General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - Libquadmath is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index 24a295005727..e2f464d32a2a 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -50,5 +50,14 @@ EOF fi if [[ $RUNNER_OS == "Windows" ]]; then # delvewheel is the equivalent of delocate/auditwheel for windows. 
- python -m pip install delvewheel + python -m pip install delvewheel wheel +fi + +# TODO: delete along with enabling build isolation by unsetting +# CIBW_BUILD_FRONTEND when numpy is buildable under free-threaded +# python with a released version of cython +FREE_THREADED_BUILD="$(python -c"import sysconfig; print(bool(sysconfig.get_config_var('Py_GIL_DISABLED')))")" +if [[ $FREE_THREADED_BUILD == "True" ]]; then + python -m pip install meson-python ninja + python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython fi diff --git a/tools/wheels/cibw_test_command.sh b/tools/wheels/cibw_test_command.sh index 693a271efd41..73328e26dd15 100644 --- a/tools/wheels/cibw_test_command.sh +++ b/tools/wheels/cibw_test_command.sh @@ -1,11 +1,12 @@ # This script is used by .github/workflows/wheels.yml to run the full test -# suite, checks for lincense inclusion and that the openblas version is correct. +# suite, checks for license inclusion and that the openblas version is correct. set -xe PROJECT_DIR="$1" python -m pip install threadpoolctl python -c "import numpy; numpy.show_config()" + if [[ $RUNNER_OS == "Windows" ]]; then # GH 20391 PY_DIR=$(python -c "import sys; print(sys.prefix)") @@ -26,6 +27,23 @@ fi # Set available memory value to avoid OOM problems on aarch64. # See gh-22418. export NPY_AVAILABLE_MEM="4 GB" + +FREE_THREADED_BUILD="$(python -c"import sysconfig; print(bool(sysconfig.get_config_var('Py_GIL_DISABLED')))")" +if [[ $FREE_THREADED_BUILD == "True" ]]; then + # TODO: delete when numpy is buildable under free-threaded python + # with a released version of cython + python -m pip uninstall -y cython + python -m pip install -i https://pypi.anaconda.org/scientific-python-nightly-wheels/simple cython + + # Manually check that importing NumPy does not re-enable the GIL. 
+ # In principle the tests should catch this but it seems harmless to leave it + # here as a final sanity check before uploading broken wheels + if [[ $(python -c "import numpy" 2>&1) == "*The global interpreter lock (GIL) has been enabled*" ]]; then + echo "Error: Importing NumPy re-enables the GIL in the free-threaded build" + exit 1 + fi +fi + # Run full tests with -n=auto. This makes pytest-xdist distribute tests across # the available N CPU cores: 2 by default for Linux instances and 4 for macOS arm64 python -c "import sys; import numpy; sys.exit(not numpy.test(label='full', extra_argv=['-n=auto']))" diff --git a/vendored-meson/meson b/vendored-meson/meson index 4e370ca8ab73..0d93515fb826 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 4e370ca8ab73c07f7b84abe8a4b937caace050a4 +Subproject commit 0d93515fb826440d19707eee47fd92655fe2f166 pFad - Phonifier reborn

Pfad - The Proxy pFad of © 2024 Garber Painting. All rights reserved.

Note: This service is not intended for secure transactions such as banking, social media, email, or purchasing. Use at your own risk. We assume no liability whatsoever for broken pages.


Alternative Proxies:

Alternative Proxy

pFad Proxy

pFad v3 Proxy

pFad v4 Proxy