From f6ee2bb7ebac86453a7e279770fca17501cac945 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Thu, 12 Jan 2023 13:37:00 +0100 Subject: [PATCH 01/53] Ensure master tests are triggered on cuda_main (#1305) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update branch with commits from master. `cuda_main` and `cuda_devel` branches should have the same tests as the master branch. These tests are activated here. The setup for cuda tests is also added Co-authored-by: Abdechahid Ihya <50792865+aihya@users.noreply.github.com> Co-authored-by: Aymane Benaissa <47903494+Pinkyboi@users.noreply.github.com> Co-authored-by: Youness Farini Co-authored-by: Yassine Alaoui <48657685+yassine-alaoui@users.noreply.github.com> Co-authored-by: Youness Farini Co-authored-by: Yaman Güçlü Co-authored-by: Aaron Holmes Co-authored-by: Ibrahim El Mountasser <33915853+ceciver@users.noreply.github.com> Co-authored-by: OTHMANE HACHIM <35775290+ohachim@users.noreply.github.com> --- .dict_custom.txt | 77 +++ .github/CONTRIBUTING.md | 51 ++ .github/ISSUE_TEMPLATE/bug-report.md | 11 +- .github/ISSUE_TEMPLATE/documentation.md | 10 + .github/ISSUE_TEMPLATE/feature-request.md | 14 +- .../ISSUE_TEMPLATE/installation-problem.md | 24 + .../actions/coverage_collection/action.yml | 1 - .github/actions/coverage_install/action.yml | 2 +- .github/actions/linux_install/action.yml | 10 +- .github/actions/macos_install/action.yml | 3 +- .github/actions/pytest_parallel/action.yml | 4 +- .github/actions/pytest_run/action.yml | 16 +- .github/actions/pytest_run_cuda/action.yml | 17 + .github/actions/pytest_run_python/action.yml | 4 +- .github/actions/python_install/action.yml | 17 + .github/workflows/Github_pytest.yml | 134 ++++- .github/workflows/bench.yml | 38 +- .github/workflows/lint.yml | 24 + .github/workflows/master.yml | 28 +- .github/workflows/pyccel_lint.yml | 23 + .github/workflows/spelling.yml | 29 + .pylintrc | 3 +- .pyspelling.yml | 26 + README.md | 61 +-- ci_tools/check_new_coverage.py | 31 ++ ci_tools/check_slots.py | 109 ++++ ci_tools/coverage_analysis_tools.py | 175 ++++++ ci_tools/git_evaluation_tools.py | 88 +++ ci_tools/summarise_pyspelling.py | 62 +++ developer_docs/how_to_solve_an_issue.md | 2 +- developer_docs/order_docs.md | 468 ++++++++++++++++ developer_docs/overview.md | 17 +- developer_docs/review_process.md | 6 +- developer_docs/scope.md | 4 +- pyccel/ast/basic.py | 2 +- pyccel/ast/bind_c.py | 2 +- pyccel/ast/bitwise_operators.py | 10 +- pyccel/ast/builtin_imports.py | 2 + pyccel/ast/builtins.py | 34 +- pyccel/ast/c_concepts.py | 179 +++++- pyccel/ast/core.py | 13 + pyccel/ast/cwrapper.py | 1 + pyccel/ast/datatypes.py | 3 +- pyccel/ast/headers.py | 7 +- pyccel/ast/internals.py | 9 +- pyccel/ast/itertoolsext.py | 2 +- pyccel/ast/literals.py | 10 +- pyccel/ast/numpyext.py | 135 +++-- pyccel/ast/omp.py | 18 + pyccel/ast/operators.py | 13 +- pyccel/ast/scipyext.py | 2 + pyccel/ast/sympy_helper.py | 3 + pyccel/ast/sysext.py | 61 +++ pyccel/ast/utilities.py | 31 +- pyccel/ast/variable.py | 3 + pyccel/codegen/printing/ccode.py | 213 +++++--- pyccel/codegen/printing/fcode.py | 113 +++- pyccel/codegen/utilities.py | 10 +- pyccel/compilers/default_compilers.py | 2 + pyccel/naming/cnameclashchecker.py | 2 +- pyccel/naming/fortrannameclashchecker.py | 2 +- pyccel/parser/scope.py | 1 + pyccel/parser/semantic.py | 66 ++- pyccel/parser/syntactic.py | 4 +- .../cwrapper_ndarrays/cwrapper_ndarrays.c | 5 +- pyccel/stdlib/numpy/numpy_c.c | 24 + pyccel/stdlib/numpy/numpy_c.h | 20 + 
pyccel/stdlib/numpy/numpy_f90.f90 | 156 ++++++ pyccel/version.py | 2 +- pyproject.toml | 6 +- setup.cfg | 19 +- tests/epyccel/modules/arrays.py | 41 +- tests/epyccel/modules/augassign.py | 155 ++++++ .../modules/call_user_defined_funcs.py | 9 + tests/epyccel/modules/numpy_sign.py | 295 ++++++++++ .../recognised_functions/test_numpy_funcs.py | 514 ++++++++++++++++-- .../recognised_functions/test_numpy_types.py | 188 ++++++- tests/epyccel/test_arrays.py | 180 +++++- tests/epyccel/test_builtins.py | 79 +-- .../test_default_precision_template.py | 31 ++ tests/epyccel/test_epyccel_augassign.py | 212 ++++++++ tests/epyccel/test_epyccel_complex_func.py | 19 +- tests/epyccel/test_epyccel_functions.py | 13 +- tests/epyccel/test_epyccel_generators.py | 2 +- tests/epyccel/test_epyccel_modules.py | 13 +- tests/epyccel/test_epyccel_optional_args.py | 17 +- tests/epyccel/test_epyccel_return_arrays.py | 481 ++++++++++++++++ tests/epyccel/test_epyccel_sign.py | 310 +++++++++++ tests/epyccel/test_generic_functions.py | 3 +- tests/epyccel/test_return.py | 21 + .../pyccel/scripts/array_binary_operation.py | 92 ++++ tests/pyccel/scripts/exits/empty_exit.py | 6 + tests/pyccel/scripts/exits/negative_exit1.py | 6 + tests/pyccel/scripts/exits/negative_exit2.py | 6 + tests/pyccel/scripts/exits/positive_exit1.py | 6 + tests/pyccel/scripts/exits/positive_exit2.py | 6 + tests/pyccel/scripts/exits/positive_exit3.py | 7 + tests/pyccel/scripts/exits/zero_exit.py | 6 + tests/pyccel/scripts/numpy/numpy_sign.py | 84 +++ tests/pyccel/scripts/print_integers.py | 37 ++ tests/pyccel/scripts/print_tuples.py | 12 + tests/pyccel/scripts/runtest_type_print.py | 4 +- tests/pyccel/test_pyccel.py | 94 ++-- tutorial/builtin-functions.md | 138 ++--- tutorial/compiler.md | 48 +- tutorial/const_keyword.md | 14 +- tutorial/decorators.md | 8 +- tutorial/function-pointers-as-arguments.md | 12 +- tutorial/header-files.md | 12 +- tutorial/ndarrays.md | 12 +- tutorial/numpy-functions.md | 78 +-- tutorial/openmp.md | 140 ++--- tutorial/quickstart.md | 34 +- tutorial/templates.md | 10 +- 114 files changed, 5479 insertions(+), 750 deletions(-) create mode 100644 .dict_custom.txt create mode 100644 .github/CONTRIBUTING.md create mode 100644 .github/ISSUE_TEMPLATE/documentation.md create mode 100644 .github/ISSUE_TEMPLATE/installation-problem.md create mode 100644 .github/actions/pytest_run_cuda/action.yml create mode 100644 .github/actions/python_install/action.yml create mode 100644 .github/workflows/lint.yml create mode 100644 .github/workflows/pyccel_lint.yml create mode 100644 .github/workflows/spelling.yml create mode 100644 .pyspelling.yml create mode 100644 ci_tools/check_new_coverage.py create mode 100644 ci_tools/check_slots.py create mode 100644 ci_tools/coverage_analysis_tools.py create mode 100644 ci_tools/git_evaluation_tools.py create mode 100644 ci_tools/summarise_pyspelling.py create mode 100644 developer_docs/order_docs.md create mode 100644 pyccel/ast/sysext.py create mode 100644 pyccel/stdlib/numpy/numpy_c.c create mode 100644 pyccel/stdlib/numpy/numpy_c.h create mode 100644 pyccel/stdlib/numpy/numpy_f90.f90 create mode 100644 tests/epyccel/modules/augassign.py create mode 100644 tests/epyccel/modules/numpy_sign.py create mode 100644 tests/epyccel/test_default_precision_template.py create mode 100644 tests/epyccel/test_epyccel_augassign.py create mode 100644 tests/epyccel/test_epyccel_sign.py create mode 100644 tests/pyccel/scripts/array_binary_operation.py create mode 100644 tests/pyccel/scripts/exits/empty_exit.py create mode 
100644 tests/pyccel/scripts/exits/negative_exit1.py create mode 100644 tests/pyccel/scripts/exits/negative_exit2.py create mode 100644 tests/pyccel/scripts/exits/positive_exit1.py create mode 100644 tests/pyccel/scripts/exits/positive_exit2.py create mode 100644 tests/pyccel/scripts/exits/positive_exit3.py create mode 100644 tests/pyccel/scripts/exits/zero_exit.py create mode 100644 tests/pyccel/scripts/numpy/numpy_sign.py create mode 100644 tests/pyccel/scripts/print_integers.py create mode 100644 tests/pyccel/scripts/print_tuples.py diff --git a/.dict_custom.txt b/.dict_custom.txt new file mode 100644 index 0000000000..c8a236346b --- /dev/null +++ b/.dict_custom.txt @@ -0,0 +1,77 @@ +Pyccel +Pythran +numba +NumPy +NumPy's +BLAS +LAPACK +MPI +OpenMP +Fortran +pyccelise +pyccelised +pyccelising +allocatable +deallocate +conda +PyPI +CentOS +RHEL +executables +linux +macOS +DLL +DLLs +MPICH +openSUSE +Xcode +SIMD +runtime +pragma +pragmas +API +APIs +Pyccel's +PGI +GFortran +GCC +DNF +Homebrew +CLT +SDK +GitHub +OpenMPI +JSON +CPython +IPython +HPC +podman +vectorisation +precompiled +elementwise +SELinux +distro +PACKage +ndarray +ndarrays +metavariable +broadcastable +parallelisation +Quickstart +FST +AST +RedBaron +intel +nvidia +boolean +quicksort +iterm +textx +tracebacks +pytest +docstring +docstrings +Codacy +codebase +backend +iterable diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md new file mode 100644 index 0000000000..ce4c3456df --- /dev/null +++ b/.github/CONTRIBUTING.md @@ -0,0 +1,51 @@ +# Contributing to pyccel + +Firstly, thanks for your interest in contributing code to pyccel! + +We welcome any and all contributions. + +There are many ways to help with the pyccel project which are more or less involved. +The following is a summary of how you can do this. + +## Asking questions + +For simple questions (e.g. installation problems), we encourage you to use the [discussions tab](https://github.com/pyccel/pyccel/discussions/categories/q-a). + +## Answering questions + +Questions are sometimes asked in the [discussions tab](https://github.com/pyccel/pyccel/discussions/categories/q-a) or in issues. +Any help answering these questions is appreciated as it helps our users find solutions as fast as possible. + +## Reporting issues + +When reporting issues please include as much detail as possible about your +operating system, and python version. Whenever possible, please +also include a brief, self-contained code example that demonstrates the problem. + +## Requesting features + +To request a new feature, please use the [discussions tab](https://github.com/pyccel/pyccel/discussions/categories/ideas). +This allows us to discuss the best implementation in the target languages before creating an issue to work on. + +## Documentation + +Reviewing the existing documentation is a great way to help out! +We need to ensure that it is up-to-date and relevant, and that the examples are clear, relevant, and work correctly. + +Missing documentation can also be added to improve the existing docs. + +Please open a pull request to add or change anything in the docs. + +## Reviewing a pull request + +There are many pull requests and reviewing them is a great way to help things get to the master branch faster. +This is also a really good way to get to grips with the code base as you will see examples of many different areas of the code. 
+It's incredibly helpful to have pull requests go through as many reviews as possible, to make sure the code change makes sense, is documented, and is efficient and clear. +As the clarity is subjective, more eyes can only improve the code base. +The review process is described in the [developer docs](https://github.com/pyccel/pyccel/blob/master/developer_docs/review_process.md), keep an eye out for PRs tagged `needs_initial_review`. + +## Contributing code + +Before contributing we strongly recommend you check out the [developer docs](https://github.com/pyccel/pyccel/tree/master/developer_docs). +We try to flag `good-first-issue`s. +These are either issues which can be fixed by following the example provided by a similar solution which is already implemented, or issues which only concern one of the pyccel [stages](https://github.com/pyccel/pyccel/blob/master/developer_docs/overview.md). diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index f2c83bb246..cff2bddd9d 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -7,25 +7,26 @@ assignees: '' --- -**Describe the bug** +### Describe the bug A clear and concise description of what the bug is. -**To Reproduce** +### To Reproduce Provide code to reproduce the behavior: ```python Code here ``` +### Error details Provide the generated code, or the error message: ```bash/fortran/c Translated code here ``` -**Expected behavior** +### Expected behavior A clear and concise description of what you expected to happen. -**Language** +### Language Please specify which language the python code is translated to (Fortran by default) -**Additional context** +### Additional context Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 0000000000..c549bc5621 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,10 @@ +--- +name: Documentation issue +about: Request additional documentation or report documentation errors +title: '' +labels: documentation +assignees: '' + +--- + +A description of the requested documentation changes. diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 1fd5ce674a..bd9afcb48b 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -7,14 +7,20 @@ assignees: '' --- -**Describe the bug** +### Relevant Discussion + +Non-trivial features require consideration to find the best implementation in the target languages. +This conversation should take place in the [discussion tab](https://github.com/pyccel/pyccel/discussions/categories/q-a). +Please link to the discussion where the proposed feature was investigated. + +### Describe the feature A clear and concise description of what you would like to be implemented. 
-**Test Code** +### Test Code Provide code which does not currently work but which should do when this issue is fixed: ```python Code here ``` -**Language** -If appropriate, please specify which language the python code is translated to (Fortran by default) +### Proposed Solution +Briefly summarise how you expect this feature to be implemented diff --git a/.github/ISSUE_TEMPLATE/installation-problem.md b/.github/ISSUE_TEMPLATE/installation-problem.md new file mode 100644 index 0000000000..6f6342c4c2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/installation-problem.md @@ -0,0 +1,24 @@ +--- +name: Installation problem +about: Report an installation problem +title: '' +labels: bug +assignees: '' + +--- + +### Relevant Discussion + +Most installation problems are not directly caused by pyccel, but are due to dependencies. Please link to the discussion in the [discussion tab](https://github.com/pyccel/pyccel/discussions/categories/q-a) where the problem was investigated. + +### Description + +A clear and concise description of the problem encountered. + +### Environment information + +Python version : + +OS version : + +Versions for any relevant dependencies: diff --git a/.github/actions/coverage_collection/action.yml b/.github/actions/coverage_collection/action.yml index 03d1b2cdfd..e32b82b317 100644 --- a/.github/actions/coverage_collection/action.yml +++ b/.github/actions/coverage_collection/action.yml @@ -6,7 +6,6 @@ runs: - name: Coverage collection run: | coverage combine - coverage xml rm ${SITE_DIR}/pyccel_cov.pth shell: bash diff --git a/.github/actions/coverage_install/action.yml b/.github/actions/coverage_install/action.yml index 44ef760dcb..fb55f29c45 100644 --- a/.github/actions/coverage_install/action.yml +++ b/.github/actions/coverage_install/action.yml @@ -10,7 +10,7 @@ runs: - name: Directory Creation run: | INSTALL_DIR=$(cd tests; python -c "import pyccel; print(pyccel.__path__[0])") - SITE_DIR=$(python -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])') + SITE_DIR=$(dirname ${INSTALL_DIR}) echo -e "import coverage; coverage.process_startup()" > ${SITE_DIR}/pyccel_cov.pth echo -e "[run]\nparallel = True\nsource = ${INSTALL_DIR}\ndata_file = $(pwd)/.coverage\n[report]\ninclude = ${INSTALL_DIR}/*\n[xml]\noutput = cobertura.xml" > .coveragerc echo "SITE_DIR=${SITE_DIR}" >> $GITHUB_ENV diff --git a/.github/actions/linux_install/action.yml b/.github/actions/linux_install/action.yml index 8fb5cd8505..0ef9a69b8e 100644 --- a/.github/actions/linux_install/action.yml +++ b/.github/actions/linux_install/action.yml @@ -9,22 +9,22 @@ runs: shell: bash - name: Install fortran run: - sudo apt-get install gfortran + sudo apt-get install -y gfortran shell: bash - name: Install LaPack run: - sudo apt-get install libblas-dev liblapack-dev + sudo apt-get install -y libblas-dev liblapack-dev shell: bash - name: Install MPI run: | - sudo apt-get install libopenmpi-dev openmpi-bin + sudo apt-get install -y libopenmpi-dev openmpi-bin echo "MPI_OPTS=--oversubscribe" >> $GITHUB_ENV shell: bash - name: Install OpenMP run: - sudo apt-get install libomp-dev libomp5 + sudo apt-get install -y libomp-dev libomp5 shell: bash - name: Install Valgrind run: - sudo apt-get install valgrind + sudo apt-get install -y valgrind shell: bash diff --git a/.github/actions/macos_install/action.yml b/.github/actions/macos_install/action.yml index 941d6920d6..212c79d24c 100644 --- a/.github/actions/macos_install/action.yml +++ b/.github/actions/macos_install/action.yml @@ -8,7 +8,8 @@ runs: brew install open-mpi 
brew install libomp if [[ ! -f "/usr/local/bin/gfortran" ]]; then - ln -s /usr/local/bin/gfortran-10 /usr/local/bin/gfortran + gfort=$(ls /usr/local/bin/gfortran-* | tail -n 1) + ln -s ${gfort} /usr/local/bin/gfortran fi echo "MPI_OPTS=--oversubscribe" >> $GITHUB_ENV shell: bash diff --git a/.github/actions/pytest_parallel/action.yml b/.github/actions/pytest_parallel/action.yml index 74a9652afe..c7c77d99c7 100644 --- a/.github/actions/pytest_parallel/action.yml +++ b/.github/actions/pytest_parallel/action.yml @@ -10,8 +10,8 @@ runs: steps: - name: Test with pytest run: | - mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel/test_parallel_epyccel.py -v -m parallel -rx - #mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel -v -m parallel -rx + mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel/test_parallel_epyccel.py -v -m parallel -rXx + #mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel -v -m parallel -rXx shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/pytest_run/action.yml b/.github/actions/pytest_run/action.yml index 6bec971da6..73a3e53080 100644 --- a/.github/actions/pytest_run/action.yml +++ b/.github/actions/pytest_run/action.yml @@ -12,13 +12,19 @@ runs: - name: Test with pytest run: | which python - python -m pytest -n auto -rX -m "not (parallel or xdist_incompatible) and c" --ignore=symbolic --ignore=ndarrays - python -m pytest -rX -m "xdist_incompatible and not parallel and c" --ignore=symbolic --ignore=ndarrays + python -m pytest -n auto -rXx -v -m "not (parallel or xdist_incompatible) and c" --ignore=symbolic --ignore=ndarrays + if [ -n "${SITE_DIR}" ]; then + echo "Touching" + # Test ndarray folder update (requires parallel tests to avoid clean) + touch ${SITE_DIR}/pyccel/stdlib/cwrapper/cwrapper.h + python -m pytest -n auto -rXx -v -m c -k test_array_int32_1d_scalar epyccel/test_arrays.py + fi + python -m pytest -rXx -m "xdist_incompatible and not parallel and c" --ignore=symbolic --ignore=ndarrays pyccel-clean - python -m pytest -n auto -rX -m "not (parallel or xdist_incompatible) and not (c or python)" --ignore=symbolic --ignore=ndarrays - python -m pytest -rX -m "xdist_incompatible and not parallel and not (c or python)" --ignore=symbolic --ignore=ndarrays + python -m pytest -n auto -rXx -m "not (parallel or xdist_incompatible) and not (c or python or ccuda)" --ignore=symbolic --ignore=ndarrays + python -m pytest -rXx -m "xdist_incompatible and not parallel and not (c or python or ccuda)" --ignore=symbolic --ignore=ndarrays pyccel-clean - python -m pytest ndarrays/ -rX + python -m pytest ndarrays/ -rXx pyccel-clean shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/pytest_run_cuda/action.yml b/.github/actions/pytest_run_cuda/action.yml new file mode 100644 index 0000000000..59c8b5b916 --- /dev/null +++ b/.github/actions/pytest_run_cuda/action.yml @@ -0,0 +1,17 @@ +name: 'Pyccel pytest commands generating Ccuda' +inputs: + shell_cmd: + description: 'Specifies the shell command (different for anaconda)' + required: false + default: "bash" + +runs: + using: "composite" + steps: + - name: Ccuda tests with pytest + run: | + # Catch exit 5 (no tests found) + sh -c 'python -m pytest -n auto -rx -m "not (parallel or xdist_incompatible) and ccuda" --ignore=tests/symbolic --ignore=tests/ndarrays; ret=$?; [ $ret = 5 ] && exit 0 || exit $ret' + pyccel-clean + shell: ${{ inputs.shell_cmd }} + working-directory: ./ diff --git a/.github/actions/pytest_run_python/action.yml 
b/.github/actions/pytest_run_python/action.yml index f57507773e..842fd2eaf6 100644 --- a/.github/actions/pytest_run_python/action.yml +++ b/.github/actions/pytest_run_python/action.yml @@ -10,8 +10,8 @@ runs: steps: - name: Python tests with pytest run: | - python -m pytest -n auto -rx -m "not (parallel or xdist_incompatible) and python" --ignore=symbolic --ignore=ndarrays - python -m pytest -rx -m "xdist_incompatible and not parallel and python" --ignore=symbolic --ignore=ndarrays + python -m pytest -n auto -rXx -m "not (parallel or xdist_incompatible) and python" --ignore=symbolic --ignore=ndarrays + python -m pytest -rXx -m "xdist_incompatible and not parallel and python" --ignore=symbolic --ignore=ndarrays pyccel-clean shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/python_install/action.yml b/.github/actions/python_install/action.yml new file mode 100644 index 0000000000..f9b720e3e1 --- /dev/null +++ b/.github/actions/python_install/action.yml @@ -0,0 +1,17 @@ +name: 'Python installation commands' + +runs: + using: "composite" + steps: + - name: Install python + run: + sudo apt-get -y install python3-dev + shell: bash + - name: python as python3 + run: + sudo apt-get -y install python-is-python3 + shell: bash + - name: Install Pip + run: + sudo apt-get -y install python3-pip + shell: bash diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index a4a2f986c8..fb1c717218 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -2,7 +2,7 @@ name: Pyccel tests on: pull_request: - branches: [ master ] + branches: [ master, cuda_main, cuda_devel ] jobs: Linux: @@ -10,9 +10,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.7 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.7 - name: Install dependencies @@ -32,21 +32,21 @@ jobs: - name: Collect coverage information continue-on-error: True uses: ./.github/actions/coverage_collection - - name: Run codacy-coverage-reporter - uses: codacy/codacy-coverage-reporter-action@master - continue-on-error: True + - name: Save code coverage report + uses: actions/upload-artifact@v3 with: - project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} - coverage-reports: cobertura.xml + name: coverage-artifact + path: .coverage + retention-days: 1 Windows: runs-on: windows-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup Python 3.9 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: # The second most recent version is used as # setup-python installs the most recent patch @@ -73,9 +73,9 @@ jobs: runs-on: macos-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.10 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: '3.10' - name: Install dependencies @@ -89,22 +89,120 @@ jobs: - name: Parallel tests with pytest uses: ./.github/actions/pytest_parallel - Linter: + Cuda: + + runs-on: ubuntu-20.04 + container: nvidia/cuda:11.7.1-devel-ubuntu20.04 + if: github.event.pull_request.base.ref != 'master' + steps: + - uses: actions/checkout@v2 + - name: Prepare docker + run: | + apt update && apt install sudo + TZ=Europe/France + ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata + shell: bash + - name: CUDA Version + 
run: nvcc --version # cuda install check + - name: Install dependencies + uses: ./.github/actions/linux_install + - name: Install python (setup-python action doesn't work with containers) + uses: ./.github/actions/python_install + - name: Install python dependencies + uses: ./.github/actions/pip_installation + - name: Coverage install + uses: ./.github/actions/coverage_install + - name: Ccuda tests with pytest + uses: ./.github/actions/pytest_run_cuda + - name: Collect coverage information + continue-on-error: True + uses: ./.github/actions/coverage_collection + - name: Save code coverage report + uses: actions/upload-artifact@v3 + with: + name: cuda-coverage-artifact + path: .coverage + retention-days: 1 + + CoverageCollection: runs-on: ubuntu-latest + needs: [Linux, Cuda] + if: ${{ always() && needs.Linux.result == 'success' && needs.Cuda.result != 'failure' }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.7 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 + with: + python-version: 3.7 + - name: Install dependencies + uses: ./.github/actions/linux_install + - name: Install python dependencies + uses: ./.github/actions/pip_installation + - name: Coverage install + uses: ./.github/actions/coverage_install + - name: Collect coverage information + uses: actions/download-artifact@v3 + with: + name: coverage-artifact + - name: Rename coverage file + run: mv .coverage .coverage.linux + - name: Collect coverage information + uses: actions/download-artifact@v3 + if: needs.Cuda.result == 'success' + with: + name: cuda-coverage-artifact + - name: Rename coverage file + if: needs.Cuda.result == 'success' + run: mv .coverage .coverage.cuda + - name: Generate coverage report + run: | + coverage combine + coverage xml + - name: Run codacy-coverage-reporter + uses: codacy/codacy-coverage-reporter-action@master + continue-on-error: True + with: + project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} + coverage-reports: cobertura.xml + - name: Save code coverage xml report + uses: actions/upload-artifact@v3 + with: + name: coverage-artifact-xml + path: cobertura.xml + retention-days: 1 + + CoverageChecker: + + runs-on: ubuntu-latest + needs: [CoverageCollection] + if: ${{ always() && needs.CoverageCollection.result == 'success' && needs.Cuda.result != 'failure' }} + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.7 + uses: actions/setup-python@v4 with: python-version: 3.7 - name: Install python dependencies run: | python -m pip install --upgrade pip - python -m pip install pylint + python -m pip install defusedxml + shell: bash + - name: Collect coverage information + uses: actions/download-artifact@v3 + with: + name: coverage-artifact-xml + - name: Collect diff information + run: | + BASE_BRANCH=$GITHUB_BASE_REF + git fetch + git diff origin/${BASE_BRANCH}..HEAD --no-indent-heuristic --unified=0 --output=pull_diff.txt --no-color + ls shell: bash - - name: Pylint + - name: Check coverage run: | - python -m pylint --rcfile=.pylintrc pyccel/parser/semantic.py + python ci_tools/check_new_coverage.py pull_diff.txt cobertura.xml $GITHUB_EVENT_PATH $GITHUB_STEP_SUMMARY shell: bash diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 789cbab4ec..72b927ff70 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -11,30 +11,20 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 - - name: Set up Python 3.7 - uses: actions/setup-python@v3 + - name: Set up ssh + uses: 
webfactory/ssh-agent@v0.7.0 with: - python-version: 3.7 - - name: Install dependencies - uses: ./.github/actions/linux_install - - name: Install python dependencies - uses: ./.github/actions/pip_installation - - name: Install python benchmark dependencies + ssh-private-key: ${{ secrets.SSH_DEPLOY_KEY }} + - name: Clone pyccel-benchmarks repository + working-directory: ../ run: | - python -m pip install pythran - python -m pip install numba - python -m pip install pyperf - - name: Benchmark + git clone git@github.com:pyccel/pyccel-benchmarks.git + - name: Push results to pyccel-benchmarks + working-directory: ../pyccel-benchmarks run: | - python benchmarks/run_benchmark.py --pyperf --verbose - echo "# Performance Comparison (as of $(date))" > performance.md - cat bench.out >> performance.md - shell: bash - working-directory: ./. - - name: Add & Commit - uses: EndBug/add-and-commit@v9.0.0 - with: - message: 'Update performance comparison' - add: 'performance.md' - default_author: github_actions + export GIT_AUTHOR_NAME="Pyccel/pyccel" + export GIT_AUTHOR_EMAIL="41898282+github-actions[bot]@users.noreply.github.com" + git config user.email ${GIT_AUTHOR_EMAIL} + git config user.name ${GIT_AUTHOR_NAME} + git commit --allow-empty -m "Benchmark of pyccel/pyccel@${GITHUB_SHA}" + git push diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 0000000000..827b8eece6 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,24 @@ +name: Python Linting +on: + pull_request: + branches: [ master, cuda_main, cuda_devel ] + +jobs: + Linter: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.7 + uses: actions/setup-python@v4 + with: + python-version: 3.7 + - name: Install python dependencies + run: | + python -m pip install --upgrade pip + python -m pip install pylint + shell: bash + - name: Pylint + run: | + python -m pylint --rcfile=.pylintrc pyccel/parser/semantic.py > $GITHUB_STEP_SUMMARY + shell: bash diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index afb51c6f3d..dd89a52af8 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -15,9 +15,9 @@ jobs: python-version: [3.7, 3.8, 3.9, '3.10'] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python-version }} - name: Install dependencies @@ -47,9 +47,9 @@ jobs: runs-on: windows-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Setup Python 3.7 - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: # The second most recent version is used as # setup-python installs the most recent patch @@ -73,9 +73,9 @@ jobs: runs-on: macos-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.9 - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 3.9 - name: Install dependencies @@ -94,9 +94,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.7 - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 3.7 - name: Install dependencies @@ -113,9 +113,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.7 - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 
3.7 - name: Install dependencies @@ -132,9 +132,9 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python 3.7 - uses: actions/setup-python@v2 + uses: actions/setup-python@v4 with: python-version: 3.7 - name: Install dependencies @@ -152,7 +152,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install dependencies uses: ./.github/actions/linux_install - uses: conda-incubator/setup-miniconda@v2 @@ -180,7 +180,7 @@ jobs: runs-on: windows-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 with: auto-update-conda: true diff --git a/.github/workflows/pyccel_lint.yml b/.github/workflows/pyccel_lint.yml new file mode 100644 index 0000000000..fab5dda618 --- /dev/null +++ b/.github/workflows/pyccel_lint.yml @@ -0,0 +1,23 @@ +name: Pyccel Linting +on: + pull_request: + branches: [ master, cuda_main, cuda_devel ] + +jobs: + Pyccel-Linter: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.7 + uses: actions/setup-python@v4 + with: + python-version: 3.7 + - name: Install dependencies + uses: ./.github/actions/linux_install + - name: Install python dependencies + uses: ./.github/actions/pip_installation + - name: Lint + run: | + python ci_tools/check_slots.py $GITHUB_STEP_SUMMARY + shell: bash diff --git a/.github/workflows/spelling.yml b/.github/workflows/spelling.yml new file mode 100644 index 0000000000..b6c754efdc --- /dev/null +++ b/.github/workflows/spelling.yml @@ -0,0 +1,29 @@ +name: Spellcheck Action +on: + pull_request: + branches: [ master, cuda_main, cuda_devel ] + +jobs: + Spelling: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.7 + uses: actions/setup-python@v4 + with: + python-version: 3.7 + - name: Install dependencies + run: | + python -m pip install --upgrade pip setuptools + python -m pip install pyspelling + python -m pip install pymdown-extensions + # Install any additional libraries required: additional plugins, documentation building libraries, etc. + - name: Install Aspell + run: | + sudo apt-get install aspell aspell-en + - name: Spell check + run: | + python -m pyspelling > misspellings.txt || true + cat misspellings.txt + python ci_tools/summarise_pyspelling.py misspellings.txt $GITHUB_STEP_SUMMARY + diff --git a/.pylintrc b/.pylintrc index 6e9e563490..2e79ec9299 100644 --- a/.pylintrc +++ b/.pylintrc @@ -99,7 +99,8 @@ disable=invalid-name, redundant-keyword-arg, duplicate-code, unused-wildcard-import, # Raised by everything not used in pyccel.errors.messages when using 'from pyccel.errors.messages import *' - isinstance-second-argument-not-valid-type # prevents doing isinstance(a, acceptable_iterable_types) + isinstance-second-argument-not-valid-type, # prevents doing isinstance(a, acceptable_iterable_types) + django-not-configured # Enable the message, report, category or checker with the given id(s). 
You can diff --git a/.pyspelling.yml b/.pyspelling.yml new file mode 100644 index 0000000000..50acd593cf --- /dev/null +++ b/.pyspelling.yml @@ -0,0 +1,26 @@ +matrix: +- name: markdown + sources: + - README.md + - tutorial/*.md + - developer_docs/*.md + aspell: + lang: en + d: en_GB + dictionary: + wordlists: + - .dict_custom.txt + encoding: utf-8 + output: build/dictionary/python.dic + pipeline: + - pyspelling.filters.markdown: + markdown_extensions: + - pymdownx.superfences: + - pymdownx.magiclink: + - pyspelling.filters.html: + comments: false + ignores: + - code + - pre + - a + default_encoding: utf-8 diff --git a/README.md b/README.md index db3f160020..01c34305d8 100644 --- a/README.md +++ b/README.md @@ -16,12 +16,14 @@ The aim of **Pyccel** is to provide a simple way to generate automatically, para Pyccel comes with a selection of **extensions** allowing you to convert calls to some specific Python packages to Fortran/C. The following packages will be covered (partially): -- numpy -- scipy -- mpi4py (not available yet) -- h5py (not available yet) +- `numpy` +- `scipy` +- `mpi4py` (not available yet) +- `h5py` (not available yet) -Pyccel's acceleration capabilities lead to much faster code. A small speed comparison of Python vs Pyccel or other tools can be found in the [performance](./performance.md) file. +Pyccel's acceleration capabilities lead to much faster code. Comparisons of Python vs Pyccel or other tools can be found in the [benchmarks](https://github.com/pyccel/pyccel-benchmarks) repository. +The results for the master branch currently show the following performance on python 3.10: +![Pyccel execution times for master branch](https://github.com/pyccel/pyccel-benchmarks/blob/main/version_specific_results/devel_performance_310_execution.svg) If you are eager to try Pyccel out, we recommend reading our [quick-start guide](./tutorial/quickstart.md) @@ -40,7 +42,7 @@ If you are eager to try Pyccel out, we recommend reading our [quick-start guide] - [Windows](#Windows) - [Installation](#Installation) - - [From PyPi](#From-PyPi) + - [From PyPI](#From-PyPI) - [From sources](#From-sources) - [On a read-only system](#On-a-read-only-system) @@ -70,7 +72,7 @@ If you are eager to try Pyccel out, we recommend reading our [quick-start guide] - Supported libraries/APIs - [OpenMP](./tutorial/openmp.md) - - [Numpy](./tutorial/numpy-functions.md) + - [NumPy](./tutorial/numpy-functions.md) ## Pyccel Installation Methods @@ -80,8 +82,8 @@ Some advanced features of Pyccel require additional non-Python libraries to be i Alternatively, Pyccel can be deployed through a **Linux Docker image** that contains all dependencies, and which can be setup with any version of Pyccel. For more information, please read the section on [Pyccel container images](#Pyccel-Container-Images). -It is possible to use pyccel with anaconda but this is generally not advised as anaconda modifies paths used for finding executables, shared libraries and other objects. -Support is provided for anaconda on linux/macos. +It is possible to use Pyccel with anaconda but this is generally not advised as anaconda modifies paths used for finding executables, shared libraries and other objects. +Support is provided for anaconda on linux/macOS. On Windows support is limited to examples which do not use external libraries. This is because we do not know of a way to reliably avoid [DLL hell](https://en.wikipedia.org/wiki/DLL_Hell). 
@@ -116,7 +118,7 @@ Finally, Pyccel supports distributed-memory parallel programming through the Mes We recommend using GFortran/GCC and Open-MPI. -Pyccel also depends on several Python3 packages, which are automatically downloaded by pip, the Python Package Installer, during the installation process. In addition to these, unit tests require the _scipy_, _mpi4py_, _pytest_ and _coverage_ packages, while building the documentation requires [Sphinx](http://www.sphinx-doc.org/). +Pyccel also depends on several Python3 packages, which are automatically downloaded by pip, the Python Package Installer, during the installation process. In addition to these, unit tests require additional packages which are installed as optional dependencies with pip, while building the documentation requires [Sphinx](http://www.sphinx-doc.org/). ### Linux Debian-Ubuntu-Mint @@ -206,7 +208,7 @@ msiexec //i msmpisdk.msi At this point, close and reopen your terminal to refresh all environment variables! -In Administrator git-bash, generate mpi.mod for gfortran according to : +In Administrator git-bash, generate `mpi.mod` for GFortran according to : ```sh cd "$MSMPI_INC" @@ -216,7 +218,7 @@ gfortran -c -D_WIN64 -D INT_PTR_KIND\(\)=8 -fno-range-check mpi.f90 cd - ``` -Generate static libmsmpi.a from msmpi.dll: +Generate static `libmsmpi.a` from `msmpi.dll`: ```sh cd "$MSMPI_LIB64" @@ -252,7 +254,7 @@ echo "import os; os.add_dll_directory('C://ProgramData/chocolatey/lib/mingw/tool On Windows and/or Anaconda Python, use `pip` instead of `pip3` for the Installation of Pyccel below. -### From PyPi +### From PyPI Simply run, for a user-specific installation: @@ -283,10 +285,10 @@ for a system-wide installation. ```sh git clone git@github.com:pyccel/pyccel.git cd pyccel - pip3 install --user -e . + pip3 install --user -e .[test] ``` -this will install a _python_ library **pyccel** and a _binary_ called **pyccel**. +this will install a _python_ library **Pyccel** and a _binary_ called **`pyccel`**. Any required Python packages will be installed automatically from PyPI. ### On a read-only system @@ -298,42 +300,33 @@ sudo pyccel-init This step is necessary in order to [pickle header files](./tutorial/header-files.md#Pickling-header-files). If this command is not run then Pyccel will still run correctly but may be slower when using [OpenMP](./tutorial/openmp.md) or other supported external packages. -A warning, reminding the user to execute this command, will be printed to the screen when pyccelizing files which rely on these packages if the pickling step has not been executed. +A warning, reminding the user to execute this command, will be printed to the screen when pyccelising files which rely on these packages if the pickling step has not been executed. ## Additional packages In order to run the unit tests and to get a coverage report, a few additional Python packages should be installed: ```sh -pip3 install --user scipy -pip3 install --user mpi4py -pip3 install --user tblib -pip3 install --user pytest -pip3 install --user astunparse -pip3 install --user coverage +pip install --user -e .[test] ``` -Most of the unit tests can also be run in parallel. This can be done by installing one additional package: - -```sh -pip3 install --user pytest-xdist -``` +Most of the unit tests can also be run in parallel. ## Testing -To test your Pyccel installation please run the script _tests/run\_tests\_py3.sh_ (Unix), or _tests/run\_tests.bat_ (Windows). 
+To test your Pyccel installation please run the script `tests/run\_tests\_py3.sh` (Unix), or `tests/run\_tests.bat` (Windows). -Continuous testing runs on github actions: +Continuous testing runs on GitHub actions: ## Pyccel Container Images -Pyccel container images are available through both Docker Hub (docker.io) and the GitHub Container Registry (ghcr.io). +Pyccel container images are available through both Docker Hub () and the GitHub Container Registry (). The images: -- are based on ubuntu:latest -- use distro packaged python3, gcc, gfortran, blas and openmpi -- support all pyccel releases except the legacy "0.1" +- are based on `ubuntu:latest` +- use distro packaged python3, GCC, GFortran, BLAS and OpenMPI +- support all Pyccel releases except the legacy "0.1" Image tags match Pyccel releases. @@ -347,7 +340,7 @@ docker run -it -v $PWD:/data:rw pyccel/pyccel:v1.0.0 bash ``` If you are using SELinux, you will need to set the right context for your host based volume. -Alternatively you may have docker or podman set the context using -v $PWD:/data:rwz instead of -v $PWD:/data:rw . +Alternatively you may have docker or podman set the context using `-v $PWD:/data:rwz` instead of `-v $PWD:/data:rw` . ## Developer Documentation diff --git a/ci_tools/check_new_coverage.py b/ci_tools/check_new_coverage.py new file mode 100644 index 0000000000..53573fe5e3 --- /dev/null +++ b/ci_tools/check_new_coverage.py @@ -0,0 +1,31 @@ +""" Script to check that all new lines in the python files in the pyccel/ code folder are used in the tests +""" +import json +import argparse +from git_evaluation_tools import get_diff_as_json +import coverage_analysis_tools as cov + +parser = argparse.ArgumentParser(description='Check that all new lines in the python files in the pyccel/ code folder are used in the tests') +parser.add_argument('diffFile', metavar='diffFile', type=str, + help='File containing the git diff output') +parser.add_argument('coverageFile', metavar='coverageFile', type=str, + help='File containing the coverage xml output') +parser.add_argument('gitEvent', metavar='gitEvent', type=str, + help='File containing the json description of the triggering event') +parser.add_argument('output', metavar='output', type=str, + help='File where the markdown output will be printed') + +args = parser.parse_args() + +diff = get_diff_as_json(args.diffFile) +untested, file_contents = cov.get_untested_lines(args.coverageFile) + +new_untested = cov.allow_untested_error_calls(cov.compare_coverage_to_diff(untested, diff)) + +with open(args.gitEvent, encoding="utf-8") as pr_data_file: + pr_data = json.load(pr_data_file) + +cov.print_markdown_summary(new_untested, file_contents, pr_data["pull_request"]["head"]["sha"], args.output) + +cov.show_results(new_untested) + diff --git a/ci_tools/check_slots.py b/ci_tools/check_slots.py new file mode 100644 index 0000000000..b4b7287943 --- /dev/null +++ b/ci_tools/check_slots.py @@ -0,0 +1,109 @@ +""" Script to check that Pyccel coding conventions are correctly followed in the AST +""" +import argparse +import importlib +import inspect +import os +import sys +from pyccel import ast +from pyccel.ast.basic import Basic, PyccelAstNode, ScopedNode + +parser = argparse.ArgumentParser(description='Check that all new lines in the python files in the pyccel/ code folder are used in the tests') +parser.add_argument('output', metavar='output', type=str, + help='File where the markdown output will be printed') + +args = parser.parse_args() + +# Get ast modules +ast_folder = 
os.path.dirname(ast.__file__) +ast_modules = [mod[:-3] for mod in os.listdir(ast_folder) if mod != '__init__.py' and mod.endswith('.py')] + +# Prepare error collection +missing_all = [] +non_alphabetical_all = [] +missing_slots = [] +overridden_slots = [] +missing_attribute_nodes = [] +missing_from_all = [] + +for mod_name in ast_modules: + mod = importlib.import_module('pyccel.ast.'+mod_name) + all_attr = getattr(mod, '__all__', None) + if all_attr: + sorted_all = list(all_attr) + sorted_all.sort() + if sorted_all != list(all_attr): + non_alphabetical_all.append(mod_name) + else: + missing_all.append(mod_name) + + classes = inspect.getmembers(mod, inspect.isclass) + for cls_name, cls_obj in classes: + if inspect.getmodule(cls_obj) is not mod: + continue + super_classes = cls_obj.mro()[1:] + if '__slots__' not in cls_obj.__dict__: + missing_slots.append(f"{mod_name}.{cls_name}") + else: + slots = cls_obj.__slots__ + for c in super_classes: + if '__slots__' not in c.__dict__: + continue + elif any(s in slots for s in c.__slots__): + overridden_slots.append(f'Slot values are overwritten between `{mod_name}.{cls_name}` and `{c.__name__}`') + + if Basic in super_classes: + if cls_obj not in (PyccelAstNode, ScopedNode) and not isinstance(cls_obj._attribute_nodes, tuple): #pylint: disable=W0212 + missing_attribute_nodes.append(f"{mod_name}.{cls_name}") + + if all_attr and cls_name not in all_attr: + missing_from_all.append(f"{mod_name}.{cls_name}") + +print("Missing __all__") +print(missing_all) +print("__all__ non-alphabetical") +print(non_alphabetical_all) +print("Missing __slots__") +print(missing_slots) +print("Missing _attribute_nodes") +print(missing_attribute_nodes) +print("Not in __all__") +print(missing_from_all) +print("Misused slots") +print(overridden_slots) + +with open(args.output, "w", encoding="utf-8") as out: + # Report error + if missing_all: + print("## Missing `__all__`", file=out) + for f in missing_all: + print(f"- `pyccel.ast.{f}`", file=out) + if non_alphabetical_all: + print("## Non-alphabetical `__all__`", file=out) + for f in non_alphabetical_all: + print(f"- `pyccel.ast.{f}`", file=out) + if missing_from_all: + print("## Classes missing from `__all__`", file=out) + for f in missing_from_all: + print(f"- `pyccel.ast.{f}`", file=out) + if missing_slots: + print("## Classes with no `__slots__`", file=out) + for f in missing_slots: + print(f"- `pyccel.ast.{f}`", file=out) + if missing_attribute_nodes: + print("## Classes with no `_attribute_nodes`", file=out) + for f in missing_attribute_nodes: + print(f"- `pyccel.ast.{f}`", file=out) + if overridden_slots: + print("## Misused slots", file=out) + for o in overridden_slots: + print("- ", o, file=out) + +failure = (bool(missing_all) or # bool(non_alphabetical_all) or + bool(missing_slots) or bool(missing_attribute_nodes) or + bool(overridden_slots)) + +if failure: + sys.exit(1) +else: + sys.exit(0) diff --git a/ci_tools/coverage_analysis_tools.py b/ci_tools/coverage_analysis_tools.py new file mode 100644 index 0000000000..346e78fe27 --- /dev/null +++ b/ci_tools/coverage_analysis_tools.py @@ -0,0 +1,175 @@ +""" Functions for comparing coverage output and git diff output +""" +import os +import sys +import defusedxml.ElementTree as ET + +def get_untested_lines(coverage_filename): + """ + Parse a coverage xml file and return a dictionary containing the files and lines + which are untested + + Parameters + ---------- + coverage_filename : str + The name of the xml file containing the coverage information + + Returns + ------- + 
no_coverage : dict + A dictionary whose keys are the files in pyccel + and whose values are lists containing the line numbers + where coverage is lacking in that file + content_lines : dict + A dictionary whose keys are the files in pyccel + and whose values are lists containing the line numbers + where a python command starts (this excludes comments, + empty lines, and lines which are continuations of + previous lines) + """ + tree = ET.parse(coverage_filename) + root = tree.getroot() + + content_lines = {} + no_coverage = {} + + for f in root.findall('.//class'): + filename = f.attrib['filename'] + lines = f.findall('lines')[0].findall('line') + all_lines = [int(l.attrib['number']) for l in lines] + untested_lines = [int(l.attrib['number']) for l in lines if l.attrib['hits'] == "0"] + no_coverage[os.path.join('pyccel',filename)] = untested_lines + content_lines[os.path.join('pyccel',filename)] = all_lines + + return no_coverage, content_lines + +def compare_coverage_to_diff(coverage, diff): + """ + Compare dictionaries containing coverage information and git + diff information to find untested lines which have been added + to the code base + + Parameters + ---------- + coverage : dict + A dictionary whose keys are the files in pyccel + and whose values are lists containing the line numbers + where coverage is lacking in that file + diff : dict + A dictionary whose keys are files which have been + changed in this branch and whose values are a dictionary. + The dictionary must contain a key 'addition' whose value + is a list containing the line numbers of lines which have + been changed/added + + Returns + ------- + untested : dict + A dictionary whose keys are the files in pyccel with + untested lines which have been added in this branch + and whose values are lists containing the line numbers + where coverage is lacking in that file + """ + untested = {} + for f,line_info in diff.items(): + if f not in coverage: + # Ignore non-python files or files in other directories + continue + new_lines = line_info['addition'] + untested_lines = coverage[f] + if any(n in untested_lines for n in new_lines): + untested[f] = [n for n in new_lines if n in untested_lines] + return untested + +def allow_untested_error_calls(untested): + """ + Takes a dictionary describing untested lines and returns an + equivalent dictionary without lines designed to raise errors + + Parameter + --------- + untested : dict + A dictionary whose keys are the files in pyccel with + untested lines which have been added in this branch + and whose values are lists containing the line numbers + where coverage is lacking in that file + + Returns + ------- + reduced_untested : dict + A dictionary which is a copy of the input dictionary + without the lines which express raise statements + """ + reduced_untested = {} + for f,line_nums in untested.items(): + with open(f, encoding="utf-8") as filename: + f_lines = filename.readlines() + untested_lines = [(i, f_lines[i-1].strip()) for i in line_nums] + lines = [i for i,l in untested_lines if not (l.startswith('raise ') or l.startswith('errors.report(') or l.startswith('return errors.report('))] + if len(lines): + reduced_untested[f] = lines + + return reduced_untested + +def print_markdown_summary(untested, content_lines, commit, output): + """ + Print the results neatly in markdown in a provided file + + Parameters + ---------- + untested : dict + Dictionary whose keys are the files in pyccel with untested + lines which have been added in this branch and whose values + are lists 
containing the line numbers where coverage is + lacking in that file + content_lines : dict + Dictionary whose keys are the files in pyccel and whose + values are lists containing the line numbers where python + commands begin + commit : str + The commit being tested + output : str + The file where the markdown summary should be printed + """ + if len(untested) == 0: + md_string = "## Congratulations! All new python code in the pyccel package is fully tested! :tada:" + else: + md_string = "## Warning! The new code is not run\n" + for f, lines in untested.items(): + md_string += f"### {f}\n" + line_indices = content_lines[f] + n_code_lines = len(line_indices) + n_untested = len(lines) + i = 0 + while i < n_untested: + start_line = lines[i] + j = line_indices.index(start_line) + while j < n_code_lines and i < n_untested and lines[i] == line_indices[j]: + i+=1 + j+=1 + if j < n_code_lines-1: + end_line = line_indices[j]-1 + else: + end_line = line_indices[j] + md_string += "https://github.com/pyccel/pyccel/blob/"+commit+"/"+f+f"#L{start_line}-L{end_line}\n" + + with open(output, "a", encoding="utf-8") as out_file: + print(md_string, file=out_file) + +def show_results(untested): + """ + Print the results and fail if coverage is lacking + + Parameters + ---------- + untested : dict + Dictionary whose keys are the files in pyccel with untested + lines which have been added in this branch and whose values + are lists containing the line numbers where coverage is + lacking in that file + """ + for f, lines in untested.items(): + print(f"In file {f} the following lines are untested : {lines}") + + if len(untested) != 0: + sys.exit(1) diff --git a/ci_tools/git_evaluation_tools.py b/ci_tools/git_evaluation_tools.py new file mode 100644 index 0000000000..4290ab4e9d --- /dev/null +++ b/ci_tools/git_evaluation_tools.py @@ -0,0 +1,88 @@ +""" Tools to help examine git information +""" + +def get_diff_as_json(filename): + """ + A function which converts the output of a reduced git diff call + to a dictionary that can be exported using json. + The diff call should use the argument `--unified=0` + + Parameters + ---------- + filename : str + The file where the diff was printed + + Returns + ------- + changes : dict + A dictionary whose keys are files which have been + changed in this branch and whose values are a dictionary. 
+ The dictionary is itself a dictionary with the keys 'addition' + and 'deletion' whose values are lists containing the line + numbers of lines which have been changed/added (addition) or + changed/deleted (deletion) + """ + with open(filename, encoding="utf-8") as f: + lines = f.readlines() + + lines = [l.strip() for l in lines] + changes ={} + i = 0 + n = len(lines) + + current_file_name = None + current_file_additions = [] + current_file_deletions = [] + + while i < n: + l = lines[i] + if l.startswith("diff "): + if current_file_name: + changes[current_file_name] = {} + changes[current_file_name]['addition'] = current_file_additions + changes[current_file_name]['deletion'] = current_file_deletions + current_file_additions = [] + current_file_deletions = [] + current_file_name = l.split(' ')[3][2:] + i+=1 + elif l.startswith('@@'): + line_info = l.split('@@')[1].split() + for info in line_info: + key = info[0] + info = info[1:] + if ',' in info: + line_num, n_lines = [int(li) for li in info.split(',')] + else: + n_lines = 1 + line_num = int(info) + if key == '+': + insert_index = line_num + n_append = n_lines + elif key == '-': + delete_index = line_num + n_delete = n_lines + i+=1 + j=0 + while j'): + words.add(lines[i]) + i+=1 + + if filename in errors: + errors[filename].update(words) + else: + errors[filename] = words + +if errors: + all_words = set() + + with open(os.path.join(os.path.dirname(__file__),'..','.dict_custom.txt'), encoding="utf-8") as d: + internal_dict = [w.strip() for w in d.readlines()] + + with open(args.output, 'w', encoding="utf-8") as f: + print("There are misspelled words", file=f) + for name, words in errors.items(): + print("## `", name, "`", file=f) + for w in words: + suggestions = difflib.get_close_matches(w, internal_dict) + if suggestions: + print("- ", w, f" : Did you mean {w} -> {suggestions}", file=f) + else: + print("- ", w, file=f) + print(file=f) + all_words.update(words) + + print("These errors may be due to typos, capitalisation errors, or lack of quotes around code. If this is a false positive please add your word to `.dict_custom.txt`", file=f) + + sys.exit(1) +else: + sys.exit(0) diff --git a/developer_docs/how_to_solve_an_issue.md b/developer_docs/how_to_solve_an_issue.md index 3b68e9f166..0a6a12ebb4 100644 --- a/developer_docs/how_to_solve_an_issue.md +++ b/developer_docs/how_to_solve_an_issue.md @@ -10,7 +10,7 @@ To add a new function: - Add a class to represent the function. The class should go in the appropriate file in the [ast](../pyccel/ast) folder. This function will probably inherit from [PyccelInternalFunction](../pyccel/ast/internals.py) - Ensure the function is recognised in the semantic stage by adding it to the appropriate dictionary (see the function `builtin_function` and the dictionary `builtin_import_registery` in [ast/utilities.py](../pyccel/ast/utilities.py) - Add the print functions for the 3 languages -- Add tests in the folder tests/epyccel +- Add tests in the folder `tests/epyccel` ## Language Specific Bug Fixes diff --git a/developer_docs/order_docs.md b/developer_docs/order_docs.md new file mode 100644 index 0000000000..1d6a837d99 --- /dev/null +++ b/developer_docs/order_docs.md @@ -0,0 +1,468 @@ +# ndarrays memory layout (order) + +## Order in NumPy + +`order` is the parameter given to the `numpy.array` function in order to choose how a multi-dimensional array is stored in memory. 
+For both of the orders discussed here (`C` and `F`) the arrays are stored **contiguously** in memory, but they differ in how their entries are arranged. + +### Order C + +`order='C'` tells NumPy to store the array row by row (row-major). For example: + +```python +import numpy as np + +if __name__ == "__main__": + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], order='C') # order='C' is the default in numpy.array + print(a.ravel('K')) + ``` + +`array.ravel('k')` shows us how the array is stored in memory. +This Python script will output `[1 2 3 4 5 6 7 8 9]`; notice that the rows are stored one after the other. +This is the default behaviour in Python. + +### Order F + +`order='F'` on the other hand tells NumPy to store the array column by column (column-major). For example: + +```python +import numpy as np + +if __name__ == "__main__": + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]], order='F') + print(a.ravel('K')) +``` + +This Python script will output `[1 4 7 2 5 8 3 6 9]`; notice that the columns are stored one after the other. + +### Printing and indexing in NumPy + +The `order` of a NumPy array does not affect the indexing or the printing: unlike the `transpose` operation, the `shape` of the array remains the same, and only the `strides` change. For example: + +```python +import numpy as np + +if __name__ == "__main__": + a = np.array([[1, 2], + [4, 5], + [7, 8]], order='F') + b = np.array([[1, 2], + [4, 5], + [7, 8]], order='C') + print(a.shape, a.strides) # output: (3, 2) (8, 24) + print(b.shape, b.strides) # output: (3, 2) (16, 8) + print(a) + # output:[[1 2] + # [4 5] + # [7 8]] + print(b) + # output:[[1 2] + # [4 5] + # [7 8]] + + print(a[2][1], a[0][0], a[1]) # output: 8 1 [4 5] + print(b[2][1], b[0][0], b[1]) # output: 8 1 [4 5] +``` + +`arr.strides` is the variable that helps us navigate the array (indexing/printing) by telling us how many bytes we have to skip in memory to move to the next position along a certain axis (dimension). For example for `memory_layout_of_a = [1 4 7 2 5 8]` and `strides_of_a = (8, 24)`, we have to skip 8 bytes (1 element for `int64`) to move to the next row, but 24 bytes (3 elements for `int64`) to get to the same position in the next column of `a`. +`a[2][1]` would give us `'8'`, using the `strides`: `2 * 8 + 1 * 24 = 40`, which means that in the flattened array, we would have to skip `40` bytes to get the value of `a[2][1]`, each element is 8 bytes, so we would have to skip `40 / 8 = 5` elements to get to `'8'` + +The order does however change how the user writes code. +With `order='C'` (as in C), the last dimension contains contiguous elements, whereas with `order='F'` (as in Fortran) the first dimension contains contiguous elements. +Fast code should index efficiently. +By this, we mean that the elements should be visited in the order in which they appear in memory. +For example here is the efficient indexing for 2D arrays: +```python +import numpy as np +if __name__ == "__main__": + a = np.array([[1, 2], + [4, 5], + [7, 8]], order='F') + b = np.array([[1, 2], + [4, 5], + [7, 8]], order='C') + for row in range(3): + for col in range(2): + b[row, col] = ... + for col in range(2): + for row in range(3): + b[row, col] = ... +``` + +## Pyccel's C code + +In Pyccel's C code, we aim to replicate NumPy's indexing/printing and memory layout conventions. 
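+
+Since the generated C code mirrors these NumPy conventions, it helps to be comfortable with the stride arithmetic described above. The small sketch below (an added illustration, with an explicit `int64` dtype so that the strides match the values quoted earlier) recomputes the byte offset of `a[2][1]` from the strides and reads the same value back from the flattened memory:
+
+```python
+import numpy as np
+
+if __name__ == "__main__":
+    a = np.array([[1, 2],
+                  [4, 5],
+                  [7, 8]], dtype=np.int64, order='F')
+    print(a.strides)                        # output: (8, 24)
+    flat = a.ravel('K')                     # memory order: [1 4 7 2 5 8]
+    byte_offset = 2 * a.strides[0] + 1 * a.strides[1]
+    print(byte_offset)                      # output: 40
+    print(flat[byte_offset // a.itemsize])  # output: 8, i.e. the value of a[2][1]
+```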
+ +### Ordering in C code + +Multidimensional arrays in `C` code are flattened into a one dimensional array, `strides` and `shape` are used to navigate this array (unlike NumPy, Pyccel's strides use 'number of elements' instead of 'number of bytes' as a unit) +While the `order_c ndarrays` only require a simple copy to be populated, `order_f` array creation requires slightly different steps. + +Example: + To translate the following: + + ```python + a = np.array([[1, 2, 3], [4, 5, 6]], order=?) + ``` + `order_c` creation +     1. allocate/create `order_c ndarray` +     2. copy values to `ndarray` + + `order_f` creation +     1. allocate/create temporary `order_c ndarray` +     2. copy values to temporary `ndarray` +     3. allocate/create `order_f ndarray` +     4. copy temporary `ndarray` elements to final `order_f ndarray` using `strides` and `shape`, this will create a column-major version of the temporary `order_c ndarray` + +One dimensional arrays require no order, since order would not change how they behave. + +### Indexing in C code + +For indexing the function `GET_ELEMENT(arr, type, ...)` is used, indexing does not change with `order` so that we can mirror NumPy's conventions. + +If we take the following 2D array as an example: +| | | | +|---|---|---| +| 1 | 2 | 3 | +| 4 | 5 | 6 | + +with `array.rows = 2` and `array.columns = 3`, `GET_ELEMENT(arr, int32, 0, 1)` which is equivalent to `arr[0][1]` would return `2` no matter the `order`. + +To loop efficiently in an `order_c ndarray`, we would do this: +```c +for (int row = 0; row < array.rows; ++i) +{ + for (int column = 0; column < array.columns; ++j) + { + GET_ELEMENT(array, int32, row, column) = ...; + } +} +``` + +For an `order_f ndarray` we would do this: + +```c +for (int column = 0; column < array.columns; ++i) +{ + for (int row = 0; row < array.rows; ++j) + { + GET_ELEMENT(array, int32, row, column) = ...; + } +} +``` + +### `order_c` array creation example + +To create an `order_c ndarray`, we simply copy the flattened data to our `ndarray`'s data placeholder that changes depending on the type. + +If the data is composed of scalars only (ex: `np.array([1, 2, 3])`), an `array_dummy` is created, before copying it to our destination `ndarray`. + +Example: + +```python +if __name__ == "__main__": + import numpy as np + a = np.array([[1, 2, 3], [4, 5, 6]]) +``` + +Would translate to: + +```c +int main() +{ + t_ndarray a = {.shape = NULL}; + a = array_create(2, (int64_t[]){INT64_C(2), INT64_C(3)}, nd_int64, false, order_c); + int64_t array_dummy[] = {INT64_C(1), INT64_C(2), INT64_C(3), INT64_C(4), INT64_C(5), INT64_C(6)}; // Creation of an array_dummy containing the scalars, notice the data is flattened + memcpy(a.nd_int64, array_dummy, 6 * a.type_size); // Copying from array_dummy to our ndarray 'a' + free_array(a); + return 0; +} +``` + +If the data is composed of at least one variable array (like `c` in the example below), we would use a series of copy operations to our `ndarray`. 
+ +Example: + +```python +if __name__ == "__main__": + import numpy as np + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + c = np.array([a, [7, 8, 9], b]) +``` + +Would translate to this: + +```c +int main() +{ + t_ndarray a = {.shape = NULL}; + t_ndarray b = {.shape = NULL}; + t_ndarray c = {.shape = NULL}; + a = array_create(1, (int64_t[]){INT64_C(3)}, nd_int64, false, order_c); + int64_t array_dummy[] = {INT64_C(1), INT64_C(2), INT64_C(3)}; + memcpy(a.nd_int64, array_dummy, 3 * a.type_size); + b = array_create(1, (int64_t[]){INT64_C(3)}, nd_int64, false, order_c); + int64_t array_dummy_0001[] = {INT64_C(4), INT64_C(5), INT64_C(6)}; + memcpy(b.nd_int64, array_dummy_0001, 3 * b.type_size); + + // 'c' ndarray creation starts here, 'c' is [a, [7, 8, 9], b] + + c = array_create(2, (int64_t[]){INT64_C(3), INT64_C(3)}, nd_int64, false, order_c); // Allocating 'c' ndarray + uint32_t offset = 0; // Initializing offset, used later to avoid overwritting data when executing multiple copy operations + array_copy_data(&c, a, offset); // Copying the first element of 'c', 'offset' is 0 since it's our first copy operation + offset += a.length; // Incrementing offset for upcoming copy operation + int64_t array_dummy_0002[] = {INT64_C(7), INT64_C(8), INT64_C(9)}; // Creating an array_dummy with 'c''s second element's scalars ([7, 8, 9]) + memcpy(c.nd_int64 + offset, array_dummy_0002, 3 * c.type_size); // 'offset' is also with 'memcpy' + offset += 3; // incrementing 'offset', preparing for final copy + array_copy_data(&c, b, offset); // Copying the third element to 'c' ndarray + free_array(a); + free_array(b); + free_array(c); + return 0; +} +``` + +### `order_f` array creation example + +For `order_f`, the process is similar to `order_c`, but instead of copying our data straight to the destination `ndarray`, we first create an (`order_c`) `temp_ndarray`, copy the data to the `temp_ndarray`, then create an `order_f ndarray`, and copy from the `temp_ndarray` to the destination `order_f ndarray` -- using `strides` and `shape` -- to get the correct column-major memory layout. 
+ +Example: + +```python +if __name__ == "__main__": + import numpy as np + a = np.array([[1, 2, 3], [4, 5, 6]], order="F") + print(a[0][0]) # output ==> 1 +``` + +Would be translated to this: + +```c +int main() +{ + t_ndarray a = {.shape = NULL}; + a = array_create(2, (int64_t[]){INT64_C(2), INT64_C(3)}, nd_int64, false, order_f); // Allocating the required ndarray + t_ndarray temp_array = {.shape = NULL}; + temp_array = array_create(2, (int64_t[]){INT64_C(2), INT64_C(3)}, nd_int64, false, order_c); // Allocating an order_c temp_array + int64_t array_dummy[] = {INT64_C(1), INT64_C(2), INT64_C(3), INT64_C(4), INT64_C(5), INT64_C(6)}; // array_dummy with our flattened data + memcpy(temp_array.nd_int64, array_dummy, 6 * temp_array.type_size); // Copying our array_dummy to our temp ndarray + array_copy_data(&a, temp_array, 0); // Copying into a column-major memory layout + free_array(temp_array); // Freeing the temp_array right after we were done with it + printf("%ld\n", GET_ELEMENT(a, nd_int64, (int64_t)0, (int64_t)0)); // output ==> 1 + free_array(a); + return 0; +} +``` + +If the data is composed of at least one variable array, the process would still be somewhat the same as an `order_c ndarray` creation: + The `order_f ndarray` is not populated from the get go, instead, we create an `order_c temp_array` (following `order_c ndarray` creation steps) containing all the data, then we do a 'copy into a column-major memory layout' operation to our `order_f ndarray`. + +Example: + +```python +if __name__ == "__main__": + import numpy as np + a = np.array([1, 2, 3]) + b = np.array([4, 5, 6]) + f = np.array([a, [7, 8, 9], b], order="F") +``` + +Would be translated to (focus on `f` `ndarray` creation): + +```c +int main() +{ + t_ndarray a = {.shape = NULL}; + t_ndarray b = {.shape = NULL}; + t_ndarray c = {.shape = NULL}; + a = array_create(1, (int64_t[]){3}, nd_int64, false, order_c); + int64_t array_dummy[] = {INT64_C(1), INT64_C(2), INT64_C(3)}; + memcpy(a.nd_int64, array_dummy, 3 * a.type_size); + b = array_create(1, (int64_t[]){INT64_C(3)}, nd_int64, false, order_c); + int64_t array_dummy_0001[] = {INT64_C(4), INT64_C(5), INT64_C(6)}; + memcpy(b.nd_int64, array_dummy_0001, 3 * b.type_size); + + // 'f' ndarray creation + + f = array_create(2, (int64_t[]){INT64_C(3), INT64_C(3)}, nd_int64, false, order_f); // Allocating the required ndarray (order_f) + t_ndarray temp_array = {.shape = NULL}; + temp_array = array_create(2, (int64_t[]){INT64_C(3), INT64_C(3)}, nd_int64, false, order_c); // Allocating a temp_array (order_c) + uint32_t offset = 0; + array_copy_data(&temp_array, a, offset); // Copying the first element to temp_array + offset += a.length; + int64_t array_dummy_0002[] = {INT64_C(7), INT64_C(8), INT64_C(9)}; + memcpy(temp_array.nd_int64 + offset, array_dummy_0002, 3 * temp_array.type_size); // Copying the second element to temp_array + offset += 3; + array_copy_data(&temp_array, b, offset); // Copying the third element to temp_array + array_copy_data(&f, temp_array, 0); // Copying our temp_array into a column-major memory layout (order_f) + free_array(temp_array); // freeing the temp_array + free_array(a); + free_array(b); + free_array(c); + return 0; +} +``` + +## Pyccel's Fortran code + +As Fortran has arrays in the language there is no need to add special handling for arrays. Fortran ordered arrays (`order_f`) are already compatible with the Fortran language. They can therefore be passed to the function as they are. 
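+
+As a quick added illustration of this point (a sketch, not part of the generated code), the raw memory of an `order='F'` NumPy array already follows the column-major traversal that Fortran expects, so no copy or transpose is needed before the data is handed to Fortran:
+
+```python
+import numpy as np
+
+if __name__ == "__main__":
+    a = np.array([[1, 2, 3],
+                  [4, 5, 6]], order='F')
+    # Column-major (Fortran-style) traversal of the logical array
+    print([int(a[i, j]) for j in range(a.shape[1]) for i in range(a.shape[0])])
+    # output: [1, 4, 2, 5, 3, 6]
+    print(a.ravel('K'))  # memory order, output: [1 4 2 5 3 6] -- the same ordering
+```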
+ +In order to pass C ordered arrays (`order_c`) and retain the shape and correct element placing to be compatible with Fortran, a transpose would be needed. +In Pyccel we prefer to avoid unnecessary copies, so instead we pass the contiguous block of memory to Fortran and change how we index the array to ensure that we access the expected element. + +### Ordering in Fortran code + +Fortran indexing does not occur in the same order as in C. +If we take the following 2D array as an example: + +| | | | +|---|---|---| +| 1 | 2 | 3 | +| 4 | 5 | 6 | + +In C the element `A[1,0]=4` is the fourth element in memory, however in Fortran the element `A(1,0)=4` is the second element in memory. +Thus to iterate over this array in the most efficient way in C we would do: +```C +# A.shape = (2,3) +for (int row = 0; row < 2; ++row) { + for (int column = 0; column < 3; ++column) { + A[row,column] = .... + } +} +``` + +while in Fortran we would do: +```Fortran +# A.shape = (2,3) +do column = 0, 3 + do row = 0, 2 + A(row,column) = .... + end do +end do +``` + +As you can see in the Fortran-ordered array the indices are passed to the array in the same order, however the index does not point to the same location in memory. +In C code the index `i_1, i_2, i_3` points to the element `i_1 * (n_2 * n_3) + i_2 * n_2 + i_3` in memory. +In Fortran code the index `i_1, i_2, i_3` points to the element `i_1 + i_2 * n_1 + i_3 * (n_2 * n_3)` in memory. + +### Order F +Pyccel's translation of code with `order='F'` should look very similar to the original Python code. + +NumPy's storage of the strides ensures that the first dimension is the contiguous dimension as in Fortran, so the code is equivalent for all element-wise operations. + +There are some exceptions to this rule, for example printing. Python always prints arrays in a row-major format so Pyccel must take care to respect this rule in the output. + +### Order C + +As mentioned above, printing a C-ordered array in Fortran is more complicated. +Consider the following 2D C-ordered array: + +| | | | +|---|---|---| +| 1 | 2 | 3 | +| 4 | 5 | 6 | + +where the numbers indicate the position of the elements in memory. If this data block (`[1, 2, 3, 4, 5, 6]`) were passed to Fortran indicating a size (2,3), we would obtain the following array: + +| | | | +|---|---|---| +| 1 | 3 | 5 | +| 2 | 4 | 6 | + +As a result we cannot pass the data block without either rearranging the elements (transposing), or changing the index. In Pyccel we prefer avoiding unnecessary copies. As a result we pass a data block (`[1, 2, 3, 4, 5, 6]`), but we indicate a size (3,2). This gives us the following array: + +| | | +|---|---| +| 1 | 4 | +| 2 | 5 | +| 3 | 6 | + +This is equivalent to the transpose of the original array. As a result we can obtain expected results by simply inverting the index order. + +Therefore the following Python code +```python +for i in range(2): + for j in range(3): + a[i,j] = i*3+j +``` + +is translated to the following efficient indexing: +```fortran +do i = 0_i64, 1_i64, 1_i64 + do j = 0_i64, 2_i64, 1_i64 + a(j, i) = i * 3_i64 + j + end do +end do +``` + +As we are effectively operating on the transpose of the array, this must be taken into account when printing anything related to arrays with `order='C'`. + +For example, consider the code: +```python +def f(c_array : 'float[:,:](order=C)', f_array : 'float[:,:](order=F)'): + print(c_array.shape) + print(f_array.shape) + + for row in range(c_array.shape[0]): + for col in range(c_array.shape[1]): + c_array[row, col] = ... 
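+
+    # For the F-ordered array the first dimension is contiguous, so the row loop goes innermost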
+ + for col in range(f_array.shape[1]): + for row in range(f_array.shape[0]): + f_array[row, col] = ... +``` + +This will be translated to: + +```Fortran + subroutine f(c_array, f_array) + + implicit none + + real(f64), intent(inout) :: c_array(0:,0:) + real(f64), intent(inout) :: f_array(0:,0:) + integer(i64) :: row + integer(i64) :: col + + write(stdout, '(A I0 A I0 A)', advance="no") '(' , size(c_array, & + 2_i64, i64) , ', ' , size(c_array, 1_i64, i64) , ')' + write(stdout, '()', advance="yes") + write(stdout, '(A I0 A I0 A)', advance="no") '(' , size(f_array, & + 1_i64, i64) , ', ' , size(f_array, 2_i64, i64) , ')' + write(stdout, '()', advance="yes") + do row = 0_i64, size(c_array, 2_i64, i64) - 1_i64, 1_i64 + do col = 0_i64, size(c_array, 1_i64, i64) - 1_i64, 1_i64 + c_array(col, row) = ... + end do + end do + do col = 0_i64, size(f_array, 2_i64, i64) - 1_i64, 1_i64 + do row = 0_i64, size(f_array, 1_i64, i64) - 1_i64, 1_i64 + f_array(row, col) = ... + end do + end do + + end subroutine f +``` + +Note the changes to the shape and the indexing, which make this code closer to the following intermediate representation: + +```python +def f_intermediate(c_array_T : 'float[:,:](order=F)', f_array : 'float[:,:](order=F)'): + print(c_array_T.shape[::-1]) + print(f_array.shape) + + for row in range(c_array_T.shape[1]): + for col in range(c_array_T.shape[0]): + c_array_T[col, row] = ... + + for col in range(f_array.shape[1]): + for row in range(f_array.shape[0]): + f_array[row, col] = ... +``` + +Note that `f(c_array, f_array) == f_intermediate(c_array.T, f_array)`. diff --git a/developer_docs/overview.md b/developer_docs/overview.md index 1b8a7f171e..519377fe69 100644 --- a/developer_docs/overview.md +++ b/developer_docs/overview.md @@ -1,6 +1,6 @@ ## Developer Setup -Before beginning any development in pyccel, it is important to ensure pyccel is correctly installed **from source in development mode** as described [here](../README.md#from-sources). If this step is not followed then any changes made to source will not be used when `pyccel` or `epyccel` are used. +Before beginning any development in Pyccel, it is important to ensure Pyccel is correctly installed **from source in development mode** as described [here](../README.md#from-sources). If this step is not followed then any changes made to source will not be used when `pyccel` or `epyccel` are used. ## Overview @@ -8,7 +8,7 @@ Pyccel's development is split into 4 main stages: ### Syntactic Stage -Pyccel uses Python's [ast module](https://docs.python.org/3/library/ast.html) to read the input file(s). The ast does not store information in the same way as the rest of Pyccel so this stage exists to **convert Python's ast to Pyccel's ast**. The related code can be found in [parser/syntactic.py](../pyccel/parser/syntactic.py). +Pyccel uses Python's [`ast` module](https://docs.python.org/3/library/ast.html) to read the input file(s). The abstract syntax tree (AST) of Python's `ast` module does not store information in the same way as the rest of Pyccel so this stage exists to **convert Python's AST to Pyccel's AST**. The related code can be found in [parser/syntactic.py](../pyccel/parser/syntactic.py). The syntactic stage also handles parsing header comments. This is managed using [textx](http://textx.github.io/textX/stable/). The files describing the _textx_ grammar are found in the folder [parser/grammar](../pyccel/parser/grammar). 
From these files _textx_ generates instances of the classes found in the folder [parser/syntax](../pyccel/parser/syntax). @@ -18,7 +18,7 @@ The role of this stage has decreased significantly since we moved from [redbaron ### Semantic Stage -This is the most important stage in pyccel. It is here that all the information about types is calculated. This stage strives to be **language-agnostic**; this means for example, that additional variables required to handle problems appearing in one specific language should not be created here. +This is the most important stage in Pyccel. It is here that all the information about types is calculated. This stage strives to be **language-agnostic**; this means for example, that additional variables required to handle problems appearing in one specific language should not be created here. When adding functions to this stage the aim is often to create a `PyccelAstNode` (see [ast/basic.py](../pyccel/ast/basic.py)) and correctly define all of its parameters. This information is sometimes readily available (e.g. the type of a `PyccelAdd` can be derived from the type of the variables passed to it), but sometimes the information must be collected from elsewhere (e.g. when creating a `Variable` from a `PyccelSymbol` (roughly equivalent to a string). In this case information is needed from a `Scope` instance which is stored in the `scope`. @@ -34,11 +34,11 @@ As in the Semantic stage, the Code Generation stage also stores the current Scop ### Compilation Stage -Finally the generated code is compiled. This is handled in the [pipeline](../pyccel/codegen/pipeline.py). The compilers commands are found in [codegen/compiling/compilers.py](../pyccel/codegen/compiling/compilers.py). Different compilers have different flags and need different libraries. Once pyccel has been executed once on your machine the flags and libraries can be found in json files in the [compilers](../pyccel/compilers) folder +Finally the generated code is compiled. This is handled in the [pipeline](../pyccel/codegen/pipeline.py). The compilers commands are found in [codegen/compiling/compilers.py](../pyccel/codegen/compiling/compilers.py). Different compilers have different flags and need different libraries. Once Pyccel has been executed once on your machine the flags and libraries can be found in JSON files in the [compilers](../pyccel/compilers) folder ### Function Naming Conventions/File Navigation -In the syntactic, semantic, and code generation stages a similar strategy is used for traversing the Python objects. This strategy is based on function names. The majority of functions have names of the form: `_prefix_ClassName` (in the syntactic and semantic stages the prefix is `visit`, in the code generation stages it is `print`). These functions are never called directly, but instead are called via a high level function `_prefix` (e.g. `_visit` for the semantic stage). This strategy avoids large if/elif blocks to handle all possible types. +In the syntactic, semantic, and code generation stages a similar strategy is used for traversing the Python objects. This strategy is based on function names. The majority of functions have names of the form: `_prefix_ClassName` (in the syntactic and semantic stages the prefix is `visit`, in the code generation stages it is `print`). These functions are never called directly, but instead are called via a high level function `_prefix` (e.g. `_visit` for the semantic stage). This strategy avoids large `if`/`elif` blocks to handle all possible types. 
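+
+The idea can be sketched as follows (a simplified illustration with invented class names, not Pyccel's actual code):
+
+```python
+class UnaryFunction:
+    pass
+
+class Tanh(UnaryFunction):
+    pass
+
+class SketchPrinter:
+    """Simplified sketch of the name-based dispatch, not Pyccel's real printer."""
+
+    def _print(self, expr):
+        # Walk the inheritance tree of the node and call the first
+        # matching _print_<ClassName> method that this printer defines.
+        for cls in type(expr).__mro__:
+            method = getattr(self, '_print_' + cls.__name__, None)
+            if method is not None:
+                return method(expr)
+        raise NotImplementedError(f"No printer found for {type(expr).__name__}")
+
+    def _print_UnaryFunction(self, expr):
+        return 'handled by the printer of a parent class'
+
+print(SketchPrinter()._print(Tanh()))  # output: handled by the printer of a parent class
+```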
#### Example Suppose we want to generate the code for an object of the class `NumpyTanh`, first we collect the inheritance tree of `NumpyTanh`. This gives us: @@ -60,7 +60,10 @@ In the case of `NumpyTanh` the function which will be selected is `_print_NumpyU ### AST -The objects as understood by pyccel are each described by classes which inherit from [pyccel.ast.basic.Basic](../pyccel/ast/basic.py). These classes are found in the [ast](../pyccel/ast) folder. The ast is split into several files. There is one file for each supported extension module and files to group concepts, e.g. literals/operators/built-in functions +The objects as understood by Pyccel are each described by classes which inherit from [pyccel.ast.basic.Basic](../pyccel/ast/basic.py). +These classes are found in the [ast](../pyccel/ast) folder. +The objects in the Abstract Syntax Tree (AST) are described in several files. +There is one file for each supported extension module and files to group concepts, e.g. literals/operators/built-in functions ## Error System @@ -73,4 +76,4 @@ If the error prevents further translation (e.g. the type of an object is now unk ## Getting Help -While discussions within the associated Github issue are often sufficient, should you require more help do not hesitate to ask one of the other developers to add you to our slack: pyccel.slack.com +While discussions within the associated GitHub issue are often sufficient, should you require more help do not hesitate to ask one of the other developers to add you to our slack: diff --git a/developer_docs/review_process.md b/developer_docs/review_process.md index ca6b767583..a1e71cd85c 100644 --- a/developer_docs/review_process.md +++ b/developer_docs/review_process.md @@ -7,13 +7,13 @@ When you believe your branch is ready to merge you should create a pull request. Once the pull request is opened 4 tests should be triggered they are: - **Linux** : Runs the suite of tests on a linux machine -- **MacOS** : Runs the suite of tests on a mac os machine +- **MacOS** : Runs the suite of tests on a macOS machine - **Windows** : Runs the suite of tests on a windows machine - **Codacy** : Runs a static compiler via the [codacy](https://app.codacy.com/gh/pyccel/pyccel/dashboard) platform -Once the pull request is open the tests will be triggered every time you push new changes to the branch. Please be mindful of this and try to avoid pushing multiple times in a row to save compute resources. If you do find you need to push repeatedly, don't hesitate to cancel concurrent jobs using the github "Actions" tab. +Once the pull request is open the tests will be triggered every time you push new changes to the branch. Please be mindful of this and try to avoid pushing multiple times in a row to save compute resources. If you do find you need to push repeatedly, don't hesitate to cancel concurrent jobs using the GitHub "Actions" tab. -When the pull request is ready for review (ie. you are happy with it, and it is passing all tests) it can be marked as such and the review process can begin. This process is split into 3 stages which each have an associated label. The labels are described in the next sections. When a reviewer marks a PR as accepted, they should change the label to indicate the next stage of the review process. If they request changes they should remove the label so the pull request owner can react. +When the pull request is ready for review (i.e. you are happy with it, and it is passing all tests) it can be marked as such and the review process can begin. 
This process is split into 3 stages which each have an associated label. The labels are described in the next sections. When a reviewer marks a PR as accepted, they should change the label to indicate the next stage of the review process. If they request changes they should remove the label so the pull request owner can react. Once your pull request has been reviewed please react to the open conversations. If you disagree you can say this, if not please leave a reference to the commit which fixes the mentioned issue. This makes the review process faster and more streamlined. Please only resolve conversations that you opened. You may think you fixed the problem, but the reviewer may disagree and leaving the discussion open makes it easier for them to verify that they agree with you. If you are reviewing then please close all conversations that you open once the problem is resolved. If you don't this can block the merge. diff --git a/developer_docs/scope.md b/developer_docs/scope.md index 23d2031ab8..134dc1bbdd 100644 --- a/developer_docs/scope.md +++ b/developer_docs/scope.md @@ -6,7 +6,7 @@ In Pyccel, a `Scope` is an object defined in [parser/scope.py](../pyccel/parser/ Each of these objects must be inserted into the scope using and insert function. -## ScopedNode +## `ScopedNode` Each scope is associated with a class, e.g. `FunctionDef`, `For`, `Module`. These classes inherit from the `ScopedNode` class. The scope associated with the class instance, is saved within the class. This makes the scope available when the class instance is available. This is important so as to correctly set the `scope` variable in the `SemanticParser` and the different `CodePrinter`s. @@ -16,7 +16,7 @@ The `Scope` object keeps track of all names used in the described scope. This me 1. In the syntactic stage all symbols in the code must be added to the correct scope. This is done via the function `insert_symbol` -2. All names of variables created by pyccel must be created using one of the following functions defined in the Scope class: +2. All names of variables created by Pyccel must be created using one of the following functions defined in the Scope class: - `get_expected_name` : Collect the name which will be used to create the Variable referenced in the argument. In most cases this operation will be the identity operation, but it ensures that name collisions are handled and that the Symbol has been correctly inserted into the Scope - `get_new_name` : Get a new name with no collisions. A name can be requested and will be used if available - `get_new_incremented_symbol` : Get a new name with no collisions following a pattern. 
This function keeps track of the index appended to the incremented string so it is most useful when creating multiple names with the same prefix diff --git a/pyccel/ast/basic.py b/pyccel/ast/basic.py index a703d665da..b1fd29ec54 100644 --- a/pyccel/ast/basic.py +++ b/pyccel/ast/basic.py @@ -11,7 +11,7 @@ """ import ast -__all__ = ('Basic', 'PyccelAstNode') +__all__ = ('Basic', 'Immutable', 'PyccelAstNode', 'ScopedNode') dict_keys = type({}.keys()) dict_values = type({}.values()) diff --git a/pyccel/ast/bind_c.py b/pyccel/ast/bind_c.py index ff8cdcd65f..d857a9afa6 100644 --- a/pyccel/ast/bind_c.py +++ b/pyccel/ast/bind_c.py @@ -20,9 +20,9 @@ 'BindCFunctionDef', 'BindCPointer', 'CLocFunc', - 'as_static_module', 'as_static_function', 'as_static_function_call', + 'as_static_module', 'sanitize_arguments', 'wrap_array', 'wrap_module_array_var', diff --git a/pyccel/ast/bitwise_operators.py b/pyccel/ast/bitwise_operators.py index 6e69bdf50b..fc59dc6d73 100644 --- a/pyccel/ast/bitwise_operators.py +++ b/pyccel/ast/bitwise_operators.py @@ -15,12 +15,14 @@ from .operators import PyccelUnaryOperator, PyccelOperator __all__ = ( - 'PyccelRShift', - 'PyccelLShift', - 'PyccelBitXor', - 'PyccelBitOr', + 'PyccelBitComparisonOperator', + 'PyccelBitOperator', 'PyccelBitAnd', + 'PyccelBitOr', + 'PyccelBitXor', 'PyccelInvert', + 'PyccelLShift', + 'PyccelRShift', ) #============================================================================== diff --git a/pyccel/ast/builtin_imports.py b/pyccel/ast/builtin_imports.py index 885a1659b4..afddd7a97d 100644 --- a/pyccel/ast/builtin_imports.py +++ b/pyccel/ast/builtin_imports.py @@ -2,6 +2,8 @@ File containing a set of all python standard libraries from python 3.6 - 3.9 (from python 3.10 there is a function in sys for this purpose) """ +__all__ = ('python_builtin_libs',) + python_builtin_libs = { "_dummy_thread", "_thread", diff --git a/pyccel/ast/builtins.py b/pyccel/ast/builtins.py index 9e078b7bdc..09d9f60f34 100644 --- a/pyccel/ast/builtins.py +++ b/pyccel/ast/builtins.py @@ -29,6 +29,9 @@ pyccel_stage = PyccelStage() __all__ = ( + 'Lambda', + 'PythonAbs', + 'PythonComplexProperty', 'PythonReal', 'PythonImag', 'PythonConjugate', @@ -43,6 +46,7 @@ 'PythonMap', 'PythonPrint', 'PythonRange', + 'PythonSum', 'PythonType', 'PythonZip', 'PythonMax', @@ -61,6 +65,7 @@ class PythonComplexProperty(PyccelInternalFunction): arg : Variable, Literal """ + __slots__ = () _dtype = NativeFloat() _precision = -1 _rank = 0 @@ -132,6 +137,7 @@ class PythonConjugate(PyccelInternalFunction): arg : Variable, Literal """ + __slots__ = () _dtype = NativeComplex() _precision = -1 _rank = 0 @@ -588,7 +594,8 @@ class PythonPrint(Basic): expr : PyccelAstNode The expression to print - + file: String (Optional) + Select 'stdout' (default) or 'stderr' to print to Examples >>> from pyccel.ast.internals import symbols @@ -597,18 +604,27 @@ class PythonPrint(Basic): >>> Print(('results', n,m)) Print((results, n, m)) """ - __slots__ = ('_expr') + __slots__ = ('_expr', '_file') _attribute_nodes = ('_expr',) name = 'print' - def __init__(self, expr): + def __init__(self, expr, file="stdout"): + if file not in ('stdout', 'stderr'): + raise ValueError('output_unit can be `stdout` or `stderr`') self._expr = expr + self._file = file super().__init__() @property def expr(self): return self._expr + @property + def file(self): + """ returns the output unit (`stdout` or `stderr`) + """ + return self._file + #============================================================================== class 
PythonRange(Basic): @@ -673,8 +689,7 @@ class PythonZip(PyccelInternalFunction): Represents a zip stmt. """ - __slots__ = ('_length','_args') - _attribute_nodes = ('_args',) + __slots__ = ('_length',) name = 'zip' def __init__(self, *args): @@ -886,9 +901,12 @@ def print_string(self): can be used in a print statement """ prec = self.precision - return LiteralString("".format( - dtype = str(self.dtype), - precision = '' if prec in (None, -1) else (prec * (16 if self.dtype is NativeComplex() else 8)))) + dtype = str(self.dtype) + if prec in (None, -1): + return LiteralString(f"") + else: + precision = prec * (16 if self.dtype is NativeComplex() else 8) + return LiteralString(f"") #============================================================================== python_builtin_datatypes_dict = { diff --git a/pyccel/ast/c_concepts.py b/pyccel/ast/c_concepts.py index b00c110117..0e5f807f42 100644 --- a/pyccel/ast/c_concepts.py +++ b/pyccel/ast/c_concepts.py @@ -7,7 +7,12 @@ Module representing object address. """ -from .basic import PyccelAstNode +from .basic import PyccelAstNode, Basic +from .literals import LiteralString + +__all__ = ('CMacro', + 'CStringExpression', + 'ObjectAddress') class ObjectAddress(PyccelAstNode): """Represents the address of an object. @@ -34,3 +39,175 @@ def obj(self): """The object whose address is of interest """ return self._obj + +#------------------------------------------------------------------------------ +class CStringExpression(Basic): + """ + Internal class used to hold a C string that has LiteralStrings and C macros. + + Parameters + ---------- + *args : str / LiteralString / CMacro / CStringExpression + any number of arguments to be added to the expression + note: they will get added in the order provided + + Example + ------ + >>> expr = CStringExpression( + ... CMacro("m"), + ... CStringExpression( + ... LiteralString("the macro is: "), + ... CMacro("mc") + ... ), + ... LiteralString("."), + ... 
) + """ + __slots__ = ('_expression',) + _attribute_nodes = ('_expression',) + + def __init__(self, *args): + self._expression = [] + super().__init__() + for arg in args: + self.append(arg) + + def __repr__(self): + return ''.join(repr(e) for e in self._expression) + + def __str__(self): + return ''.join(str(e) for e in self._expression) + + def __add__(self, o): + """ + return new CStringExpression that has `o` at the end + + Parameter + ---------- + o : str / LiteralString / CMacro / CStringExpression + the expression to add + """ + if isinstance(o, str): + o = LiteralString(o) + if not isinstance(o, (LiteralString, CMacro, CStringExpression)): + raise TypeError(f"unsupported operand type(s) for +: '{self.__class__}' and '{type(o)}'") + return CStringExpression(*self._expression, o) + + def __radd__(self, o): + if isinstance(o, LiteralString): + return CStringExpression(o, self) + return NotImplemented + + def __iadd__(self, o): + self.append(o) + return self + + def append(self, o): + """ + append the argument `o` to the end of the list _expression + + Parameter + --------- + o : str / LiteralString / CMacro / CStringExpression + the expression to append + """ + if isinstance(o, str): + o = LiteralString(o) + if not isinstance(o, (LiteralString, CMacro, CStringExpression)): + raise TypeError(f"unsupported operand type(s) for append: '{self.__class__}' and '{type(o)}'") + self._expression += (o,) + o.set_current_user_node(self) + + def join(self, lst): + """ + insert self between each element of the list `lst` + + Parameter + --------- + lst : list + the list to insert self between its elements + + Example + ------- + >>> a = [ + ... CMacro("m"), + ... CStringExpression(LiteralString("the macro is: ")), + ... LiteralString("."), + ... ] + >>> b = CStringExpression("?").join(a) + ... + ... # is the same as: + ... + >>> b = CStringExpression( + ... CMacro("m"), + ... CStringExpression("?"), + ... CStringExpression(LiteralString("the macro is: ")), + CStringExpression("?"), + ... LiteralString("."), + ... 
) + """ + result = CStringExpression() + if not lst: + return result + result += lst[0] + for elm in lst[1:]: + result += self + result += elm + return result + + def get_flat_expression_list(self): + """ + returns a list of LiteralStrings and CMacros after merging every + consecutive LiteralString + """ + tmp_res = [] + for e in self.expression: + if isinstance(e, CStringExpression): + tmp_res.extend(e.get_flat_expression_list()) + else: + tmp_res.append(e) + if not tmp_res: + return [] + result = [tmp_res[0]] + for e in tmp_res[1:]: + if isinstance(e, LiteralString) and isinstance(result[-1], LiteralString): + result[-1] += e + else: + result.append(e) + return result + + @property + def expression(self): + """ The list containing the literal strings and c macros + """ + return self._expression + +#------------------------------------------------------------------------------ +class CMacro(Basic): + """Represents a c macro""" + __slots__ = ('_macro',) + _attribute_nodes = () + + def __init__(self, arg): + super().__init__() + if not isinstance(arg, str): + raise TypeError('arg must be of type str') + self._macro = arg + + def __repr__(self): + return str(self._macro) + + def __add__(self, o): + if isinstance(o, (LiteralString, CStringExpression)): + return CStringExpression(self, o) + return NotImplemented + + def __radd__(self, o): + if isinstance(o, LiteralString): + return CStringExpression(o, self) + return NotImplemented + + @property + def macro(self): + """ The string containing macro name + """ + return self._macro diff --git a/pyccel/ast/core.py b/pyccel/ast/core.py index 92ccc80c1f..a297d5aea2 100644 --- a/pyccel/ast/core.py +++ b/pyccel/ast/core.py @@ -50,29 +50,40 @@ 'CodeBlock', 'Comment', 'CommentBlock', + 'Concatenate', 'ConstructorCall', 'Continue', 'Deallocate', 'Declare', + 'Decorator', 'Del', + 'DottedFunctionCall', 'Duplicate', 'DoConcurrent', 'EmptyNode', + 'ErrorExit', + 'Exit', 'For', 'ForIterator', + 'FuncAddressDeclare', + 'FunctionAddress', 'FunctionCall', 'FunctionCallArgument', 'FunctionDef', 'FunctionDefArgument', 'If', + 'IfSection', 'Import', 'InlineFunctionDef', 'InProgram', 'Interface', + 'Iterable', 'Module', 'ModuleHeader', 'Pass', 'Program', + 'PyccelFunctionDef', + 'Raise', 'Return', 'SeparatorComment', 'StarredArguments', @@ -2735,6 +2746,7 @@ class PyccelFunctionDef(FunctionDef): The class which should be instantiated upon a FunctionCall to this FunctionDef object """ + __slots__ = () def __init__(self, name, func_class): assert isinstance(func_class, type) and \ issubclass(func_class, (PyccelInternalFunction, PyccelAstNode)) @@ -3987,6 +3999,7 @@ class Decorator(Basic): The name of the decorator """ __slots__ = ('_name',) + _attribute_nodes = () def __init__(self, name): self._name = name diff --git a/pyccel/ast/cwrapper.py b/pyccel/ast/cwrapper.py index b00d5f72fd..d71845897b 100644 --- a/pyccel/ast/cwrapper.py +++ b/pyccel/ast/cwrapper.py @@ -38,6 +38,7 @@ 'PyArgKeywords', 'PyArg_ParseTupleNode', 'PyBuildValueNode', + 'PyModule_AddObject', #--------- CONSTANTS ---------- 'Py_True', 'Py_False', diff --git a/pyccel/ast/datatypes.py b/pyccel/ast/datatypes.py index 581f60cf56..6d17610a8a 100644 --- a/pyccel/ast/datatypes.py +++ b/pyccel/ast/datatypes.py @@ -27,7 +27,7 @@ 'NativeGeneric', 'NativeInteger', 'NativeTuple', -# 'NativeNil', + 'NativeNil', 'NativeRange', 'NativeFloat', 'NativeString', @@ -193,6 +193,7 @@ def __init__(self, name='__UNDEFINED__'): self._name = name class NativeGeneric(DataType): + __slots__ = () _name = 'Generic' diff --git 
a/pyccel/ast/headers.py b/pyccel/ast/headers.py index 09bddba117..61051e099f 100644 --- a/pyccel/ast/headers.py +++ b/pyccel/ast/headers.py @@ -10,7 +10,7 @@ from .basic import Basic, iterable from .core import Assign, FunctionCallArgument from .core import FunctionDef, FunctionCall, FunctionAddress -from .datatypes import datatype, DataTypeFactory, UnionType +from .datatypes import datatype, DataTypeFactory, UnionType, default_precision from .internals import PyccelSymbol, Slice from .macros import Macro, MacroShape, construct_macro from .variable import DottedName, DottedVariable @@ -25,6 +25,7 @@ 'MacroVariable', 'MetaVariable', 'MethodHeader', + 'Template', 'VariableHeader', ) @@ -284,6 +285,10 @@ def build_argument(var_name, dc): order = None shape = None + + if rank and precision == -1: + precision = default_precision[dtype] + if rank >1: order = dc['order'] diff --git a/pyccel/ast/internals.py b/pyccel/ast/internals.py index caac5ef444..b7e6df0587 100644 --- a/pyccel/ast/internals.py +++ b/pyccel/ast/internals.py @@ -8,6 +8,7 @@ To avoid circular imports this file should only import from basic, datatypes, and literals """ +from operator import attrgetter from pyccel.utilities.stage import PyccelStage from .basic import Basic, PyccelAstNode, Immutable @@ -22,8 +23,8 @@ 'PyccelInternalFunction', 'PyccelSymbol', 'Slice', + 'get_final_precision', 'max_precision', - 'get_final_precision' ) @@ -292,8 +293,10 @@ def max_precision(objs : list, dtype = None, allow_native = True): return max(def_prec if o.precision == -1 \ else o.precision for o in objs if o.dtype is dtype) else: - return max(default_precision[str(o.dtype)] if o.precision == -1 \ - else o.precision for o in objs) + ndarray_list = [o for o in objs if getattr(o, 'is_ndarray', False)] + if ndarray_list: + return get_final_precision(max(ndarray_list, key=attrgetter('precision'))) + return max(get_final_precision(o) for o in objs) def get_final_precision(obj): """ diff --git a/pyccel/ast/itertoolsext.py b/pyccel/ast/itertoolsext.py index faea896c45..24c6914890 100644 --- a/pyccel/ast/itertoolsext.py +++ b/pyccel/ast/itertoolsext.py @@ -10,8 +10,8 @@ from .internals import PyccelInternalFunction __all__ = ( - 'itertools_mod', 'Product', + 'itertools_mod', ) class Product(PyccelInternalFunction): diff --git a/pyccel/ast/literals.py b/pyccel/ast/literals.py index 39f01be2a6..0a3a6ad5d1 100644 --- a/pyccel/ast/literals.py +++ b/pyccel/ast/literals.py @@ -11,6 +11,7 @@ NativeComplex, NativeString) __all__ = ( + 'Literal', 'LiteralTrue', 'LiteralFalse', 'LiteralInteger', @@ -149,8 +150,8 @@ def __new__(cls, real, imag, precision = -1): def __init__(self, real, imag, precision = -1): super().__init__(precision) - self._real_part = LiteralFloat(self._collect_python_val(real)) - self._imag_part = LiteralFloat(self._collect_python_val(imag)) + self._real_part = LiteralFloat(self._collect_python_val(real), precision = precision) + self._imag_part = LiteralFloat(self._collect_python_val(imag), precision = precision) @staticmethod def _collect_python_val(arg): @@ -213,6 +214,11 @@ def __repr__(self): def __str__(self): return str(self.python_value) + def __add__(self, o): + if isinstance(o, LiteralString): + return LiteralString(self._string + o._string) + return NotImplemented + @property def python_value(self): return self.arg diff --git a/pyccel/ast/numpyext.py b/pyccel/ast/numpyext.py index e79dba32d7..70f97ba63f 100644 --- a/pyccel/ast/numpyext.py +++ b/pyccel/ast/numpyext.py @@ -32,11 +32,11 @@ from .internals import 
PyccelInternalFunction, Slice, max_precision, get_final_precision from .internals import PyccelArraySize -from .literals import LiteralInteger, LiteralFloat, LiteralComplex, convert_to_literal +from .literals import LiteralInteger, LiteralFloat, LiteralComplex, LiteralString, convert_to_literal from .literals import LiteralTrue, LiteralFalse from .literals import Nil from .mathext import MathCeil -from .operators import broadcast, PyccelMinus, PyccelDiv +from .operators import broadcast, PyccelMinus, PyccelDiv, PyccelMul, PyccelAdd from .variable import (Variable, Constant, HomogeneousTupleVariable) errors = Errors() @@ -45,8 +45,14 @@ __all__ = ( 'process_shape', # --- + 'NumpyAutoFill', + 'NumpyUfuncBase', + 'NumpyUfuncBinary', + 'NumpyUfuncUnary', + # --- 'NumpyAbs', 'NumpyFloor', + 'NumpySign', # --- 'NumpySqrt', 'NumpySin', @@ -65,18 +71,27 @@ 'NumpyArccosh', 'NumpyArctanh', # --- - 'NumpyEmpty', - 'NumpyEmptyLike', - 'NumpyFloat', + 'NumpyAmax', + 'NumpyAmin', + 'NumpyArange', + 'NumpyArray', + 'NumpyArraySize', + 'NumpyBool', + 'NumpyCountNonZero', 'NumpyComplex', 'NumpyComplex64', 'NumpyComplex128', + 'NumpyConjugate', + 'NumpyEmpty', + 'NumpyEmptyLike', + 'NumpyFabs', + 'NumpyFloat', 'NumpyFloat32', 'NumpyFloat64', 'NumpyFull', 'NumpyFullLike', 'NumpyImag', - 'NumpyBool', + 'NumpyHypot', 'NumpyInt', 'NumpyInt8', 'NumpyInt16', @@ -84,11 +99,7 @@ 'NumpyInt64', 'NumpyLinspace', 'NumpyMatmul', - 'NumpyAmax', - 'NumpyAmin', - 'NumpyArange', - 'NumpyArraySize', - 'NumpyCountNonZero', + 'NumpyNewArray', 'NumpyMod', 'NumpyNonZero', 'NumpyNonZeroElement', @@ -100,10 +111,11 @@ 'NumpyRand', 'NumpyRandint', 'NumpyReal', - 'Shape', + 'NumpyTranspose', 'NumpyWhere', 'NumpyZeros', 'NumpyZerosLike', + 'Shape', ) #======================================================================================= @@ -188,12 +200,14 @@ def __init__(self, arg=None, base=10): class NumpyInt8(NumpyInt): """ Represents a call to numpy.int8() function. """ + __slots__ = () _precision = dtype_registry['int8'][1] name = 'int8' class NumpyInt16(NumpyInt): """ Represents a call to numpy.int16() function. """ + __slots__ = () _precision = dtype_registry['int16'][1] name = 'int16' @@ -253,10 +267,12 @@ class NumpyImag(PythonImag): __slots__ = ('_precision','_rank','_shape','_order') name = 'imag' def __new__(cls, arg): + if not isinstance(arg.dtype, NativeComplex): - dtype=NativeInteger() if isinstance(arg.dtype, NativeBool) else arg.dtype + dtype = NativeInteger() if isinstance(arg.dtype, NativeBool) else arg.dtype if arg.rank == 0: return convert_to_literal(0, dtype, arg.precision) + dtype = DtypePrecisionToCastFunction[dtype.name][arg.precision] return NumpyZeros(arg.shape, dtype=dtype) return super().__new__(cls, arg) @@ -318,8 +334,7 @@ class NumpyComplex128(NumpyComplex): 'Complex' : { -1 : PythonComplex, 4 : NumpyComplex64, - 8 : NumpyComplex, - 16 : NumpyComplex128,}, + 8 : NumpyComplex128,}, 'Bool': { -1 : PythonBool, 4 : NumpyBool} @@ -328,24 +343,54 @@ class NumpyComplex128(NumpyComplex): #============================================================================== def process_dtype(dtype): + """ + This function takes a dtype passed to a numpy array creation function, + processes it in different ways depending on its type, and finally extracts + the corresponding type and precision from the `dtype_registry` dictionary. + + This function could be useful when working with numpy creation function + having a dtype argument, like numpy.array, numpy.arrange, numpy.linspace... 
+ + Parameters + ---------- + dtype: PythonType | PyccelFunctionDef | String + The actual dtype passed to the numpy function + + Raises + ------ + TypeError: In the case of unrecognized argument type. + TypeError: In the case of passed string argument not recognized as valid dtype. + + Returns: + ---------- + dtype: Datatype + The Datatype corresponding to the passed dtype. + precision: int + The precision corresponding to the passed dtype. + """ + if isinstance(dtype, PythonType): return dtype.dtype, get_final_precision(dtype) if isinstance(dtype, PyccelFunctionDef): dtype = dtype.cls_name - if dtype in (PythonInt, PythonFloat, PythonComplex, PythonBool): + if dtype in (PythonInt, PythonFloat, PythonComplex, PythonBool): # remove python prefix from dtype.name len("python") = 6 dtype = dtype.__name__.lower()[6:] - elif dtype in (NumpyInt, NumpyInt8, NumpyInt16, NumpyInt32, NumpyInt64, NumpyComplex, NumpyFloat, + elif dtype in (NumpyInt, NumpyInt8, NumpyInt16, NumpyInt32, NumpyInt64, NumpyComplex, NumpyFloat, NumpyComplex128, NumpyComplex64, NumpyFloat64, NumpyFloat32): # remove numpy prefix from dtype.name len("numpy") = 5 dtype = dtype.__name__.lower()[5:] + elif isinstance(dtype, (LiteralString, str)): + dtype = str(dtype).replace('\'', '').lower() + if dtype not in dtype_registry: + raise TypeError(f'Unknown type of {dtype}.') else: - dtype = str(dtype).replace('\'', '').lower() + raise TypeError(f'Unknown type of {dtype}.') dtype, precision = dtype_registry[dtype] if precision == -1: - precision = default_precision[dtype] - dtype = datatype(dtype) + precision = default_precision[dtype] + dtype = datatype(dtype) return dtype, precision @@ -398,7 +443,11 @@ def __init__(self, arg, dtype=None, order='C'): # Verify dtype and get precision if dtype is None: dtype = arg.dtype - dtype, prec = process_dtype(dtype) + prec = get_final_precision(arg) + else: + dtype, prec = process_dtype(dtype) + # ... 
Determine ordering + order = str(order).strip("\'") shape = process_shape(False, arg.shape) rank = len(shape) @@ -494,6 +543,9 @@ def stop(self): def step(self): return self._step + def __getitem__(self, index): + step = PyccelMul(index, self.step, simplify=True) + return PyccelAdd(self.start, step, simplify=True) #============================================================================== class NumpySum(PyccelInternalFunction): @@ -931,11 +983,11 @@ def __init__(self, shape, fill_value, dtype=None, order='C'): # If there is no dtype, extract it from fill_value # TODO: must get dtype from an annotated node - if not dtype: + if dtype is None: dtype = fill_value.dtype - - # Verify dtype and get precision - dtype, precision = process_dtype(dtype) + precision = get_final_precision(fill_value) + else: + dtype, precision = process_dtype(dtype) # Cast fill_value to correct type if fill_value: @@ -964,7 +1016,6 @@ class NumpyAutoFill(NumpyFull): def __init__(self, shape, dtype='float', order='C'): if not dtype: raise TypeError("Data type must be provided") - super().__init__(shape, Nil(), dtype, order) #============================================================================== @@ -973,6 +1024,12 @@ class NumpyEmpty(NumpyAutoFill): """ __slots__ = () name = 'empty' + + def __init__(self, shape, dtype='float', order='C'): + if dtype in NativeNumeric: + precision = default_precision[str_dtype(dtype)] + dtype = DtypePrecisionToCastFunction[dtype.name][precision] + super().__init__(shape, dtype, order) @property def fill_value(self): return None @@ -1029,7 +1086,8 @@ class NumpyFullLike(PyccelInternalFunction): def __new__(cls, a, fill_value, dtype=None, order='K', subok=True, shape=None): # NOTE: we ignore 'subok' argument - dtype = dtype or a.dtype + if dtype is None: + dtype = DtypePrecisionToCastFunction[a.dtype.name][a.precision] order = a.order if str(order).strip('\'"') in ('K', 'A') else order shape = Shape(a) if shape is None else shape return NumpyFull(shape, fill_value, dtype, order) @@ -1043,7 +1101,8 @@ class NumpyEmptyLike(PyccelInternalFunction): def __new__(cls, a, dtype=None, order='K', subok=True, shape=None): # NOTE: we ignore 'subok' argument - dtype = dtype or a.dtype + if dtype is None: + dtype = DtypePrecisionToCastFunction[a.dtype.name][a.precision] order = a.order if str(order).strip('\'"') in ('K', 'A') else order shape = Shape(a) if shape is None else shape @@ -1058,7 +1117,8 @@ class NumpyOnesLike(PyccelInternalFunction): def __new__(cls, a, dtype=None, order='K', subok=True, shape=None): # NOTE: we ignore 'subok' argument - dtype = dtype or a.dtype + if dtype is None: + dtype = DtypePrecisionToCastFunction[a.dtype.name][a.precision] order = a.order if str(order).strip('\'"') in ('K', 'A') else order shape = Shape(a) if shape is None else shape @@ -1073,7 +1133,8 @@ class NumpyZerosLike(PyccelInternalFunction): def __new__(cls, a, dtype=None, order='K', subok=True, shape=None): # NOTE: we ignore 'subok' argument - dtype = dtype or a.dtype + if dtype is None: + dtype = DtypePrecisionToCastFunction[a.dtype.name][a.precision] order = a.order if str(order).strip('\'"') in ('K', 'A') else order shape = Shape(a) if shape is None else shape @@ -1277,13 +1338,24 @@ class NumpyArctanh(NumpyUfuncUnary): #======================================================================================= + +class NumpySign(NumpyUfuncUnary): + """Represent a call to the sign function in the Numpy library""" + __slots__ = () + name = 'sign' + def _set_dtype_precision(self, x): + if not 
isinstance(x.dtype, (NativeInteger, NativeFloat, NativeComplex)): + raise TypeError(f'{x.dtype} not supported') + self._dtype = x.dtype + self._precision = get_final_precision(x) + class NumpyAbs(NumpyUfuncUnary): """Represent a call to the abs function in the Numpy library""" __slots__ = () name = 'abs' def _set_dtype_precision(self, x): self._dtype = NativeInteger() if x.dtype is NativeInteger() else NativeFloat() - self._precision = default_precision[str_dtype(self._dtype)] + self._precision = get_final_precision(x) class NumpyFloor(NumpyUfuncUnary): """Represent a call to the floor function in the Numpy library""" @@ -1707,6 +1779,7 @@ def __str__(self): 'linspace' : PyccelFunctionDef('linspace' , NumpyLinspace), 'where' : PyccelFunctionDef('where' , NumpyWhere), # --- + 'sign' : PyccelFunctionDef('sign' , NumpySign), 'abs' : PyccelFunctionDef('abs' , NumpyAbs), 'floor' : PyccelFunctionDef('floor' , NumpyFloor), 'absolute' : PyccelFunctionDef('absolute' , NumpyAbs), diff --git a/pyccel/ast/omp.py b/pyccel/ast/omp.py index 9c7df0ce02..0c61d9e721 100644 --- a/pyccel/ast/omp.py +++ b/pyccel/ast/omp.py @@ -12,6 +12,24 @@ from .basic import Basic +__all__ = ('OmpAnnotatedComment', + 'OMP_For_Loop', + 'OMP_Simd_Construct', + 'OMP_TaskLoop_Construct', + 'OMP_Distribute_Construct', + 'OMP_Parallel_Construct', + 'OMP_Task_Construct', + 'OMP_Single_Construct', + 'OMP_Critical_Construct', + 'OMP_Master_Construct', + 'OMP_Masked_Construct', + 'OMP_Cancel_Construct', + 'OMP_Target_Construct', + 'OMP_Teams_Construct', + 'OMP_Sections_Construct', + 'OMP_Section_Construct', + 'Omp_End_Clause') + class OmpAnnotatedComment(Basic): """Represents an OpenMP Annotated Comment in the code. diff --git a/pyccel/ast/operators.py b/pyccel/ast/operators.py index 6b6d654880..d863a85d00 100644 --- a/pyccel/ast/operators.py +++ b/pyccel/ast/operators.py @@ -30,6 +30,11 @@ __all__ = ( 'PyccelOperator', + 'PyccelArithmeticOperator', + 'PyccelBinaryOperator', + 'PyccelBooleanOperator', + 'PyccelComparisonOperator', + 'PyccelUnaryOperator', 'PyccelPow', 'PyccelAdd', 'PyccelMinus', @@ -390,14 +395,14 @@ def _calculate_dtype(cls, *args): strs = [a for a in args if a.dtype is NativeString()] if strs: - return cls._handle_str_type(strs) assert len(integers + floats + complexes) == 0 + return cls._handle_str_type(strs) elif complexes: - return cls._handle_complex_type(complexes) + return cls._handle_complex_type(args) elif floats: - return cls._handle_float_type(floats) + return cls._handle_float_type(args) elif integers: - return cls._handle_integer_type(integers) + return cls._handle_integer_type(args) else: raise TypeError('cannot determine the type of {}'.format(args)) diff --git a/pyccel/ast/scipyext.py b/pyccel/ast/scipyext.py index 115ced880c..8bbe7adf89 100644 --- a/pyccel/ast/scipyext.py +++ b/pyccel/ast/scipyext.py @@ -10,6 +10,8 @@ from .core import Module, Import from .variable import Constant +__all__ = ('scipy_mod', 'scipy_pi_const') + scipy_pi_const = Constant('float', 'pi', value=pi) scipy_mod = Module('scipy', diff --git a/pyccel/ast/sympy_helper.py b/pyccel/ast/sympy_helper.py index dd84de94e2..8e3ea31992 100644 --- a/pyccel/ast/sympy_helper.py +++ b/pyccel/ast/sympy_helper.py @@ -27,6 +27,9 @@ from .variable import Variable, PyccelArraySize +__all__ = ('sympy_to_pyccel', + 'pyccel_to_sympy') + #============================================================================== def sympy_to_pyccel(expr, symbol_map): """ diff --git a/pyccel/ast/sysext.py b/pyccel/ast/sysext.py new file mode 100644 index 
0000000000..355ce16152 --- /dev/null +++ b/pyccel/ast/sysext.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- +#------------------------------------------------------------------------------------------# +# This file is part of Pyccel which is released under MIT License. See the LICENSE file or # +# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. # +#------------------------------------------------------------------------------------------# +""" Module containing objects from the sys module understood by pyccel +""" +from .core import PyccelFunctionDef, Module +from .internals import PyccelInternalFunction +from .datatypes import NativeVoid +from .internals import LiteralInteger + +__all__ = ( + 'SysExit', + 'sys_constants', + 'sys_funcs', + 'sys_mod', +) + +class SysExit(PyccelInternalFunction): + """Represents a call to sys.exit + + Parameters + ---------- + + arg : PyccelAstNode (optional) + if arg.dtype is NativeInteger it will be used as the exit_code + else the arg will be printed to the stderror + """ + __slots__ = () + name = 'exit' + _dtype = NativeVoid() + _precision = -1 + _rank = 0 + _shape = None + _order = None + + def __init__(self, status=LiteralInteger(0)): + super().__init__(status) + + @property + def status(self): + """return the arg of exit""" + return self._args[0] + + def __str__(self): + return f'exit({str(self.status)})' + +sys_constants = { + +} + +sys_funcs = { + 'exit' : PyccelFunctionDef('exit', SysExit), +} + +sys_mod = Module('sys', + variables = (sys_constants.values()), + funcs = (sys_funcs.values()), + imports = [ + ]) diff --git a/pyccel/ast/utilities.py b/pyccel/ast/utilities.py index dca7d425c4..9e15e910a7 100644 --- a/pyccel/ast/utilities.py +++ b/pyccel/ast/utilities.py @@ -24,6 +24,7 @@ from .itertoolsext import itertools_mod from .literals import LiteralInteger, Nil from .mathext import math_mod +from .sysext import sys_mod from .numpyext import (NumpyEmpty, NumpyArray, numpy_mod, NumpyTranspose, NumpyLinspace) @@ -37,6 +38,7 @@ errors = Errors() __all__ = ( + 'LoopCollection', 'builtin_function', 'builtin_import', 'builtin_import_registery', @@ -86,7 +88,8 @@ def builtin_function(expr, args=None): Import('scipy', AsName(scipy_mod,'scipy')), Import('itertools', AsName(itertools_mod,'itertools')), Import('math', AsName(math_mod,'math')), - Import('pyccel', AsName(pyccel_mod,'pyccel')) + Import('pyccel', AsName(pyccel_mod,'pyccel')), + Import('sys', AsName(sys_mod,'sys')), ]) if sys.version_info < (3, 10): from .builtin_imports import python_builtin_libs @@ -309,6 +312,9 @@ def insert_index(expr, pos, index_var): return type(expr)(insert_index(expr.args[0], pos, index_var), insert_index(expr.args[1], pos, index_var)) + elif hasattr(expr, '__getitem__'): + return expr[index_var] + else: raise NotImplementedError("Expansion not implemented for type : {}".format(type(expr))) @@ -356,8 +362,8 @@ def collect_loops(block, indices, new_index, language_has_vectors = False, resul current_level = 0 array_creator_types = (Allocate, PythonList, PythonTuple, Concatenate, Duplicate) is_function_call = lambda f: ((isinstance(f, FunctionCall) and not f.funcdef.is_elemental) - or (isinstance(f, PyccelInternalFunction) and not f.is_elemental - and not isinstance(f, NumpyTranspose))) + or (isinstance(f, PyccelInternalFunction) and not f.is_elemental and not hasattr(f, '__getitem__') + and not isinstance(f, (NumpyTranspose)))) for line in block: if (isinstance(line, Assign) and @@ -400,6 +406,7 @@ def collect_loops(block, indices, 
new_index, language_has_vectors = False, resul transposed_vars = [v for v in notable_nodes if isinstance(v, NumpyTranspose)] \ + [v for f in elemental_func_calls \ for v in f.get_attribute_nodes(NumpyTranspose)] + indexed_funcs = [v for v in notable_nodes if isinstance(v, PyccelInternalFunction) and hasattr(v, '__getitem__')] is_checks = [n for n in notable_nodes if isinstance(n, PyccelIs)] @@ -416,7 +423,7 @@ def collect_loops(block, indices, new_index, language_has_vectors = False, resul funcs = [f for f in notable_nodes+transposed_vars if (isinstance(f, FunctionCall) \ and not f.funcdef.is_elemental)] internal_funcs = [f for f in notable_nodes+transposed_vars if (isinstance(f, PyccelInternalFunction) \ - and not f.is_elemental) \ + and not f.is_elemental and not hasattr(f, '__getitem__')) \ and not isinstance(f, NumpyTranspose)] # Collect all variables for which values other than the value indexed in the loop are important @@ -439,7 +446,7 @@ def collect_loops(block, indices, new_index, language_has_vectors = False, resul symbol=line, severity='fatal') func_results = [f.funcdef.results[0] for f in funcs] - func_vars2 = [new_index(r.dtype, r) for r in func_results] + func_vars2 = [new_index(r.dtype, r.name) for r in func_results] assigns += [Assign(v, f) for v,f in zip(func_vars2, funcs)] if assigns: @@ -457,7 +464,7 @@ def collect_loops(block, indices, new_index, language_has_vectors = False, resul rank = line.lhs.rank shape = line.lhs.shape new_vars = variables - new_vars_t = transposed_vars + handled_funcs = transposed_vars + indexed_funcs # Loop over indexes, inserting until the expression can be evaluated # in the desired language new_level = 0 @@ -468,8 +475,8 @@ def collect_loops(block, indices, new_index, language_has_vectors = False, resul indices.append(new_index('int','i')) index_var = indices[rank+index] new_vars = [insert_index(v, index, index_var) for v in new_vars] - new_vars_t = [insert_index(v, index, index_var) for v in new_vars_t] - if compatible_operation(*new_vars, *new_vars_t, language_has_vectors = language_has_vectors): + handled_funcs = [insert_index(v, index, index_var) for v in handled_funcs] + if compatible_operation(*new_vars, *handled_funcs, language_has_vectors = language_has_vectors): break # TODO [NH]: get all indices when adding axis argument to linspace function @@ -477,10 +484,12 @@ def collect_loops(block, indices, new_index, language_has_vectors = False, resul line.rhs.ind = indices[0] # Replace variable expressions with Indexed versions - line.substitute(variables, new_vars, excluded_nodes = (FunctionCall, PyccelInternalFunction)) - line.substitute(transposed_vars, new_vars_t, excluded_nodes = (FunctionCall)) + line.substitute(variables, new_vars, + excluded_nodes = (FunctionCall, PyccelInternalFunction)) + line.substitute(transposed_vars + indexed_funcs, handled_funcs, + excluded_nodes = (FunctionCall)) _ = [f.substitute(variables, new_vars) for f in elemental_func_calls] - _ = [f.substitute(transposed_vars, new_vars_t) for f in elemental_func_calls] + _ = [f.substitute(transposed_vars + indexed_funcs, handled_funcs) for f in elemental_func_calls] # Recurse through result tree to save line with lines which need # the same set of for loops diff --git a/pyccel/ast/variable.py b/pyccel/ast/variable.py index 611c34d924..502bf3fd1f 100644 --- a/pyccel/ast/variable.py +++ b/pyccel/ast/variable.py @@ -25,9 +25,12 @@ pyccel_stage = PyccelStage() __all__ = ( + 'Constant', 'DottedName', 'DottedVariable', + 'HomogeneousTupleVariable', 'IndexedElement', + 
'InhomogeneousTupleVariable', 'TupleVariable', 'Variable' ) diff --git a/pyccel/codegen/printing/ccode.py b/pyccel/codegen/printing/ccode.py index dc6758656f..c299980981 100644 --- a/pyccel/codegen/printing/ccode.py +++ b/pyccel/codegen/printing/ccode.py @@ -47,7 +47,7 @@ from pyccel.ast.variable import DottedName from pyccel.ast.variable import InhomogeneousTupleVariable, HomogeneousTupleVariable -from pyccel.ast.c_concepts import ObjectAddress +from pyccel.ast.c_concepts import ObjectAddress, CMacro, CStringExpression from pyccel.codegen.printing.codeprinter import CodePrinter @@ -66,7 +66,7 @@ # Used in CCodePrinter._print_NumpyUfuncBase(self, expr) numpy_ufunc_to_c_float = { 'NumpyAbs' : 'fabs', - 'NumpyFabs' : 'fabs', + 'NumpyFabs' : 'fabs', 'NumpyMin' : 'minval', 'NumpyMax' : 'maxval', 'NumpyFloor': 'floor', # TODO: might require special treatment with casting @@ -199,6 +199,7 @@ "stdlib", "string", "tgmath", + "inttypes", ) dtype_registry = {('float',8) : 'double', @@ -222,6 +223,17 @@ ('int',1) : 'nd_int8', ('bool',4) : 'nd_bool'} +type_to_format = {('float',8) : '%.12lf', + ('float',4) : '%.12f', + ('complex',8) : '(%.12lf + %.12lfj)', + ('complex',4) : '(%.12f + %.12fj)', + ('int',4) : '%d', + ('int',8) : LiteralString("%") + CMacro('PRId64'), + ('int',2) : LiteralString("%") + CMacro('PRId16'), + ('int',1) : LiteralString("%") + CMacro('PRId8'), + ('bool',4) : '%s', + ('string', 0) : '%s'} + import_dict = {'omp_lib' : 'omp' } c_imports = {n : Import(n, Module(n, (), ())) for n in @@ -234,8 +246,10 @@ 'stdint', 'pyc_math_c', 'stdio', + "inttypes", 'stdbool', - 'assert']} + 'assert', + 'numpy_c']} class CCodePrinter(CodePrinter): """A printer to convert python expressions to strings of c code""" @@ -310,9 +324,6 @@ def copy_NumpyArray_Data(self, expr): declare_dtype = self.find_in_dtype_registry(self._print(rhs.dtype), rhs.precision) dtype = self.find_in_ndarray_type_registry(self._print(rhs.dtype), rhs.precision) arg = rhs.arg if isinstance(rhs, NumpyArray) else rhs - if rhs.rank > 1: - # flattening the args to use them in C initialization. - arg = self._flatten_list(arg) self.add_import(c_imports['string']) if isinstance(arg, Variable): @@ -320,6 +331,9 @@ def copy_NumpyArray_Data(self, expr): cpy_data = "memcpy({0}.{2}, {1}.{2}, {0}.buffer_size);\n".format(lhs, arg, dtype) return '%s' % (cpy_data) else : + if arg.rank > 1: + # flattening the args to use them in C initialization. 
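Note: a minimal sketch (hypothetical function name, standard pyccel type annotations) of the kind of code the new `type_to_format` entries target. Fixed-width integers are now printed through the portable `inttypes.h` format macros (`PRId64`, `PRId16`, `PRId8`), assembled with `CMacro`/`CStringExpression`, instead of the previous `%ld`/`%hd`/`%c` placeholders.

def show_widths(a : 'int64', b : 'int16', c : 'int8'):
    # Each width gets its own format string in the generated C code,
    # built from the matching PRId* macro rather than a hard-coded literal.
    print(a)
    print(b)
    print(c)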
+ arg = self._flatten_list(arg) arg = ', '.join(self._print(i) for i in arg) dummy_array = "%s %s[] = {%s};\n" % (declare_dtype, dummy_array_name, arg) cpy_data = "memcpy({0}.{2}, {1}, {0}.buffer_size);\n".format(self._print(lhs), dummy_array_name, dtype) @@ -387,43 +401,6 @@ def _init_stack_array(self, expr): self.add_import(c_imports['ndarrays']) return buffer_array, array_init - def fill_NumpyArange(self, expr, lhs): - """ print the assignment of a NumpyArange - parameters - ---------- - expr : NumpyArange - The node holding NumpyArange - lhs : Variable - The left hand of Assign - Return - ------ - String - Return string that contains the Assign code and the For loop - responsible for filling the array values - """ - start = self._print(expr.start) - stop = self._print(expr.stop) - step = self._print(expr.step) - dtype = self.find_in_ndarray_type_registry(self._print(expr.dtype), expr.precision) - - target = self.scope.get_temporary_variable(expr.dtype) - index = self.scope.get_temporary_variable(NativeInteger()) - - self._additional_code += self._print(Assign(index, LiteralInteger(0))) - - code = 'for({target} = {start}; {target} {op} {stop}; {target} += {step})' - code += '\n{{\n{lhs}.{dtype}[{index}] = {target};\n' - code += self._print(AugAssign(index, '+', LiteralInteger(1))) + '\n}}' - code = code.format(target = self._print(target), - start = start, - stop = stop, - op = '<' if not isinstance(expr.step, PyccelUnarySub) else '>', - step = step, - index = self._print(index), - lhs = lhs, - dtype = dtype) - return code - def _handle_inline_func_call(self, expr): """ Print a function call to an inline function """ @@ -521,8 +498,16 @@ def _print_PythonMin(self, expr): self.add_import(c_imports['math']) return "fmin({}, {})".format(self._print(arg[0]), self._print(arg[1])) + elif arg.dtype is NativeInteger() and len(arg) == 2: + arg1 = self.scope.get_temporary_variable(NativeInteger()) + arg2 = self.scope.get_temporary_variable(NativeInteger()) + assign1 = Assign(arg1, arg[0]) + assign2 = Assign(arg2, arg[1]) + self._additional_code += self._print(assign1) + self._additional_code += self._print(assign2) + return f"({arg1} < {arg2} ? {arg1} : {arg2})" else: - return errors.report("min in C is only supported for 2 float arguments", symbol=expr, + return errors.report("min in C is only supported for 2 scalar arguments", symbol=expr, severity='fatal') def _print_PythonMax(self, expr): @@ -531,10 +516,28 @@ def _print_PythonMax(self, expr): self.add_import(c_imports['math']) return "fmax({}, {})".format(self._print(arg[0]), self._print(arg[1])) + elif arg.dtype is NativeInteger() and len(arg) == 2: + arg1 = self.scope.get_temporary_variable(NativeInteger()) + arg2 = self.scope.get_temporary_variable(NativeInteger()) + assign1 = Assign(arg1, arg[0]) + assign2 = Assign(arg2, arg[1]) + self._additional_code += self._print(assign1) + self._additional_code += self._print(assign2) + return f"({arg1} > {arg2} ? 
{arg1} : {arg2})" else: - return errors.report("max in C is only supported for 2 float arguments", symbol=expr, + return errors.report("max in C is only supported for 2 scalar arguments", symbol=expr, severity='fatal') + def _print_SysExit(self, expr): + code = "" + if expr.status.dtype is not NativeInteger() or expr.status.rank > 0: + print_arg = FunctionCallArgument(expr.status) + code = self._print(PythonPrint((print_arg, ), file="stderr")) + arg = "1" + else: + arg = self._print(expr.status) + return f"{code}exit({arg});\n" + def _print_PythonFloat(self, expr): value = self._print(expr.arg) type_name = self.find_in_dtype_registry('float', expr.precision) @@ -553,6 +556,17 @@ def _print_PythonBool(self, expr): def _print_Literal(self, expr): return repr(expr.python_value) + def _print_LiteralInteger(self, expr): + if isinstance(expr, LiteralInteger) and get_final_precision(expr) == 8: + self.add_import(c_imports['stdint']) + return f"INT64_C({repr(expr.python_value)})" + return repr(expr.python_value) + + def _print_LiteralFloat(self, expr): + if isinstance(expr, LiteralFloat) and get_final_precision(expr) == 4: + return f"{repr(expr.python_value)}f" + return repr(expr.python_value) + def _print_LiteralComplex(self, expr): if expr.real == LiteralFloat(0): return self._print(PyccelAssociativeParenthesis(PyccelMul(expr.imag, LiteralImaginaryUnit()))) @@ -793,16 +807,6 @@ def _print_LiteralString(self, expr): return '"{}"'.format(format_str) def get_print_format_and_arg(self, var): - type_to_format = {('float',8) : '%.12lf', - ('float',4) : '%.12f', - ('complex',8) : '(%.12lf + %.12lfj)', - ('complex',4) : '(%.12f + %.12fj)', - ('int',4) : '%d', - ('int',8) : '%ld', - ('int',2) : '%hd', - ('int',1) : '%c', - ('bool',4) : '%s', - ('string', 0) : '%s'} try: arg_format = type_to_format[(self._print(var.dtype), get_final_precision(var))] except KeyError: @@ -815,17 +819,25 @@ def get_print_format_and_arg(self, var): arg = self._print(var) return arg_format, arg + def _print_CStringExpression(self, expr): + return "".join(self._print(e) for e in expr.get_flat_expression_list()) + + def _print_CMacro(self, expr): + return str(expr.macro) + def extract_function_call_results(self, expr): tmp_list = [self.scope.get_temporary_variable(a.dtype) for a in expr.funcdef.results] return tmp_list def _print_PythonPrint(self, expr): self.add_import(c_imports['stdio']) + self.add_import(c_imports['inttypes']) end = '\n' sep = ' ' code = '' empty_end = FunctionCallArgument(LiteralString(''), 'end') space_end = FunctionCallArgument(LiteralString(' '), 'end') + empty_sep = FunctionCallArgument(LiteralString(''), 'sep') kwargs = [f for f in expr.expr if f.has_keyword] for f in kwargs: if f.keyword == 'sep' : sep = str(f.value) @@ -836,17 +848,38 @@ def _print_PythonPrint(self, expr): orig_args = [f for f in expr.expr if not f.has_keyword] def formatted_args_to_printf(args_format, args, end): - args_format = sep.join(args_format) + args_format = CStringExpression(sep).join(args_format) args_format += end - args_format = self._print(LiteralString(args_format)) + args_format = self._print(args_format) args_code = ', '.join([args_format, *args]) - return "printf({});\n".format(args_code) + if expr.file == 'stderr': + return f"fprintf(stderr, {args_code});\n" + return f"printf({args_code});\n" if len(orig_args) == 0: return formatted_args_to_printf(args_format, args, end) + tuple_start = FunctionCallArgument(LiteralString('(')) + tuple_sep = LiteralString(', ') + tuple_end = FunctionCallArgument(LiteralString(')')) + 
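Note: a minimal sketch (hypothetical function name) of the intent behind the new literal printers just above: integer literals that resolve to 64-bit precision are wrapped in the `INT64_C` macro, and single-precision float literals receive an `f` suffix, so constants keep the precision of the expression they belong to.

def counter_start():
    # Plain Python integers default to 64-bit precision in pyccel, so this
    # literal is now emitted as INT64_C(3000000000) in the generated C.
    n = 3000000000
    return n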
for i, f in enumerate(orig_args): f = f.value + if isinstance(f, (InhomogeneousTupleVariable, PythonTuple)): + if args_format: + code += formatted_args_to_printf(args_format, args, sep) + args_format = [] + args = [] + args = [FunctionCallArgument(print_arg) for tuple_elem in f for print_arg in (tuple_elem, tuple_sep)][:-1] + if len(f) == 1: + args.append(FunctionCallArgument(LiteralString(','))) + if i + 1 == len(orig_args): + end_of_tuple = FunctionCallArgument(LiteralString(end), 'end') + else: + end_of_tuple = FunctionCallArgument(LiteralString(sep), 'end') + code += self._print(PythonPrint([tuple_start, *args, tuple_end, empty_sep, end_of_tuple])) + args = [] + continue if isinstance(f, PythonType): f = f.print_string @@ -857,7 +890,8 @@ def formatted_args_to_printf(args_format, args, end): arg_format, arg = self.get_print_format_and_arg(a) tmp_arg_format_list.append(arg_format) args.append(arg) - args_format.append('({})'.format(', '.join(tmp_arg_format_list))) + tmp_arg_format_list = CStringExpression(', ').join(tmp_arg_format_list) + args_format.append(CStringExpression('(', tmp_arg_format_list, ')')) assign = Assign(tmp_list, f) self._additional_code += self._print(assign) elif f.rank > 0: @@ -872,14 +906,16 @@ def formatted_args_to_printf(args_format, args, end): if f.rank == 1: print_body.append(space_end) - for_body = [PythonPrint(print_body)] + for_body = [PythonPrint(print_body, file=expr.file)] for_scope = self.scope.create_new_loop_scope() for_loop = For(for_index, for_range, for_body, scope=for_scope) for_end = FunctionCallArgument(LiteralString(']'+end if i == len(orig_args)-1 else ']'), keyword='end') - body = CodeBlock([PythonPrint([ FunctionCallArgument(LiteralString('[')), empty_end]), + body = CodeBlock([PythonPrint([ FunctionCallArgument(LiteralString('[')), empty_end], + file=expr.file), for_loop, - PythonPrint([ FunctionCallArgument(f[max_index]), for_end])], + PythonPrint([ FunctionCallArgument(f[max_index]), for_end], + file=expr.file)], unravelled = True) code += self._print(body) else: @@ -1254,6 +1290,40 @@ def _print_NumpyUfuncBase(self, expr): code_args = ', '.join(args) return '{0}({1})'.format(func_name, code_args) + def _print_NumpySign(self, expr): + """ Print the corresponding C function for a call to Numpy.sign + + Parameters + ---------- + expr : Pyccel ast node + Python expression with Numpy.sign call + + Returns + ------- + string + Equivalent internal function in C + + Example + ------- + import numpy + + numpy.sign(x) => isign(x) (x is integer) + numpy.sign(x) => fsign(x) (x if float) + numpy.sign(x) => csign(x) (x is complex) + + """ + self.add_import(c_imports['numpy_c']) + dtype = expr.dtype + func = '' + if isinstance(dtype, NativeInteger): + func = 'isign' + elif isinstance(dtype, NativeFloat): + func = 'fsign' + elif isinstance(dtype, NativeComplex): + func = 'csign' + + return f'{func}({self._print(expr.args[0])})' + def _print_MathFunctionBase(self, expr): """ Convert a Python expression with a math function call to C function call @@ -1546,13 +1616,13 @@ def _print_Return(self, expr): # make sure that stmt contains one assign node. 
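Note: a minimal sketch (hypothetical function name) of the new `numpy.sign` support shown above: the C printer dispatches on the argument's dtype to the helpers bundled in the `numpy_c` library, `isign` for integers, `fsign` for floats and `csign` for complex values.

import numpy as np

def signs(i : 'int64', x : 'float', z : 'complex'):
    # Lowered to isign(i), fsign(x) and csign(z) respectively in the C output.
    return np.sign(i), np.sign(x), np.sign(z)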
last_assign = last_assign[-1] variables = last_assign.rhs.get_attribute_nodes(Variable) - unneeded_var = not any(b in vars_in_deallocate_nodes for b in variables) + unneeded_var = not any(b in vars_in_deallocate_nodes or b.is_ndarray for b in variables) if unneeded_var: code = ''.join(self._print(a) for a in expr.stmt.body if a is not last_assign) return code + 'return {};\n'.format(self._print(last_assign.rhs)) else: - code = ''+self._print(expr.stmt) last_assign.lhs.is_temp = False + code = self._print(expr.stmt) return code + 'return {0};\n'.format(self._print(args[0])) @@ -1632,10 +1702,17 @@ def _print_PyccelUnarySub(self, expr): return '-{}'.format(self._print(expr.args[0])) def _print_AugAssign(self, expr): - lhs_code = self._print(expr.lhs) op = expr.op - rhs_code = self._print(expr.rhs) - return "{0} {1}= {2};\n".format(lhs_code, op, rhs_code) + lhs = expr.lhs + rhs = expr.rhs + + if op == '%' and isinstance(lhs.dtype, NativeFloat): + _expr = expr.to_basic_assign() + return self._print(_expr) + + lhs_code = self._print(lhs) + rhs_code = self._print(rhs) + return f'{lhs_code} {op}= {rhs_code};\n' def _print_Assign(self, expr): prefix_code = '' @@ -1663,8 +1740,6 @@ def _print_Assign(self, expr): return prefix_code+self.copy_NumpyArray_Data(expr) if isinstance(rhs, (NumpyFull)): return prefix_code+self.arrayFill(expr) - if isinstance(rhs, NumpyArange): - return prefix_code+self.fill_NumpyArange(rhs, lhs) lhs = self._print(expr.lhs) rhs = self._print(expr.rhs) return prefix_code+'{} = {};\n'.format(lhs, rhs) diff --git a/pyccel/codegen/printing/fcode.py b/pyccel/codegen/printing/fcode.py index 93a4320268..c059f1944a 100644 --- a/pyccel/codegen/printing/fcode.py +++ b/pyccel/codegen/printing/fcode.py @@ -33,15 +33,15 @@ If, IfSection, For, Deallocate) from pyccel.ast.variable import (Variable, - IndexedElement, HomogeneousTupleVariable, + IndexedElement, InhomogeneousTupleVariable, DottedName, PyccelArraySize) -from pyccel.ast.operators import PyccelAdd, PyccelMul, PyccelMinus, PyccelNot +from pyccel.ast.operators import PyccelAdd, PyccelMul, PyccelMinus from pyccel.ast.operators import PyccelMod from pyccel.ast.operators import PyccelUnarySub, PyccelLt, PyccelGt, IfTernaryOperator -from pyccel.ast.core import FunctionCall, DottedFunctionCall +from pyccel.ast.core import FunctionCall, DottedFunctionCall, PyccelFunctionDef from pyccel.ast.builtins import (PythonInt, PythonType, PythonPrint, PythonRange, @@ -64,12 +64,13 @@ from pyccel.ast.mathext import math_constants -from pyccel.ast.numpyext import NumpyEmpty +from pyccel.ast.numpyext import NumpyEmpty, NumpyInt32 from pyccel.ast.numpyext import NumpyFloat, NumpyBool from pyccel.ast.numpyext import NumpyReal, NumpyImag from pyccel.ast.numpyext import NumpyRand from pyccel.ast.numpyext import NumpyNewArray from pyccel.ast.numpyext import NumpyNonZero +from pyccel.ast.numpyext import NumpySign from pyccel.ast.numpyext import Shape from pyccel.ast.numpyext import DtypePrecisionToCastFunction @@ -210,7 +211,7 @@ def __init__(self, filename, prefix_module = None): errors.set_target(filename, 'file') super().__init__() - self._constantImports = set() + self._constantImports = {} self._current_class = None self._additional_code = None @@ -220,12 +221,16 @@ def __init__(self, filename, prefix_module = None): def print_constant_imports(self): """Prints the use line for the constant imports used""" - macro = "use, intrinsic :: ISO_C_Binding, only : " - rename = [c if isinstance(c, str) else c[0] + ' => ' + c[1] for c in self._constantImports] 
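Note: a minimal sketch (mirroring the new `array_real_*_mod` test functions) of the case handled by the `_print_AugAssign` change above: C's `%` operator is only defined for integer operands, so an in-place modulo on floats is first rewritten as a plain assignment and then printed through the existing float-modulo path.

def wrap_into(x : 'float[:]', a : 'float'):
    # x[:] %= a cannot be printed as `x[i] %= a;` in C; it is lowered to
    # x[i] = x[i] % a, which uses the floating-point modulo implementation.
    x[:] %= a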
- if len(rename) == 0: - return '' - macro += " , ".join(rename) - return macro + macros = [] + for (name, imports) in self._constantImports.items(): + + macro = f"use, intrinsic :: {name}, only : " + rename = [c if isinstance(c, str) else c[0] + ' => ' + c[1] for c in imports] + if len(rename) == 0: + continue + macro += " , ".join(rename) + macros.append(macro) + return "\n".join(macros) def get_additional_imports(self): """return the additional modules collected for importing in printing stage""" @@ -280,10 +285,12 @@ def print_kind(self, expr): constant_name = iso_c_binding[self._print(expr.dtype)][precision] constant_shortcut = iso_c_binding_shortcut_mapping[constant_name] if constant_shortcut not in self.scope.all_used_symbols and constant_name != constant_shortcut: - self._constantImports.add((constant_shortcut, constant_name)) + self._constantImports.setdefault('ISO_C_Binding', set())\ + .add((constant_shortcut, constant_name)) constant_name = constant_shortcut else: - self._constantImports.add(constant_name) + self._constantImports.setdefault('ISO_C_Binding', set())\ + .add(constant_name) return constant_name def _handle_inline_func_call(self, expr, provided_args, assign_lhs = None): @@ -573,6 +580,7 @@ def _print_PythonPrint(self, expr): code = '' empty_end = FunctionCallArgument(LiteralString(''), 'end') space_end = FunctionCallArgument(LiteralString(' '), 'end') + empty_sep = FunctionCallArgument(LiteralString(''), 'sep') for f in expr.expr: if f.has_keyword: if f.keyword == 'sep': @@ -591,25 +599,31 @@ def _print_PythonPrint(self, expr): tuple_sep = LiteralString(', ') tuple_end = FunctionCallArgument(LiteralString(')')) - for f in orig_args: + for i, f in enumerate(orig_args): if f.keyword: continue else: f = f.value if isinstance(f, (InhomogeneousTupleVariable, PythonTuple, str)): if args_format: - code += self._formatted_args_to_print(args_format, args, sep, separator) + code += self._formatted_args_to_print(args_format, args, sep, separator, expr) args_format = [] args = [] + if i + 1 == len(orig_args): + end_of_tuple = empty_end + else: + end_of_tuple = FunctionCallArgument(sep, 'end') args = [FunctionCallArgument(print_arg) for tuple_elem in f for print_arg in (tuple_elem, tuple_sep)][:-1] - code += self._print(PythonPrint([tuple_start, *args, tuple_end])) + if len(f) == 1: + args.append(FunctionCallArgument(LiteralString(','))) + code += self._print(PythonPrint([tuple_start, *args, tuple_end, empty_sep, end_of_tuple], file=expr.file)) args = [] elif isinstance(f, PythonType): args_format.append('A') args.append(self._print(f.print_string)) elif isinstance(f.rank, int) and f.rank > 0: if args_format: - code += self._formatted_args_to_print(args_format, args, sep, separator) + code += self._formatted_args_to_print(args_format, args, sep, separator, expr) args_format = [] args = [] loop_scope = self.scope.create_new_loop_scope() @@ -620,25 +634,27 @@ def _print_PythonPrint(self, expr): if f.rank == 1: print_body.append(space_end) - for_body = [PythonPrint(print_body)] + for_body = [PythonPrint(print_body, file=expr.file)] for_loop = For(for_index, for_range, for_body, scope=loop_scope) for_end_char = LiteralString(']') for_end = FunctionCallArgument(for_end_char, keyword='end') - body = CodeBlock([PythonPrint([FunctionCallArgument(LiteralString('[')), empty_end]), + body = CodeBlock([PythonPrint([FunctionCallArgument(LiteralString('[')), empty_end], + file=expr.file), for_loop, - PythonPrint([FunctionCallArgument(f[max_index]), for_end])], + 
PythonPrint([FunctionCallArgument(f[max_index]), for_end], + file=expr.file)], unravelled=True) code += self._print(body) else: arg_format, arg = self._get_print_format_and_arg(f) args_format.append(arg_format) args.append(arg) - code += self._formatted_args_to_print(args_format, args, end, separator) + code += self._formatted_args_to_print(args_format, args, end, separator, expr) return code - def _formatted_args_to_print(self, fargs_format, fargs, fend, fsep): + def _formatted_args_to_print(self, fargs_format, fargs, fend, fsep, expr): """ Produce a write statement from a list of formats, args and an end statement @@ -650,6 +666,8 @@ def _formatted_args_to_print(self, fargs_format, fargs, fend, fsep): The args to be printed fend : PyccelAstNode The character describing the end of the line + expr : PyccelAstNode + The PythonPrint currently printed """ if fargs_format == ['*']: # To print the result of a FunctionCall @@ -670,8 +688,13 @@ def _formatted_args_to_print(self, fargs_format, fargs, fend, fsep): args_code = ' , '.join(args_list) args_formatting = ' '.join(fargs_format) - return "write(*, '({})', advance=\"{}\") {}\n"\ - .format(args_formatting, advance, args_code) + if expr.file == "stderr": + self._constantImports.setdefault('ISO_FORTRAN_ENV', set())\ + .add(("stderr", "error_unit")) + return f"write(stderr, '({args_formatting})', advance=\"{advance}\") {args_code}\n" + self._constantImports.setdefault('ISO_FORTRAN_ENV', set())\ + .add(("stdout", "output_unit")) + return f"write(stdout, '({args_formatting})', advance=\"{advance}\") {args_code}\n" def _get_print_format_and_arg(self,var): """ Get the format string and the printable argument for an object. @@ -1356,7 +1379,7 @@ def _print_Declare(self, expr): #TODO improve ,this is the case of character as argument elif isinstance(expr_dtype, BindCPointer): dtype = 'type(c_ptr)' - self._constantImports.add('c_ptr') + self._constantImports.setdefault('ISO_C_Binding', set()).add('c_ptr') else: dtype += '({0})'.format(self.print_kind(expr.variable)) @@ -2645,6 +2668,19 @@ def _print_ConstructorCall(self, expr): code = '{0}({1})'.format(name, code_args) return self._get_statement(code) + def _print_SysExit(self, expr): + code = "" + if expr.status.dtype is not NativeInteger() or expr.status.rank > 0: + print_arg = FunctionCallArgument(expr.status) + code = self._print(PythonPrint((print_arg, ), file="stderr")) + arg = "1" + else: + arg = expr.status + if arg.precision != 4: + arg = NumpyInt32(arg) + arg = self._print(arg) + return f'{code}stop {arg}\n' + def _print_NumpyUfuncBase(self, expr): type_name = type(expr).__name__ try: @@ -2657,6 +2693,31 @@ def _print_NumpyUfuncBase(self, expr): code = '{0}({1})'.format(func_name, code_args) return self._get_statement(code) + def _print_NumpySign(self, expr): + """ Print the corresponding Fortran function for a call to Numpy.sign + + Parameters + ---------- + expr : Pyccel ast node + Python expression with Numpy.sign call + + Returns + ------- + string + Equivalent internal function in Fortran + + Example + ------- + import numpy + + numpy.sign(x) => numpy_sign(x) + numpy_sign is an interface which calls the proper function depending on the data type of x + + """ + func = PyccelFunctionDef('numpy_sign', NumpySign) + self._additional_imports.add(Import('numpy_f90', AsName(func, 'numpy_sign'))) + return f'numpy_sign({self._print(expr.args[0])})' + def _print_NumpyTranspose(self, expr): var = expr.internal_var arg = self._print(var) @@ -3018,7 +3079,7 @@ def _print_PrecomputedCode(self, expr): 
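Note: a minimal sketch (hypothetical function name) of the `sys.exit` forms now supported by both printers: an integer status becomes the process exit code (`exit(status)` in C, `stop` with an int32 status in Fortran), while any other argument is printed to stderr and the program exits with code 1.

import sys

def check(x : 'float'):
    if x < 0.0:
        # Non-integer argument: printed to stderr, the exit code becomes 1.
        sys.exit("negative input")
    # Integer argument: used directly as the exit code.
    sys.exit(0)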
def _print_CLocFunc(self, expr): lhs = self._print(expr.result) rhs = self._print(expr.arg) - self._constantImports.add('c_loc') + self._constantImports.setdefault('ISO_C_Binding', set()).add('c_loc') return f'{lhs} = c_loc({rhs})\n' #======================================================================================= diff --git a/pyccel/codegen/utilities.py b/pyccel/codegen/utilities.py index d2a0e88c89..62ac36bd19 100644 --- a/pyccel/codegen/utilities.py +++ b/pyccel/codegen/utilities.py @@ -31,6 +31,8 @@ "pyc_math_f90" : ("math", CompileObj("pyc_math_f90.f90",folder="math")), "pyc_math_c" : ("math", CompileObj("pyc_math_c.c",folder="math")), "cwrapper" : ("cwrapper", CompileObj("cwrapper.c",folder="cwrapper", accelerators=('python',))), + "numpy_f90" : ("numpy", CompileObj("numpy_f90.f90",folder="numpy")), + "numpy_c" : ("numpy", CompileObj("numpy_c.c",folder="numpy")), } internal_libs["cwrapper_ndarrays"] = ("cwrapper_ndarrays", CompileObj("cwrapper_ndarrays.c",folder="cwrapper_ndarrays", accelerators = ('python',), @@ -89,7 +91,7 @@ def copy_internal_library(lib_folder, pyccel_dirpath, extra_files = None): to_create = False # If folder exists check if it needs updating src_files = os.listdir(lib_path) - dst_files = os.listdir(lib_dest_path) + dst_files = [f for f in os.listdir(lib_dest_path) if not f.endswith('.lock')] # Check if all files are present in destination to_update = any(s not in dst_files for s in src_files) @@ -116,7 +118,8 @@ def copy_internal_library(lib_folder, pyccel_dirpath, extra_files = None): l.acquire() # Remove all files in destination directory for d in dst_files: - os.remove(os.path.join(lib_dest_path, d)) + d_file = os.path.join(lib_dest_path, d) + os.remove(d_file) # Copy all files from the source to the destination for s in src_files: shutil.copyfile(os.path.join(lib_path, s), @@ -124,7 +127,8 @@ def copy_internal_library(lib_folder, pyccel_dirpath, extra_files = None): # Create any requested extra files if extra_files: for filename, contents in extra_files.items(): - with open(os.path.join(lib_dest_path, filename), 'w') as f: + extra_file = os.path.join(lib_dest_path, filename) + with open(extra_file, 'w', encoding="utf-8") as f: f.writelines(contents) # Release the locks for l in locks: diff --git a/pyccel/compilers/default_compilers.py b/pyccel/compilers/default_compilers.py index 82b77f990a..1710764188 100644 --- a/pyccel/compilers/default_compilers.py +++ b/pyccel/compilers/default_compilers.py @@ -112,6 +112,8 @@ if sys.platform == "darwin": gcc_info['openmp']['flags'] = ("-Xpreprocessor",'-fopenmp') gcc_info['openmp']['libs'] = ('omp',) + gcc_info['openmp']['libdirs'] = ('/usr/local/opt/libomp/lib',) + gcc_info['openmp']['includes'] = ('/usr/local/opt/libomp/include',) elif sys.platform == "win32": gcc_info['mpi_exec'] = 'gcc' gcc_info['mpi']['flags'] = ('-D','USE_MPI_MODULE') diff --git a/pyccel/naming/cnameclashchecker.py b/pyccel/naming/cnameclashchecker.py index dd5dad709c..7bc31965bb 100644 --- a/pyccel/naming/cnameclashchecker.py +++ b/pyccel/naming/cnameclashchecker.py @@ -13,7 +13,7 @@ class CNameClashChecker(metaclass = Singleton): """ Class containing functions to help avoid problematic names in C """ # Keywords as mentioned on https://en.cppreference.com/w/c/keyword - keywords = set(['auto', 'break', 'case', 'char', 'const', + keywords = set(['isign', 'fsign', 'csign', 'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do', 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'inline', 'int', 'long', 
'register', 'restrict', 'return', 'short', 'signed', diff --git a/pyccel/naming/fortrannameclashchecker.py b/pyccel/naming/fortrannameclashchecker.py index e4514d3433..f6c041cd06 100644 --- a/pyccel/naming/fortrannameclashchecker.py +++ b/pyccel/naming/fortrannameclashchecker.py @@ -40,7 +40,7 @@ class FortranNameClashChecker(metaclass = Singleton): 'unlock', 'test', 'abs', 'sqrt', 'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'exp', 'log', 'int', 'nint', 'floor', 'fraction', 'real', 'max', 'mod', 'count', - 'pack']) + 'pack', 'numpy_sign']) def has_clash(self, name, symbols): """ Indicate whether the proposed name causes any clashes diff --git a/pyccel/parser/scope.py b/pyccel/parser/scope.py index 054c38cb50..669ce59b96 100644 --- a/pyccel/parser/scope.py +++ b/pyccel/parser/scope.py @@ -477,6 +477,7 @@ def get_temporary_variable(self, dtype_or_var, name = None, **kwargs): kwargs : dict See Variable keyword arguments """ + assert isinstance(name, (str, type(None))) name = self.get_new_name(name) if isinstance(dtype_or_var, Variable): var = dtype_or_var.clone(name, **kwargs, is_temp = True) diff --git a/pyccel/parser/semantic.py b/pyccel/parser/semantic.py index 02dfb31342..57b12b78e6 100644 --- a/pyccel/parser/semantic.py +++ b/pyccel/parser/semantic.py @@ -66,14 +66,15 @@ from pyccel.ast.datatypes import default_precision from pyccel.ast.datatypes import (NativeInteger, NativeBool, NativeFloat, NativeString, - NativeGeneric, NativeComplex) + NativeGeneric, NativeComplex, + NativeVoid) from pyccel.ast.functionalexpr import FunctionalSum, FunctionalMax, FunctionalMin, GeneratorComprehension, FunctionalFor from pyccel.ast.headers import FunctionHeader, MethodHeader, Header from pyccel.ast.headers import MacroFunction, MacroVariable -from pyccel.ast.internals import Slice, PyccelSymbol, get_final_precision +from pyccel.ast.internals import PyccelInternalFunction, Slice, PyccelSymbol, get_final_precision from pyccel.ast.itertoolsext import Product from pyccel.ast.literals import LiteralTrue, LiteralFalse @@ -97,7 +98,7 @@ OMP_TaskLoop_Construct, OMP_Sections_Construct, Omp_End_Clause, OMP_Single_Construct) -from pyccel.ast.operators import PyccelIs, PyccelIsNot, IfTernaryOperator, PyccelUnarySub +from pyccel.ast.operators import PyccelArithmeticOperator, PyccelIs, PyccelIsNot, IfTernaryOperator, PyccelUnarySub from pyccel.ast.operators import PyccelNot, PyccelEq, PyccelAdd, PyccelMul, PyccelPow from pyccel.ast.operators import PyccelAssociativeParenthesis, PyccelDiv @@ -670,8 +671,6 @@ def _create_PyccelOperator(self, expr, visited_args): errors.report(msg, symbol=expr, bounding_box=(self._current_fst_node.lineno, self._current_fst_node.col_offset), severity='fatal') - #if stmts: - # expr_new = CodeBlock(stmts + [expr_new]) return expr_new def _create_Duplicate(self, val, length): @@ -707,6 +706,11 @@ def _handle_function_args(self, arguments, **settings): if isinstance(a.value, StarredArguments): args.extend([FunctionCallArgument(av) for av in a.value.args_var]) else: + if isinstance(a.value, PyccelArithmeticOperator) and a.value.rank: + tmp_var = PyccelSymbol(self.scope.get_new_name(), is_temp=True) + assign = self._visit(Assign(tmp_var, arg.value, fst= arg.value.fst)) + self._additional_exprs[-1].append(assign) + a = FunctionCallArgument(self._visit(tmp_var)) args.append(a) return args @@ -724,7 +728,7 @@ def get_type_description(self, var, include_rank = True): """ dtype = var.dtype prec = get_final_precision(var) - descr = f'{dtype}(kind={prec})' + descr = f'{dtype}{(prec * 2 if 
isinstance(dtype, NativeComplex) else prec) * 8 if prec else ""}' if include_rank and var.rank>0: dims = ','.join(':'*var.rank) descr += f'[{dims}]' @@ -755,6 +759,7 @@ def incompatible(i_arg, f_arg): get_final_precision(i_arg) != get_final_precision(f_arg) or i_arg.rank != f_arg.rank) + # Compare each set of arguments for idx, (i_arg, f_arg) in enumerate(zip(input_args, func_args)): i_arg = i_arg.value f_arg = f_arg.var @@ -830,14 +835,31 @@ def _handle_function(self, expr, func, args, **settings): errors.report("Too many arguments passed in function call", symbol = expr, severity='fatal') + + func_args = func.arguments if isinstance(func, FunctionDef) else func.functions[0].arguments + # Sort arguments to match the order in the function definition + input_args = [a for a in args if a.keyword is None] + nargs = len(input_args) + for ka in func_args[nargs:]: + key = ka.name + relevant_args = [a for a in args[nargs:] if a.keyword == key] + n_relevant_args = len(relevant_args) + assert n_relevant_args <= 1 + if n_relevant_args == 0 and ka.has_default: + input_args.append(ka.default_call_arg) + elif n_relevant_args == 1: + input_args.append(relevant_args[0]) + + args = input_args + new_expr = FunctionCall(func, args, self._current_function) if None in new_expr.args: errors.report("Too few arguments passed in function call", symbol = expr, severity='error') elif isinstance(func, FunctionDef): - self._check_argument_compatibility(new_expr.args, func.arguments, - expr, func.is_elemental) + self._check_argument_compatibility(args, func_args, + expr, func.is_elemental) return new_expr def _create_variable(self, name, dtype, rhs, d_lhs, arr_in_multirets=False): @@ -1188,7 +1210,7 @@ def _ensure_infered_type_matches_existing(self, dtype, d_var, var, is_augassign, self._current_fst_node.col_offset), severity='error', symbol=var.name) - elif var.is_ndarray and var.is_alias: + elif var.is_ndarray and var.is_alias and not is_augassign: # we allow pointers to be reassigned multiple times # pointers reassigning need to call free_pointer func # to remove memory leaks @@ -1386,7 +1408,7 @@ def _assign_GeneratorComprehension(self, lhs_name, expr, **settings): # Iterate over the loops # This provides the definitions of iterators as well # as the central expression - loops = [self._visit(expr.loops, **settings)] + loops = [self._visit(expr.loops, **settings)] # If necessary add additional expressions corresponding # to nested GeneratorComprehensions @@ -2151,6 +2173,14 @@ def _visit_FunctionCall(self, expr, **settings): return getattr(self, annotation_method)(expr, **settings) args = self._handle_function_args(expr.args, **settings) + # Correct keyword names if scope is available + # The scope is only available if the function body has been parsed + # (i.e. 
not for headers or builtin functions) + if isinstance(func, FunctionDef) and func.scope: + args = [a if a.keyword is None else \ + FunctionCallArgument(a.value, func.scope.get_expected_name(a.keyword)) \ + for a in args] + if name == 'lambdify': args = self.scope.find(str(expr.args[0]), 'symbolic_functions') @@ -2440,6 +2470,11 @@ def _visit_Assign(self, expr, **settings): d_var = self._infere_type(rhs, **settings) if d_var['memory_handling'] == 'alias' and not isinstance(lhs, IndexedElement): rhs = rhs.internal_var + elif isinstance(rhs, PyccelInternalFunction) and isinstance(rhs.dtype, NativeVoid): + if expr.lhs.is_temp: + return rhs + else: + raise NotImplementedError("Cannot assign result of a function without a return") else: d_var = self._infere_type(rhs, **settings) @@ -2596,11 +2631,10 @@ def _visit_Assign(self, expr, **settings): new_expr = Assign(l, r) - if is_pointer_i: - new_expr = AliasAssign(l, r) - - elif isinstance(expr, AugAssign): + if isinstance(expr, AugAssign): new_expr = AugAssign(l, expr.op, r) + elif is_pointer_i: + new_expr = AliasAssign(l, r) elif new_expr.is_symbolic_alias: @@ -3031,9 +3065,7 @@ def _visit_Return(self, expr, **settings): assigns = [] for v,r in zip(return_vars, results): if not (isinstance(r, PyccelSymbol) and r == (v.name if isinstance(v, Variable) else v)): - a = Assign(v, r) - a.set_fst(expr.fst) - a = self._visit_Assign(a) + a = self._visit(Assign(v, r, fst=expr.fst)) assigns.append(a) results = [self._visit(i, **settings) for i in return_vars] diff --git a/pyccel/parser/syntactic.py b/pyccel/parser/syntactic.py index 5f082cef08..0fed30cbf8 100644 --- a/pyccel/parser/syntactic.py +++ b/pyccel/parser/syntactic.py @@ -786,12 +786,14 @@ def fill_types(ls): results = [] result_counter = 1 + local_symbols = self.scope.local_used_symbols + for r in zip(*returns): r0 = r[0] pyccel_symbol = isinstance(r0, PyccelSymbol) same_results = all(r0 == ri for ri in r) - name_available = all(r0 != a.name for a in arguments) + name_available = all(r0 != a.name for a in arguments) and r0 in local_symbols if pyccel_symbol and same_results and name_available: result_name = r0 diff --git a/pyccel/stdlib/cwrapper_ndarrays/cwrapper_ndarrays.c b/pyccel/stdlib/cwrapper_ndarrays/cwrapper_ndarrays.c index ee3afd869b..4057470d70 100644 --- a/pyccel/stdlib/cwrapper_ndarrays/cwrapper_ndarrays.c +++ b/pyccel/stdlib/cwrapper_ndarrays/cwrapper_ndarrays.c @@ -321,7 +321,10 @@ PyObject* ndarray_to_pyarray(t_ndarray *o) PyObject* c_ndarray_to_pyarray(t_ndarray *o) { int FLAGS = NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_WRITEABLE; - return PyArray_NewFromDescr(&PyArray_Type, PyArray_DescrFromType(o->type), + + enum NPY_TYPES npy_type = get_numpy_type(o); + + return PyArray_NewFromDescr(&PyArray_Type, PyArray_DescrFromType(npy_type), o->nd, _ndarray_to_numpy_shape(o->shape, o->nd), _ndarray_to_numpy_strides(o->strides, o->type_size, o->nd), o->raw_data, FLAGS, NULL); diff --git a/pyccel/stdlib/numpy/numpy_c.c b/pyccel/stdlib/numpy/numpy_c.c new file mode 100644 index 0000000000..36e4a205ec --- /dev/null +++ b/pyccel/stdlib/numpy/numpy_c.c @@ -0,0 +1,24 @@ +/* --------------------------------------------------------------------------------------- */ +/* This file is part of Pyccel which is released under MIT License. See the LICENSE file */ +/* or go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. 
*/ +/* --------------------------------------------------------------------------------------- */ + +#include "numpy_c.h" + +/* numpy.sign for float, double and integers */ +long long int isign(long long int x) +{ + return SIGN(x); +} + +/* numpy.sign for float, double and integers */ +double fsign(double x) +{ + return SIGN(x); +} + +/* numpy.sign for complex */ +double complex csign(double complex x) +{ + return x ? ((!creal(x) && cimag(x) < 0) || (creal(x) < 0) ? -1 : 1) : 0; +} diff --git a/pyccel/stdlib/numpy/numpy_c.h b/pyccel/stdlib/numpy/numpy_c.h new file mode 100644 index 0000000000..4133e9dbe9 --- /dev/null +++ b/pyccel/stdlib/numpy/numpy_c.h @@ -0,0 +1,20 @@ +/* --------------------------------------------------------------------------------------- */ +/* This file is part of Pyccel which is released under MIT License. See the LICENSE file */ +/* or go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. */ +/* --------------------------------------------------------------------------------------- */ + +#ifndef NUMPY_H +# define NUMPY_H + +# include +# include +# include +# include + +#define SIGN(x) (x ? (x < 0 ? -1 : 1) : 0) + +long long int isign(long long int x); +double fsign(double x); +double complex csign(double complex x); + +#endif diff --git a/pyccel/stdlib/numpy/numpy_f90.f90 b/pyccel/stdlib/numpy/numpy_f90.f90 new file mode 100644 index 0000000000..7e057225d1 --- /dev/null +++ b/pyccel/stdlib/numpy/numpy_f90.f90 @@ -0,0 +1,156 @@ +! ! --------------------------------------------------------------------------------------- ! +! ! This file is part of Pyccel which is released under MIT License. See the LICENSE file ! +! ! or go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. ! +! ! --------------------------------------------------------------------------------------- ! + +module numpy_f90 + + + use, intrinsic :: ISO_C_Binding, only : i64 => C_INT64_T , f32 => & + C_FLOAT , i32 => C_INT32_T , f64 => C_DOUBLE , i8 => C_INT8_T , & + c64 => C_DOUBLE_COMPLEX , i16 => C_INT16_T , c32 => & + C_FLOAT_COMPLEX + implicit none + + private + + public :: numpy_sign + + interface numpy_sign + module procedure numpy_sign_i8 + module procedure numpy_sign_i16 + module procedure numpy_sign_i32 + module procedure numpy_sign_i64 + module procedure numpy_sign_f32 + module procedure numpy_sign_f64 + module procedure numpy_sign_c32 + module procedure numpy_sign_c64 + end interface + + contains + + !........................................ + elemental function numpy_sign_i8(x) result(Out_0001) + + implicit none + + integer(i8) :: Out_0001 + integer(i8), value :: x + + Out_0001 = merge(0_i8, (merge(1_i8, -1_i8, x > 0_i8)), x == 0_i8) + return + + end function numpy_sign_i8 + !........................................ + + !........................................ + elemental function numpy_sign_i16(x) result(Out_0001) + + implicit none + + integer(i16) :: Out_0001 + integer(i16), value :: x + + Out_0001 = merge(0_i16, (merge(1_i16, -1_i16, x > 0_i16)), x == 0_i16) + return + + end function numpy_sign_i16 + !........................................ + + !........................................ + elemental function numpy_sign_i32(x) result(Out_0001) + + implicit none + + integer(i32) :: Out_0001 + integer(i32), value :: x + + Out_0001 = merge(0_i32, (merge(1_i32, -1_i32, x > 0_i32)), x == 0_i32) + return + + end function numpy_sign_i32 + !........................................ + + !........................................ 
+ elemental function numpy_sign_i64(x) result(Out_0001) + + implicit none + + integer(i64) :: Out_0001 + integer(i64), value :: x + + Out_0001 = merge(0_i64, (merge(1_i64, -1_i64, x > 0_i64)), x == 0_i64) + return + + end function numpy_sign_i64 + !........................................ + + !........................................ + elemental function numpy_sign_f32(x) result(Out_0001) + + implicit none + + real(f32) :: Out_0001 + real(f32), value :: x + + Out_0001 = merge(0_f32, (merge(1_f32, -1_f32, x > 0_f32)), x == 0_f32) + return + + end function numpy_sign_f32 + !........................................ + + !........................................ + elemental function numpy_sign_f64(x) result(Out_0001) + + implicit none + + real(f64) :: Out_0001 + real(f64), value :: x + + Out_0001 = merge(0_f64, (merge(1_f64, -1_f64, x > 0_f64)), x == 0_f64) + return + + end function numpy_sign_f64 + !........................................ + + !........................................ + elemental function numpy_sign_c32(x) result(Out_0001) + + implicit none + + complex(c32) :: Out_0001 + complex(c32), value :: x + logical :: x_ne_zero ! Condition for x different than 0 + logical :: x_lt_zero ! Condition for x less than 0 + + x_ne_zero = (REALPART(x) .ne. 0_f32) .or. (IMAGPART(x) .ne. 0_f32) + x_lt_zero = ((REALPART(x) .eq. 0_f32) .and. IMAGPART(x) .lt. 0_f32) & + .or. (REALPART(x) .lt. 0_f32) + + Out_0001 = merge(merge(-1_c32, 1_c32, x_lt_zero), 0_c32, x_ne_zero) + return + + end function numpy_sign_c32 + !........................................ + + !........................................ + elemental function numpy_sign_c64(x) result(Out_0001) + + implicit none + + complex(c64) :: Out_0001 + complex(c64), value :: x + logical :: x_ne_zero ! Condition for x different than 0 + logical :: x_lt_zero ! Condition for x less than 0 + + x_ne_zero = (REALPART(x) .ne. 0_f64) .or. (IMAGPART(x) .ne. 0_f64) + x_lt_zero = ((REALPART(x) .eq. 0_f64) .and. IMAGPART(x) .lt. 0_f64) & + .or. (REALPART(x) .lt. 0_f64) + + Out_0001 = merge(merge(-1_c64, 1_c64, x_lt_zero), 0_c64, x_ne_zero) + return + + end function numpy_sign_c64 + !........................................ 
+ + end module numpy_f90 diff --git a/pyccel/version.py b/pyccel/version.py index 050c34f9dc..fea6104e13 100644 --- a/pyccel/version.py +++ b/pyccel/version.py @@ -1,4 +1,4 @@ """ Module specifying the current version string for pyccel """ -__version__ = "1.6.0" +__version__ = "1.7.0" diff --git a/pyproject.toml b/pyproject.toml index c3118a9127..9f6ed0ea68 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,10 +1,10 @@ [build-system] requires = [ "setuptools >= 37, < 61", - "numpy", + "numpy >= 1.16", "sympy>=1.2", - "termcolor", + "termcolor >= 1.0.0", "textx>=2.2", - "filelock", + "filelock >= 3.4.0", ] build-backend = "setuptools.build_meta" diff --git a/setup.cfg b/setup.cfg index 1f4fdcb0e4..1fbf13e34c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -13,15 +13,24 @@ long_description_content_type = text/markdown [options] packages = find: install_requires = - numpy - sympy>=1.2 - termcolor - textx>=2.2 - filelock + numpy >= 1.16 + sympy >= 1.2 + termcolor >= 1.0.0 + textx >= 2.2 + filelock >= 3.4.0 python_requires = >= 3.7 zip_safe = False include_package_data = True +[options.extras_require] +test = + pytest >= 2.7 + scipy >= 1.5.0 + mpi4py >= 3.0.0 + coverage >= 3.1 + astunparse >= 1.6.0 + pytest-xdist >= 1.16 + [options.entry_points] console_scripts = pyccel = pyccel.commands.console:pyccel diff --git a/tests/epyccel/modules/arrays.py b/tests/epyccel/modules/arrays.py index 01ae040a42..1315866fed 100644 --- a/tests/epyccel/modules/arrays.py +++ b/tests/epyccel/modules/arrays.py @@ -1,12 +1,21 @@ # pylint: disable=missing-function-docstring, missing-module-docstring/ import numpy as np -from pyccel.decorators import types, stack_array, allow_negative_index +from pyccel.decorators import types, template, stack_array, allow_negative_index a_1d = np.array([1 << i for i in range(21)], dtype=int) a_2d_f = np.array([[1 << j for j in range(21)] for i in range(21)], dtype=int, order='F') a_2d_c = np.array([[1 << j for j in range(21)] for i in range(21)], dtype=int) + +@types('T', 'T') +@template(name='T' , types=['int', 'int8', 'int16', 'int32', 'int64', 'float', + 'float32', 'float64', 'complex64', 'complex128']) +def array_return_first_element(a, b): + from numpy import array + x = array([a,b]) + return x[0] + #============================================================================== # 1D ARRAYS OF INT-32 #============================================================================== @@ -287,6 +296,10 @@ def array_real_1d_scalar_mul( x, a ): def array_real_1d_scalar_div( x, a ): x[:] /= a +@types( 'real[:]', 'real') +def array_real_1d_scalar_mod( x, a ): + x[:] %= a + @types( 'real[:]', 'real' ) def array_real_1d_scalar_idiv( x, a ): x[:] = x // a @@ -307,6 +320,10 @@ def array_real_1d_mul( x, y ): def array_real_1d_div( x, y ): x[:] /= y +@types( 'real[:]', 'real[:]') +def array_real_1d_mod( x, y ): + x[:] %= y + @types( 'real[:]', 'real[:]' ) def array_real_1d_idiv( x, y ): x[:] = x // y @@ -331,6 +348,10 @@ def array_real_2d_C_scalar_mul( x, a ): def array_real_2d_C_scalar_div( x, a ): x[:,:] /= a +@types( 'real[:,:]', 'real' ) +def array_real_2d_C_scalar_mod( x, a ): + x[:,:] %= a + @types( 'real[:,:]', 'real[:,:]' ) def array_real_2d_C_add( x, y ): x[:,:] += y @@ -347,6 +368,10 @@ def array_real_2d_C_mul( x, y ): def array_real_2d_C_div( x, y ): x[:,:] /= y +@types( 'real[:,:]', 'real[:,:]' ) +def array_real_2d_C_mod( x, y ): + x[:,:] %= y + @types('real[:,:]') def array_real_2d_C_array_initialization(a): from numpy import array @@ -393,6 +418,10 @@ def array_real_2d_F_scalar_mul( 
x, a ): def array_real_2d_F_scalar_div( x, a ): x[:,:] /= a +@types( 'real[:,:](order=F)', 'real' ) +def array_real_2d_F_scalar_mod( x, a ): + x[:,:] %= a + @types( 'real[:,:](order=F)', 'real[:,:](order=F)' ) def array_real_2d_F_add( x, y ): x[:,:] += y @@ -409,6 +438,10 @@ def array_real_2d_F_mul( x, y ): def array_real_2d_F_div( x, y ): x[:,:] /= y +@types( 'real[:,:](order=F)', 'real[:,:](order=F)' ) +def array_real_2d_F_mod( x, y ): + x[:,:] %= y + @types('real[:,:](order=F)') def array_real_2d_F_array_initialization(a): from numpy import array @@ -1780,6 +1813,12 @@ def arr_arange_6(): a = np.arange(20, 1, -1.1) return np.shape(a)[0], a[0], a[-1] +def arr_arange_7(arr : 'int[:,:]'): + import numpy as np + n, m = arr.shape + for i in range(n): + arr[i] = np.arange(i, i+m) + def iterate_slice(i : int): import numpy as np a = np.arange(15) diff --git a/tests/epyccel/modules/augassign.py b/tests/epyccel/modules/augassign.py new file mode 100644 index 0000000000..bf3fbaeec4 --- /dev/null +++ b/tests/epyccel/modules/augassign.py @@ -0,0 +1,155 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +from pyccel.decorators import types + +# += + +@types('int[:]') +def augassign_add_1d_int(a): + b = a + b += 42 + return b[0] + +@types('float[:]') +def augassign_add_1d_float(a): + b = a + b += 4.2 + return b[0] + +@types('complex[:]') +def augassign_add_1d_complex(a): + b = a + b += (4.0 + 2.0j) + return b[0] + +@types('int[:,:]') +def augassign_add_2d_int(a): + b = a + b += 42 + return b[0][0] + +@types('float[:,:]') +def augassign_add_2d_float(a): + b = a + b += 4.2 + return b[0][0] + +@types('complex[:,:]') +def augassign_add_2d_complex(a): + b = a + b += (4.0 + 2.0j) + return b[0][0] + +# -= + +@types('int[:]') +def augassign_sub_1d_int(a): + b = a + b -= 42 + return b[0] + +@types('float[:]') +def augassign_sub_1d_float(a): + b = a + b -= 4.2 + return b[0] + +@types('complex[:]') +def augassign_sub_1d_complex(a): + b = a + b -= (4.0 + 2.0j) + return b[0] + +@types('int[:,:]') +def augassign_sub_2d_int(a): + b = a + b -= 42 + return b[0][0] + +@types('float[:,:]') +def augassign_sub_2d_float(a): + b = a + b -= 4.2 + return b[0][0] + +@types('complex[:,:]') +def augassign_sub_2d_complex(a): + b = a + b -= (4.0 + 2.0j) + return b[0][0] + +# *= + +@types('int[:]') +def augassign_mul_1d_int(a): + b = a + b *= 42 + return b[0] + +@types('float[:]') +def augassign_mul_1d_float(a): + b = a + b *= 4.2 + return b[0] + +@types('complex[:]') +def augassign_mul_1d_complex(a): + b = a + b *= (4.0 + 2.0j) + return b[0] + +@types('int[:,:]') +def augassign_mul_2d_int(a): + b = a + b *= 42 + return b[0][0] + +@types('float[:,:]') +def augassign_mul_2d_float(a): + b = a + b *= 4.2 + return b[0][0] + +@types('complex[:,:]') +def augassign_mul_2d_complex(a): + b = a + b *= (4.0 + 2.0j) + return b[0][0] + +# /= + +@types('int[:]') +def augassign_div_1d_int(a): + b = a + b /= 42 + return b[0] + +@types('float[:]') +def augassign_div_1d_float(a): + b = a + b /= 4.2 + return b[0] + +@types('complex[:]') +def augassign_div_1d_complex(a): + b = a + b /= (4.0 + 2.0j) + return b[0] + +@types('int[:,:]') +def augassign_div_2d_int(a): + b = a + b /= 42 + return b[0][0] + +@types('float[:,:]') +def augassign_div_2d_float(a): + b = a + b /= 4.2 + return b[0][0] + +@types('complex[:,:]') +def augassign_div_2d_complex(a): + b = a + b /= (4.0 + 2.0j) + return b[0][0] diff --git a/tests/epyccel/modules/call_user_defined_funcs.py b/tests/epyccel/modules/call_user_defined_funcs.py index 
9e32e6cf3a..3c3c614bee 100644 --- a/tests/epyccel/modules/call_user_defined_funcs.py +++ b/tests/epyccel/modules/call_user_defined_funcs.py @@ -33,3 +33,12 @@ def circle_volume(radius): volume = my_mult(my_mult(my_div(3. , 4.), my_pi()), my_cub(radius)) not_change(volume) return volume + +def arr_mult_scalar(T: 'int[:]', t: int = 13): + x = T * t + return x + +def alias(T: 'int[:]', t: int): + x = arr_mult_scalar(T, t=t) + y = arr_mult_scalar(t=t, T=T) + return x, y diff --git a/tests/epyccel/modules/numpy_sign.py b/tests/epyccel/modules/numpy_sign.py new file mode 100644 index 0000000000..db5403a90b --- /dev/null +++ b/tests/epyccel/modules/numpy_sign.py @@ -0,0 +1,295 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring\ + +from pyccel.decorators import types + +def complex_nul(): + import numpy as np + b = np.sign(complex(0+0j)) + return b + +def complex_pos(): + import numpy as np + b = np.sign(complex(1+2j)) + return b + +def complex_neg(): + import numpy as np + b = np.sign(complex(-1-2j)) + return b + +def complex64_nul(): + import numpy as np + b = np.sign(np.complex64(0+0j)) + return b + +def complex64_pos(): + import numpy as np + b = np.sign(np.complex64(64+64j)) + return b + +def complex64_neg(): + import numpy as np + b = np.sign(np.complex64(-64-64j)) + return b + +def complex128_nul(): + import numpy as np + b = np.sign(np.complex128(0+0j)) + return b + +def complex128_pos(): + import numpy as np + b = np.sign(np.complex128(128+128j)) + return b + +def complex128_neg(): + import numpy as np + b = np.sign(np.complex128(-128-128j)) + return b + +def complex_pos_neg(): + import numpy as np + b = np.sign(complex(1-2j)) + return b + +def complex_neg_pos(): + import numpy as np + b = np.sign(complex(-1+2j)) + return b + +def complex64_pos_neg(): + import numpy as np + b = np.sign(np.complex64(64-64j)) + return b + +def complex64_neg_pos(): + import numpy as np + b = np.sign(np.complex64(-64+64j)) + return b + +def complex128_pos_neg(): + import numpy as np + b = np.sign(np.complex128(128-128j)) + return b + +def complex128_neg_pos(): + import numpy as np + b = np.sign(np.complex128(-128+128j)) + return b + +def int16_pos(): + import numpy as np + b = np.sign(np.int16(16)) + return b + +def int16_neg(): + import numpy as np + b = np.sign(np.int16(-16)) + return b + +def int32_pos(): + import numpy as np + b = np.sign(np.int32(32)) + return b + +def int32_neg(): + import numpy as np + b = np.sign(np.int32(-32)) + return b + +def int64_pos(): + import numpy as np + b = np.sign(np.int64(64)) + return b + +def int64_neg(): + import numpy as np + b = np.sign(np.int64(-64)) + return b + +def float_pos(): + import numpy as np + b = np.sign(float(32.32)) + return b + +def float_neg(): + import numpy as np + b = np.sign(float(-32.32)) + return b + +def float_nul(): + import numpy as np + b = np.sign(float(0.0)) + return b + +def float64_pos(): + import numpy as np + b = np.sign(np.float64(64.64)) + return b + +def float64_neg(): + import numpy as np + b = np.sign(np.float64(-64.64)) + return b + +def literal_complex_pos(): + import numpy as np + b = np.sign(1+2j) + return b + +def literal_complex_neg(): + import numpy as np + b = np.sign(-1-2j) + return b + +def literal_complex_nul_imag(): + import numpy as np + b = np.sign(0-42j) + return b + +def literal_complex_real_nul(): + import numpy as np + b = np.sign(-42+0j) + return b + +def literal_complex_nul_nul(): + import numpy as np + b = np.sign(-0-0j) + return b + +def literal_int_pos(): + import numpy as np + b = 
np.sign(42) + return b + +def literal_int_neg(): + import numpy as np + b = np.sign(-42) + return b + +def literal_int_nul(): + import numpy as np + b = np.sign(0) + return b + +def literal_float_pos(): + import numpy as np + b = np.sign(42.42) + return b + +def literal_float_neg(): + import numpy as np + b = np.sign(-42.42) + return b + +def literal_float_nul(): + import numpy as np + b = np.sign(0.0) + return b + +################### +# Arrays tests +################### + +# Intergers + +@types('int8[:]') +def array_1d_int8(x): + import numpy as np + y = np.sign(x) + return y + +@types('int16[:]') +def array_1d_int16(x): + import numpy as np + y = np.sign(x) + return y + +@types('int32[:]') +def array_1d_int32(x): + import numpy as np + y = np.sign(x) + return y + +@types('int64[:]') +def array_1d_int64(x): + import numpy as np + y = np.sign(x) + return y + +@types('int8[:,:]') +def array_2d_int8(x): + import numpy as np + y = np.sign(x) + return y + +@types('int16[:,:]') +def array_2d_int16(x): + import numpy as np + y = np.sign(x) + return y + +@types('int32[:,:]') +def array_2d_int32(x): + import numpy as np + y = np.sign(x) + return y + +@types('int64[:,:]') +def array_2d_int64(x): + import numpy as np + y = np.sign(x) + return y + +# Floats + +@types('float32[:]') +def array_1d_float32(x): + import numpy as np + y = np.sign(x) + return y + +@types('float64[:]') +def array_1d_float64(x): + import numpy as np + y = np.sign(x) + return y + +@types('float32[:,:]') +def array_2d_float32(x): + import numpy as np + y = np.sign(x) + return y + +@types('float64[:,:]') +def array_2d_float64(x): + import numpy as np + y = np.sign(x) + return y + +# Complexs + +@types('complex64[:]') +def array_1d_complex64(x): + import numpy as np + y = np.sign(x) + return y + +@types('complex128[:]') +def array_1d_complex128(x): + import numpy as np + y = np.sign(x) + return y + +@types('complex64[:,:]') +def array_2d_complex64(x): + import numpy as np + y = np.sign(x) + return y + + +@types('complex128[:,:]') +def array_2d_complex128(x): + import numpy as np + y = np.sign(x) + return y diff --git a/tests/epyccel/recognised_functions/test_numpy_funcs.py b/tests/epyccel/recognised_functions/test_numpy_funcs.py index cefe285639..4be09eeb20 100644 --- a/tests/epyccel/recognised_functions/test_numpy_funcs.py +++ b/tests/epyccel/recognised_functions/test_numpy_funcs.py @@ -5,7 +5,7 @@ from numpy import isclose, iinfo, finfo import numpy as np -from pyccel.decorators import types +from pyccel.decorators import types, template from pyccel.epyccel import epyccel min_int8 = iinfo('int8').min @@ -154,8 +154,8 @@ def absolute_call_r(x): f1 = epyccel(absolute_call_r, language = language) x = uniform(high=1e6) - assert(isclose(f1(x), absolute_call_r(x), rtol=RTOL, atol=ATOL)) - assert(isclose(f1(-x), absolute_call_r(-x), rtol=RTOL, atol=ATOL)) + assert f1(x) == absolute_call_r(x) + assert f1(-x) == absolute_call_r(-x) assert matching_types(f1(x), absolute_call_r(x)) def test_absolute_call_i(language): @@ -166,10 +166,31 @@ def absolute_call_i(x): f1 = epyccel(absolute_call_i, language = language) x = randint(1e6) - assert(isclose(f1(x), absolute_call_i(x), rtol=RTOL, atol=ATOL)) - assert(isclose(f1(-x), absolute_call_i(-x), rtol=RTOL, atol=ATOL)) + assert f1(x) == absolute_call_i(x) + assert f1(-x) == absolute_call_i(-x) assert matching_types(f1(x), absolute_call_i(x)) +def test_absolute_call_c(language): + @template(name='T', types=['complex','complex64','complex128']) + @types('T') + def absolute_call_c(x): + from numpy 
import absolute + return absolute(x) + + f1 = epyccel(absolute_call_c, language = language) + x = uniform(high=1e6)+1j*uniform(high=1e6) + assert(isclose(f1(x), absolute_call_c(x), rtol=RTOL, atol=ATOL)) + assert(isclose(f1(-x), absolute_call_c(-x), rtol=RTOL, atol=ATOL)) + assert matching_types(f1(x), absolute_call_c(x)) + + x = np.complex64(uniform(high=1e6)-1j*uniform(high=1e6)) + assert(isclose(f1(x), absolute_call_c(x), rtol=RTOL32, atol=ATOL32)) + assert matching_types(f1(x), absolute_call_c(x)) + + x = np.complex128(uniform(high=1e6)-1j*uniform(high=1e6)) + assert(isclose(f1(x), absolute_call_c(x), rtol=RTOL, atol=ATOL)) + assert matching_types(f1(x), absolute_call_c(x)) + def test_absolute_phrase_r_r(language): @types('real','real') def absolute_phrase_r_r(x,y): @@ -1413,6 +1434,69 @@ def create_full_val_real_complex128(val): assert(isclose( f_real_complex128(val_float) , create_full_val_real_complex128(val_float), rtol=RTOL, atol=ATOL)) assert matching_types(f_real_complex128(val_float), create_full_val_real_complex128(val_float)) +@pytest.mark.parametrize( 'language', ( + pytest.param("fortran", marks = pytest.mark.fortran), + pytest.param("c", marks = pytest.mark.c), + pytest.param("python", marks = [ + pytest.mark.skip("full handles types in __new__ so it " + "cannot be used in a translated interface in python"), + pytest.mark.python] + ), + ) +) + +def test_full_dtype_auto(language): + @types('T') + @template(name='T', types=['int','float', 'complex', 'int32', + 'float32', 'float64', 'complex64', 'complex128']) + def create_full_val_auto(val): + from numpy import full + a = full(3,val) + return a[0] + + integer = randint(low = min_int, high = max_int, dtype=int) + integer32 = randint(low = min_int32, high = max_int32, dtype=np.int32) + + fl = float(integer) + fl32 = np.float32(fl) + fl64 = np.float64(fl) + + cmplx = complex(integer) + cmplx64 = np.complex64(fl32) + cmplx128 = np.complex128(fl64) + + f_int = epyccel(create_full_val_auto, language = language) + assert(f_int(integer) == create_full_val_auto(integer)) + assert matching_types(f_int(integer), create_full_val_auto(integer)) + + f_float = epyccel(create_full_val_auto, language = language) + assert(isclose(f_float(fl), create_full_val_auto(fl), rtol=RTOL, atol=ATOL)) + assert matching_types(f_float(fl), create_full_val_auto(fl)) + + f_complex = epyccel(create_full_val_auto, language = language) + assert(isclose(f_complex(cmplx), create_full_val_auto(cmplx), rtol=RTOL, atol=ATOL)) + assert matching_types(f_complex(cmplx), create_full_val_auto(cmplx)) + + f_int32 = epyccel(create_full_val_auto, language = language) + assert(f_int32(integer32) == create_full_val_auto(integer32)) + assert matching_types(f_int32(integer32), create_full_val_auto(integer32)) + + f_float32 = epyccel(create_full_val_auto, language = language) + assert(isclose(f_float32(fl32) , create_full_val_auto(fl32), rtol=RTOL, atol=ATOL)) + assert matching_types(f_float32(fl32), create_full_val_auto(fl32)) + + f_float64 = epyccel(create_full_val_auto, language = language) + assert(isclose(f_float64(fl64) , create_full_val_auto(fl64), rtol=RTOL, atol=ATOL)) + assert matching_types(f_float64(fl64), create_full_val_auto(fl64)) + + f_complex64 = epyccel(create_full_val_auto, language = language) + assert(isclose(f_complex64(cmplx64) , create_full_val_auto(cmplx64), rtol=RTOL, atol=ATOL)) + assert matching_types(f_complex64(cmplx64), create_full_val_auto(cmplx64)) + + f_complex128 = epyccel(create_full_val_auto, language = language) + 
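+    # complex128 input: with the dtype deduced automatically from the fill value,
+    # the translated full call should match the Python result in both value and type.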
assert(isclose(f_complex128(cmplx128) , create_full_val_auto(cmplx128), rtol=RTOL, atol=ATOL)) + assert matching_types(f_complex128(cmplx128), create_full_val_auto(cmplx128)) + def test_full_combined_args(language): def create_full_1_shape(): from numpy import full, shape @@ -2568,47 +2652,102 @@ def create_full_like_shape_F(n): ) def test_full_like_dtype(language): @types('int') + def create_full_like_val_int_int_auto(val): + from numpy import full_like, array + arr = array([5, 1, 8, 0, 9], int) + a = full_like(arr,val) + return a[0] + @types('int') def create_full_like_val_int_int(val): from numpy import full_like, array arr = array([5, 1, 8, 0, 9]) a = full_like(arr,val,int) return a[0] + + @types('int') + def create_full_like_val_int_float_auto(val): + from numpy import full_like, array + arr = array([5, 1, 8, 0, 9], float) + a = full_like(arr,val) + return a[0] @types('int') def create_full_like_val_int_float(val): from numpy import full_like, array arr = array([5, 1, 8, 0, 9]) a = full_like(arr,val,float) return a[0] + + @types('int') + def create_full_like_val_int_complex_auto(val): + from numpy import full_like, array + arr = array([5, 1, 8, 0, 9], complex) + a = full_like(arr,val) + return a[0] @types('int') def create_full_like_val_int_complex(val): from numpy import full_like, array arr = array([5, 1, 8, 0, 9]) a = full_like(arr,val,complex) return a[0] + + @types('real') + def create_full_like_val_real_int32_auto(val): + from numpy import full_like, int32, array + arr = array([5, 1, 8, 0, 9], int32) + a = full_like(arr,val) + return a[0] @types('real') def create_full_like_val_real_int32(val): from numpy import full_like, int32, array arr = array([5, 1, 8, 0, 9]) a = full_like(arr,val,int32) return a[0] + + @types('real') + def create_full_like_val_real_float32_auto(val): + from numpy import full_like, float32, array + arr = array([5, 1, 8, 0, 9], float32) + a = full_like(arr,val) + return a[0] @types('real') def create_full_like_val_real_float32(val): from numpy import full_like, float32, array arr = array([5, 1, 8, 0, 9]) a = full_like(arr,val,float32) return a[0] + + @types('real') + def create_full_like_val_real_float64_auto(val): + from numpy import full_like, float64, array + arr = array([5, 1, 8, 0, 9], float64) + a = full_like(arr,val) + return a[0] @types('real') def create_full_like_val_real_float64(val): from numpy import full_like, float64, array arr = array([5, 1, 8, 0, 9]) a = full_like(arr,val,float64) return a[0] + + @types('real') + def create_full_like_val_real_complex64_auto(val): + from numpy import full_like, complex64, array + arr = array([5, 1, 8, 0, 9], complex64) + a = full_like(arr,val) + return a[0] @types('real') def create_full_like_val_real_complex64(val): from numpy import full_like, complex64, array arr = array([5, 1, 8, 0, 9]) a = full_like(arr,val,complex64) return a[0] + + @types('real') + def create_full_like_val_real_complex128_auto(val): + from numpy import full_like, complex128, array + arr = array([5, 1, 8, 0, 9], complex128) + a = full_like(arr,val) + return a[0] @types('real') def create_full_like_val_real_complex128(val): from numpy import full_like, complex128, array @@ -2651,6 +2790,38 @@ def create_full_like_val_real_complex128(val): assert(isclose( f_real_complex128(val_float) , create_full_like_val_real_complex128(val_float), rtol=RTOL, atol=ATOL)) assert matching_types(f_real_complex128(val_float), create_full_like_val_real_complex128(val_float)) + f_int_int_auto = epyccel(create_full_like_val_int_int_auto, language = language) + 
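+    # The *_auto variants below omit the dtype argument, so full_like should inherit
+    # the dtype of the template array; each pair of checks compares the returned value
+    # and the result type against the pure Python reference.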
assert( f_int_int_auto(val_int) == create_full_like_val_int_int_auto(val_int)) + assert matching_types(f_int_int(val_int), create_full_like_val_int_int_auto(val_int)) + + f_int_float_auto = epyccel(create_full_like_val_int_float_auto, language = language) + assert(isclose( f_int_float_auto(val_int) , create_full_like_val_int_float_auto(val_int), rtol=RTOL, atol=ATOL)) + assert matching_types(f_int_float_auto(val_int), create_full_like_val_int_float_auto(val_int)) + + f_int_complex_auto = epyccel(create_full_like_val_int_complex_auto, language = language) + assert(isclose( f_int_complex_auto(val_int) , create_full_like_val_int_complex_auto(val_int), rtol=RTOL, atol=ATOL)) + assert matching_types(f_int_complex_auto(val_int), create_full_like_val_int_complex_auto(val_int)) + + f_real_int32_auto = epyccel(create_full_like_val_real_int32_auto, language = language) + assert( f_real_int32_auto(val_float) == create_full_like_val_real_int32_auto(val_float)) + assert matching_types(f_real_int32_auto(val_float), create_full_like_val_real_int32_auto(val_float)) + + f_real_float32_auto = epyccel(create_full_like_val_real_float32_auto, language = language) + assert(isclose( f_real_float32_auto(val_float) , create_full_like_val_real_float32_auto(val_float), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_float32_auto(val_float), create_full_like_val_real_float32_auto(val_float)) + + f_real_float64_auto = epyccel(create_full_like_val_real_float64_auto, language = language) + assert(isclose( f_real_float64_auto(val_float) , create_full_like_val_real_float64_auto(val_float), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_float64_auto(val_float), create_full_like_val_real_float64_auto(val_float)) + + f_real_complex64_auto = epyccel(create_full_like_val_real_complex64_auto, language = language) + assert(isclose( f_real_complex64_auto(val_float) , create_full_like_val_real_complex64_auto(val_float), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_complex64_auto(val_float), create_full_like_val_real_complex64_auto(val_float)) + + f_real_complex128_auto = epyccel(create_full_like_val_real_complex128_auto, language = language) + assert(isclose( f_real_complex128_auto(val_float) , create_full_like_val_real_complex128_auto(val_float), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_complex128_auto(val_float), create_full_like_val_real_complex128_auto(val_float)) + def test_full_like_combined_args(language): def create_full_like_1_shape(): from numpy import full_like, shape, array @@ -2756,48 +2927,96 @@ def create_empty_like_shape_F(n,m): def test_empty_like_dtype(language): + def create_empty_like_val_int_auto(): + from numpy import empty_like, array + arr = array([5, 1, 8, 0, 9], dtype=int) + a = empty_like(arr) + return a[0] + def create_empty_like_val_int(): from numpy import empty_like, array arr = array([5, 1, 8, 0, 9]) a = empty_like(arr, int) return a[0] + def create_empty_like_val_float_auto(): + from numpy import empty_like, array + arr = array([5, 1, 8, 0, 9], dtype=float) + a = empty_like(arr) + return a[0] + def create_empty_like_val_float(): from numpy import empty_like, array arr = array([5, 1, 8, 0, 9]) a = empty_like(arr, dtype=float) return a[0] + def create_empty_like_val_complex_auto(): + from numpy import empty_like, array + arr = array([5, 1, 8, 0, 9], dtype=complex) + a = empty_like(arr) + return a[0] + def create_empty_like_val_complex(): from numpy import empty_like, array arr = array([5, 1, 8, 0, 9]) a = empty_like(arr, dtype=complex) return a[0] + def 
create_empty_like_val_int32_auto(): + from numpy import empty_like, array, int32 + arr = array([5, 1, 8, 0, 9], dtype=int32) + a = empty_like(arr) + return a[0] + def create_empty_like_val_int32(): from numpy import empty_like, int32, array arr = array([5, 1, 8, 0, 9]) a = empty_like(arr, dtype=int32) return a[0] + def create_empty_like_val_float32_auto(): + from numpy import empty_like, array, float32 + arr = array([5, 1, 8, 0, 9], dtype='float32') + a = empty_like(arr) + return a[0] + def create_empty_like_val_float32(): from numpy import empty_like, float32, array arr = array([5, 1, 8, 0, 9]) a = empty_like(arr, dtype=float32) return a[0] + def create_empty_like_val_float64_auto(): + from numpy import empty_like, array, float64 + arr = array([5, 1, 8, 0, 9], dtype=float64) + a = empty_like(arr) + return a[0] + def create_empty_like_val_float64(): from numpy import empty_like, float64, array arr = array([5, 1, 8, 0, 9]) a = empty_like(arr,dtype=float64) return a[0] + def create_empty_like_val_complex64_auto(): + from numpy import empty_like, array, complex64 + arr = array([5, 1, 8, 0, 9], dtype=complex64) + a = empty_like(arr) + return a[0] + def create_empty_like_val_complex64(): from numpy import empty_like, complex64, array arr = array([5, 1, 8, 0, 9]) a = empty_like(arr,dtype=complex64) return a[0] + def create_empty_like_val_complex128_auto(): + from numpy import empty_like, array, complex128 + arr = array([5, 1, 8, 0, 9], dtype=complex128) + a = empty_like(arr) + return a[0] + def create_empty_like_val_complex128(): from numpy import empty_like, complex128, array arr = array([5, 1, 8, 0, 9]) @@ -2805,27 +3024,52 @@ def create_empty_like_val_complex128(): return a[0] + f_int_auto = epyccel(create_empty_like_val_int_auto, language = language) + assert matching_types(f_int_auto(), create_empty_like_val_int_auto()) + f_int_int = epyccel(create_empty_like_val_int, language = language) assert matching_types(f_int_int(), create_empty_like_val_int()) + f_float_auto = epyccel(create_empty_like_val_float_auto, language = language) + assert matching_types(f_float_auto(), create_empty_like_val_float_auto()) + f_int_float = epyccel(create_empty_like_val_float, language = language) assert matching_types(f_int_float(), create_empty_like_val_float()) + f_complex_auto = epyccel(create_empty_like_val_complex_auto, language = language) + assert matching_types(f_complex_auto(), create_empty_like_val_complex_auto()) + f_int_complex = epyccel(create_empty_like_val_complex, language = language) assert matching_types(f_int_complex(), create_empty_like_val_complex()) + f_int32_auto = epyccel(create_empty_like_val_int32_auto, language = language) + assert matching_types(f_int32_auto(), create_empty_like_val_int32_auto()) + f_real_int32 = epyccel(create_empty_like_val_int32, language = language) assert matching_types(f_real_int32(), create_empty_like_val_int32()) + f_float32_auto = epyccel(create_empty_like_val_float32_auto, language = language) + assert matching_types(f_float32_auto(), create_empty_like_val_float32_auto()) + f_real_float32 = epyccel(create_empty_like_val_float32, language = language) assert matching_types(f_real_float32(), create_empty_like_val_float32()) + f_float64_auto = epyccel(create_empty_like_val_float64_auto, language = language) + assert matching_types(f_float64_auto(), create_empty_like_val_float64_auto()) + f_real_float64 = epyccel(create_empty_like_val_float64, language = language) assert matching_types(f_real_float64(), create_empty_like_val_float64()) + f_complex64_auto = 
epyccel(create_empty_like_val_complex64_auto, language = language) + + assert matching_types(f_complex64_auto(), create_empty_like_val_complex64_auto()) + f_real_complex64 = epyccel(create_empty_like_val_complex64, language = language) assert matching_types(f_real_complex64(), create_empty_like_val_complex64()) + f_complex128_auto = epyccel(create_empty_like_val_complex128_auto, language = language) + assert matching_types(f_complex128_auto(), create_empty_like_val_complex128_auto()) + f_real_complex128 = epyccel(create_empty_like_val_complex128, language = language) assert matching_types(f_real_complex128(), create_empty_like_val_complex128()) @@ -2936,48 +3180,96 @@ def create_ones_like_shape_F(n,m): def test_ones_like_dtype(language): + def create_ones_like_val_int_auto(): + from numpy import ones_like, array + arr = array([5, 1, 8, 0, 9], int) + a = ones_like(arr) + return a[0] + def create_ones_like_val_int(): from numpy import ones_like, array arr = array([5, 1, 8, 0, 9]) a = ones_like(arr, int) return a[0] + def create_ones_like_val_float_auto(): + from numpy import ones_like, array + arr = array([5, 1, 8, 0, 9], float) + a = ones_like(arr) + return a[0] + def create_ones_like_val_float(): from numpy import ones_like, array arr = array([5, 1, 8, 0, 9]) a = ones_like(arr,float) return a[0] + def create_ones_like_val_complex_auto(): + from numpy import ones_like, array + arr = array([5, 1, 8, 0, 9], complex) + a = ones_like(arr) + return a[0] + def create_ones_like_val_complex(): from numpy import ones_like, array arr = array([5, 1, 8, 0, 9]) a = ones_like(arr, complex) return a[0] + def create_ones_like_val_int32_auto(): + from numpy import ones_like, int32, array + arr = array([5, 1, 8, 0, 9], int32) + a = ones_like(arr) + return a[0] + def create_ones_like_val_int32(): from numpy import ones_like, int32, array arr = array([5, 1, 8, 0, 9]) a = ones_like(arr,int32) return a[0] + def create_ones_like_val_float32_auto(): + from numpy import ones_like, float32, array + arr = array([5, 1, 8, 0, 9], float32) + a = ones_like(arr) + return a[0] + def create_ones_like_val_float32(): from numpy import ones_like, float32, array arr = array([5, 1, 8, 0, 9]) a = ones_like(arr, float32) return a[0] + def create_ones_like_val_float64_auto(): + from numpy import ones_like, float64, array + arr = array([5, 1, 8, 0, 9], float64) + a = ones_like(arr) + return a[0] + def create_ones_like_val_float64(): from numpy import ones_like, float64, array arr = array([5, 1, 8, 0, 9]) a = ones_like(arr, float64) return a[0] + def create_ones_like_val_complex64_auto(): + from numpy import ones_like, complex64, array + arr = array([5, 1, 8, 0, 9], complex64) + a = ones_like(arr) + return a[0] + def create_ones_like_val_complex64(): from numpy import ones_like, complex64, array arr = array([5, 1, 8, 0, 9]) a = ones_like(arr, complex64) return a[0] + def create_ones_like_val_complex128_auto(): + from numpy import ones_like, complex128, array + arr = array([5, 1, 8, 0, 9], complex128) + a = ones_like(arr) + return a[0] + def create_ones_like_val_complex128(): from numpy import ones_like, complex128, array arr = array([5, 1, 8, 0, 9]) @@ -3017,6 +3309,38 @@ def create_ones_like_val_complex128(): assert(isclose( f_real_complex128() , create_ones_like_val_complex128(), rtol=RTOL, atol=ATOL)) assert matching_types(f_real_complex128(), create_ones_like_val_complex128()) + f_int_int_auto = epyccel(create_ones_like_val_int_auto, language = language) + assert( f_int_int_auto() == create_ones_like_val_int_auto()) + assert 
matching_types(f_int_int_auto(), create_ones_like_val_int_auto()) + + f_int_float_auto = epyccel(create_ones_like_val_float_auto, language = language) + assert(isclose( f_int_float_auto() , create_ones_like_val_float_auto(), rtol=RTOL, atol=ATOL)) + assert matching_types(f_int_float_auto(), create_ones_like_val_float_auto()) + + f_int_complex_auto = epyccel(create_ones_like_val_complex_auto, language = language) + assert(isclose( f_int_complex_auto() , create_ones_like_val_complex_auto(), rtol=RTOL, atol=ATOL)) + assert matching_types(f_int_complex_auto(), create_ones_like_val_complex_auto()) + + f_real_int32_auto = epyccel(create_ones_like_val_int32_auto, language = language) + assert( f_real_int32_auto() == create_ones_like_val_int32_auto()) + assert matching_types(f_real_int32_auto(), create_ones_like_val_int32_auto()) + + f_real_float32_auto = epyccel(create_ones_like_val_float32_auto, language = language) + assert(isclose( f_real_float32_auto() , create_ones_like_val_float32_auto(), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_float32_auto(), create_ones_like_val_float32_auto()) + + f_real_float64_auto = epyccel(create_ones_like_val_float64_auto, language = language) + assert(isclose( f_real_float64_auto() , create_ones_like_val_float64_auto(), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_float64_auto(), create_ones_like_val_float64_auto()) + + f_real_complex64_auto = epyccel(create_ones_like_val_complex64_auto, language = language) + assert(isclose( f_real_complex64_auto() , create_ones_like_val_complex64_auto(), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_complex64_auto(), create_ones_like_val_complex64_auto()) + + f_real_complex128_auto = epyccel(create_ones_like_val_complex128_auto, language = language) + assert(isclose( f_real_complex128_auto() , create_ones_like_val_complex128_auto(), rtol=RTOL, atol=ATOL)) + assert matching_types(f_real_complex128_auto(), create_ones_like_val_complex128_auto()) + def test_ones_like_combined_args(language): def create_ones_like_1_shape(): @@ -3207,6 +3531,81 @@ def create_zeros_like_val_complex128(): assert(isclose( f_real_complex128() , create_zeros_like_val_complex128(), rtol=RTOL, atol=ATOL)) assert matching_types(f_real_complex128(), create_zeros_like_val_complex128()) +def test_zeros_like_dtype_auto(language): + + def create_zeros_like_val_int_auto(): + from numpy import zeros_like, array + arr = array([5, 1, 8, 0, 9], dtype=int) + a = zeros_like(arr) + return a[0] + + def create_zeros_like_val_float_auto(): + from numpy import zeros_like, array + arr = array([5, 1, 8, 0, 9], dtype=float) + a = zeros_like(arr) + return a[0] + + def create_zeros_like_val_complex_auto(): + from numpy import zeros_like, array + arr = array([5, 1, 8, 0, 9], dtype=complex) + a = zeros_like(arr) + return a[0] + + def create_zeros_like_val_int32_auto(): + from numpy import zeros_like, array, int32 + arr = array([5, 1, 8, 0, 9], dtype=int32) + a = zeros_like(arr) + return a[0] + + def create_zeros_like_val_float32_auto(): + from numpy import zeros_like, array, float32 + arr = array([5, 1, 8, 0, 9], dtype='float32') + a = zeros_like(arr) + return a[0] + + def create_zeros_like_val_float64_auto(): + from numpy import zeros_like, array, float64 + arr = array([5, 1, 8, 0, 9], dtype=float64) + a = zeros_like(arr) + return a[0] + + def create_zeros_like_val_complex64_auto(): + from numpy import zeros_like, array, complex64 + arr = array([5, 1, 8, 0, 9], dtype=complex64) + a = zeros_like(arr) + return a[0] + + def 
create_zeros_like_val_complex128_auto(): + from numpy import zeros_like, array, complex128 + arr = array([5, 1, 8, 0, 9], dtype=complex128) + a = zeros_like(arr) + return a[0] + + f_int_auto = epyccel(create_zeros_like_val_int_auto, language = language) + assert matching_types(f_int_auto(), create_zeros_like_val_int_auto()) + + f_float_auto = epyccel(create_zeros_like_val_float_auto, language = language) + assert matching_types(f_float_auto(), create_zeros_like_val_float_auto()) + + f_complex_auto = epyccel(create_zeros_like_val_complex_auto, language = language) + assert matching_types(f_complex_auto(), create_zeros_like_val_complex_auto()) + + f_int32_auto = epyccel(create_zeros_like_val_int32_auto, language = language) + assert matching_types(f_int32_auto(), create_zeros_like_val_int32_auto()) + + f_float32_auto = epyccel(create_zeros_like_val_float32_auto, language = language) + assert matching_types(f_float32_auto(), create_zeros_like_val_float32_auto()) + + f_float64_auto = epyccel(create_zeros_like_val_float64_auto, language = language) + assert matching_types(f_float64_auto(), create_zeros_like_val_float64_auto()) + + f_complex64_auto = epyccel(create_zeros_like_val_complex64_auto, language = language) + assert matching_types(f_complex64_auto(), create_zeros_like_val_complex64_auto()) + + f_complex128_auto = epyccel(create_zeros_like_val_complex128_auto, language = language) + assert matching_types(f_complex128_auto(), create_zeros_like_val_complex128_auto()) + + def test_zeros_like_combined_args(language): def create_zeros_like_1_shape(): @@ -4124,25 +4523,33 @@ def get_prod(arr): bl = randint(0, 2, size = size, dtype= bool) - integer8 = randint(min_int8, max_int8, size = size, dtype=np.int8) - integer16 = randint(min_int16, max_int16, size = size, dtype=np.int16) - integer = randint(min_int, max_int, size = size, dtype=int) - integer32 = randint(min_int32, max_int32, size = size, dtype=np.int32) - integer64 = randint(min_int64, max_int64, size = size, dtype=np.int64) + max_ok_int = int(max_int64 ** (1/5)) - fl = uniform(min_float / 2, max_float / 2, size = size) - fl32 = uniform(min_float32 / 2, max_float32 / 2, size = size) - fl32 = np.float32(fl32) - fl64 = uniform(min_float64 / 2, max_float64 / 2, size=size) + integer8 = randint(max(min_int8, -max_ok_int), min(max_ok_int, max_int8), size = size, dtype=np.int8) + integer16 = randint(max(min_int16, -max_ok_int), min(max_ok_int, max_int16), size = size, dtype=np.int16) + integer = randint(max(min_int, -max_ok_int), min(max_ok_int, max_int), size = size, dtype=int) + integer32 = randint(max(min_int32, -max_ok_int), min(max_ok_int, max_int32), size = size, dtype=np.int32) + integer64 = randint(-max_ok_int, max_ok_int, size = size, dtype=np.int64) + + fl = uniform(-((-min_float) ** (1/5)), max_float ** (1/5), size = size) + + min_ok_float32 = -((-min_float32) ** (1/5)) + min_ok_float64 = -((-min_float64) ** (1/5)) + max_ok_float32 = max_float32 ** (1/5) + max_ok_float64 = max_float64 ** (1/5) - cmplx128_from_float32 = uniform(low=-((-min_float32) ** (1/5)), - high=(max_float32 ** (1/5)), size = size) + \ - uniform(low=-((-min_float32) ** (1/5)), - high=(max_float32 ** (1/5)), size = size) * 1j - cmplx128_from_float64 = uniform(low=-((-min_float64) ** (1/5)), - high=(max_float64 ** (1/5)), size = size) + \ - uniform(low=-((-min_float64) ** (1/5)), - high=(max_float64 ** (1/5)), size = size) * 1j + fl32 = uniform(min_ok_float32, max_ok_float32, size = size) + fl32 = np.float32(fl32) + fl64 = uniform(min_ok_float64, max_ok_float64, 
size=size) + + cmplx128_from_float32 = uniform(low=min_ok_float32/2, + high=max_ok_float32/2, size = size) + \ + uniform(low=min_ok_float32/2, + high=max_ok_float32/2, size = size) * 1j + cmplx128_from_float64 = uniform(low=min_ok_float64/2, + high=max_ok_float64/2, size = size) + \ + uniform(low=min_ok_float64/2, + high=max_ok_float64/2, size = size) * 1j # the result of the last operation is a Python complex type which has 8 bytes in the alignment, # that's why we need to convert it to a numpy.complex64 the needed type. cmplx64 = np.complex64(cmplx128_from_float32) @@ -4161,6 +4568,17 @@ def get_prod(arr): assert np.isclose(epyccel_func(fl64), get_prod(fl64), rtol=RTOL, atol=ATOL) assert np.isclose(epyccel_func(cmplx64), get_prod(cmplx64), rtol=RTOL32, atol=ATOL32) assert np.isclose(epyccel_func(cmplx128), get_prod(cmplx128), rtol=RTOL, atol=ATOL) + assert matching_types(epyccel_func(bl), get_prod(bl)) + assert matching_types(epyccel_func(integer8), get_prod(integer8)) + assert matching_types(epyccel_func(integer16), get_prod(integer16)) + assert matching_types(epyccel_func(integer), get_prod(integer)) + assert matching_types(epyccel_func(integer32), get_prod(integer32)) + assert matching_types(epyccel_func(integer64), get_prod(integer64)) + assert matching_types(epyccel_func(fl), get_prod(fl)) + assert matching_types(epyccel_func(fl32), get_prod(fl32)) + assert matching_types(epyccel_func(fl64), get_prod(fl64)) + assert matching_types(epyccel_func(cmplx64), get_prod(cmplx64)) + assert matching_types(epyccel_func(cmplx128), get_prod(cmplx128)) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = [pytest.mark.fortran]), @@ -4198,25 +4616,33 @@ def get_prod(arr): bl = randint(0, 2, size = size, dtype= bool) - integer8 = randint(min_int8, max_int8, size = size, dtype=np.int8) - integer16 = randint(min_int16, max_int16, size = size, dtype=np.int16) - integer = randint(min_int, max_int, size = size, dtype=int) - integer32 = randint(min_int32, max_int32, size = size, dtype=np.int32) - integer64 = randint(min_int64, max_int64, size = size, dtype=np.int64) + max_ok_int = int(max_int64 ** (1/10)) - fl = uniform(min_float / 10, max_float / 10, size = size) - fl32 = uniform(min_float32 / 10, max_float32 / 10, size = size) + integer8 = randint(max(min_int8, -max_ok_int), min(max_ok_int, max_int8), size = size, dtype=np.int8) + integer16 = randint(max(min_int16, -max_ok_int), min(max_ok_int, max_int16), size = size, dtype=np.int16) + integer = randint(max(min_int, -max_ok_int), min(max_ok_int, max_int), size = size, dtype=int) + integer32 = randint(max(min_int32, -max_ok_int), min(max_ok_int, max_int32), size = size, dtype=np.int32) + integer64 = randint(-max_ok_int, max_ok_int, size = size, dtype=np.int64) + + fl = uniform(-((-min_float) ** (1/10)), max_float ** (1/10), size = size) + + min_ok_float32 = -((-min_float32) ** (1/10)) + min_ok_float64 = -((-min_float64) ** (1/10)) + max_ok_float32 = max_float32 ** (1/10) + max_ok_float64 = max_float64 ** (1/10) + + fl32 = uniform(min_ok_float32, max_ok_float32, size = size) fl32 = np.float32(fl32) - fl64 = uniform(min_float64 / 10, max_float64 / 10, size=size) - - cmplx128_from_float32 = uniform(low=-((-min_float32) ** (1/10)), - high=(max_float32 ** (1/10)), size = size) + \ - uniform(low=-((-min_float32) ** (1/10)), - high=(max_float32 ** (1/10)), size = size) * 1j - cmplx128_from_float64 = uniform(low=-((-min_float64) ** (1/10)), - high=(max_float64 ** (1/10)), size = size) + \ - uniform(low=-((-min_float64) ** (1/10)), - 
high=(max_float64 ** (1/10)), size = size) * 1j + fl64 = uniform(min_ok_float64, max_ok_float64, size=size) + + cmplx128_from_float32 = uniform(low=min_ok_float32/2, + high=max_ok_float32/2, size = size) + \ + uniform(low=min_ok_float32/2, + high=max_ok_float32/2, size = size) * 1j + cmplx128_from_float64 = uniform(low=min_ok_float64/2, + high=max_ok_float64/2, size = size) + \ + uniform(low=min_ok_float64/2, + high=max_ok_float64/2, size = size) * 1j # the result of the last operation is a Python complex type which has 8 bytes in the alignment, # that's why we need to convert it to a numpy.complex64 the needed type. cmplx64 = np.complex64(cmplx128_from_float32) @@ -4895,7 +5321,7 @@ def test_numpy_where_array_like_1d_with_condition(language): @types('float64[:]') def get_chosen_elements(arr): from numpy import where, shape - a = where(arr > 5, arr, arr*2) + a = where(arr > 5, arr, arr * 2) s = shape(a) return len(s), s[0], a[1], a[0] @@ -4988,7 +5414,7 @@ def test_numpy_where_array_like_2d_with_condition(language): @types('float64[:,:]') def get_chosen_elements(arr): from numpy import where, shape - a = where(arr < 0, arr, arr+1) + a = where(arr < 0, arr, arr + 1) s = shape(a) return len(s), s[0], a[0,0], a[0,1], a[1,0], a[1,1] @@ -5139,7 +5565,7 @@ def test_linspace_type2(start, end, result): for i in range(len(x)): result[i] = x[i] - integer8 = randint(min_int8, max_int8, dtype=np.int8) + integer8 = randint(min_int8, max_int8 // 2, dtype=np.int8) integer16 = randint(min_int16, max_int16, dtype=np.int16) integer = randint(min_int, max_int, dtype=int) integer32 = randint(min_int32, max_int32, dtype=np.int32) @@ -5163,8 +5589,8 @@ def test_linspace_type2(start, end, result): epyccel_func_type2(0, 10, out) assert (np.allclose(x, out)) arr = np.zeros - x = randint(100, 200) - assert np.isclose(epyccel_func(integer8, x, 100), get_linspace(integer8, x, 100), rtol=RTOL, atol=ATOL) + x = randint(1, 60) + assert np.isclose(epyccel_func(integer8, x, 30), get_linspace(integer8, x, 30), rtol=RTOL, atol=ATOL) assert matching_types(epyccel_func(integer8, x, 100), get_linspace(integer8, x, 100)) x = randint(100, 200) assert np.isclose(epyccel_func(integer, x, 30), get_linspace(integer, x, 30), rtol=RTOL, atol=ATOL) diff --git a/tests/epyccel/recognised_functions/test_numpy_types.py b/tests/epyccel/recognised_functions/test_numpy_types.py index dd22e7e678..24c7e2500c 100644 --- a/tests/epyccel/recognised_functions/test_numpy_types.py +++ b/tests/epyccel/recognised_functions/test_numpy_types.py @@ -9,9 +9,130 @@ from test_numpy_funcs import max_float, min_float, max_float32, min_float32,max_float64, min_float64 from test_numpy_funcs import matching_types -from pyccel.decorators import types +from pyccel.decorators import types, template from pyccel.epyccel import epyccel +numpy_basic_types_deprecated = tuple(int(v) for v in np.version.version.split('.'))>=(1,24,0) + +def test_mult_numpy_python_type(language): + + def mult_on_array_int8(): + from numpy import ones, int8 + a = ones(5, dtype=int8) + b = a * 2 + return b[0] + + def mult_on_array_int16(): + from numpy import ones, int16 + a = ones(5, dtype=int16) + b = a * 2 + return b[0] + + def mult_on_array_int32(): + from numpy import ones, int32 + a = ones(5, dtype=int32) + b = a * 2 + return b[0] + + def mult_on_array_int64(): + from numpy import ones, int64 + a = ones(5, dtype=int64) + b = a * 2 + return b[0] + + def mult_on_array_float32(): + from numpy import ones, float32 + a = ones(5, dtype=float32) + b = a * 2 + return b[0] + + def 
mult_on_array_float64(): + from numpy import ones, float64 + a = ones(5, dtype=float64) + b = a * 2 + return b[0] + + epyccel_func = epyccel(mult_on_array_int8, language=language) + python_result = mult_on_array_int8() + pyccel_result = epyccel_func() + assert python_result == pyccel_result + assert matching_types(pyccel_result, python_result) + + epyccel_func = epyccel(mult_on_array_int16, language=language) + python_result = mult_on_array_int16() + pyccel_result = epyccel_func() + assert python_result == pyccel_result + assert matching_types(pyccel_result, python_result) + + epyccel_func = epyccel(mult_on_array_int32, language=language) + python_result = mult_on_array_int32() + pyccel_result = epyccel_func() + assert python_result == pyccel_result + assert matching_types(pyccel_result, python_result) + + epyccel_func = epyccel(mult_on_array_int64, language=language) + python_result = mult_on_array_int64() + pyccel_result = epyccel_func() + assert python_result == pyccel_result + assert matching_types(pyccel_result, python_result) + + epyccel_func = epyccel(mult_on_array_float32, language=language) + python_result = mult_on_array_float32() + pyccel_result = epyccel_func() + assert python_result == pyccel_result + assert matching_types(pyccel_result, python_result) + + epyccel_func = epyccel(mult_on_array_float64, language=language) + python_result = mult_on_array_float64() + pyccel_result = epyccel_func() + assert python_result == pyccel_result + assert matching_types(pyccel_result, python_result) + +def test_numpy_scalar_promotion(language): + + @types('T', 'D') + @template(name='T', types=['int32', 'int64', 'float32', 'float64', 'complex64', 'complex128']) + @template(name='D', types=['int32', 'int64', 'float32', 'float64', 'complex64', 'complex128']) + def add_numpy_to_numpy_type(np_s_l, np_s_r): + rs = np_s_l + np_s_r + return rs + + integer32 = randint(min_int32 // 2, max_int32 // 2, dtype=np.int32) + integer64 = randint(min_int64 // 2, max_int64 // 2, dtype=np.int64) + fl32 = np.float32(uniform(min_float32 / 2, max_float32 / 2)) + fl64 = np.float64(uniform(min_float64 / 2, max_float64 / 2)) + complex64 = np.complex64(uniform(min_float32 / 2, max_float32 / 2)) + complex128 = np.complex64(uniform(min_float32 / 2, max_float32 / 2)) + + epyccel_func = epyccel(add_numpy_to_numpy_type, language=language) + + pyccel_result = epyccel_func(integer32, integer64) + python_result = add_numpy_to_numpy_type(integer32, integer64) + + assert pyccel_result == python_result + assert isinstance(pyccel_result, type(python_result)) + + pyccel_result = epyccel_func(integer64, fl32) + python_result = add_numpy_to_numpy_type(integer64, fl32) + assert pyccel_result == python_result + assert isinstance(pyccel_result, type(python_result)) + + pyccel_result = epyccel_func(integer64, fl64) + python_result = add_numpy_to_numpy_type(integer64, fl64) + assert pyccel_result == python_result + assert isinstance(pyccel_result, type(python_result)) + + pyccel_result = epyccel_func(fl64, complex64) + python_result = add_numpy_to_numpy_type(fl64, complex64) + assert pyccel_result == python_result + assert isinstance(pyccel_result, type(python_result)) + + pyccel_result = epyccel_func(complex128, fl64) + python_result = add_numpy_to_numpy_type(complex128, fl64) + assert pyccel_result == python_result + assert isinstance(pyccel_result, type(python_result)) + +@pytest.mark.skipif(numpy_basic_types_deprecated, reason="Can't import bool from numpy") def test_numpy_bool_scalar(language): @types('bool') @@ -174,15 +295,21 @@ 
def get_int8(a): b = int8(a) return b -@pytest.mark.parametrize( 'function_boundaries', [(get_int, min_int, max_int), (get_int64, min_int64, max_int64), (get_int32, min_int32, max_int32),\ - (get_int16, min_int16, max_int16), (get_int8, min_int8, max_int8)]) +if numpy_basic_types_deprecated: + int_functions_and_boundaries = [(get_int64, min_int64, max_int64), (get_int32, min_int32, max_int32),\ + (get_int16, min_int16, max_int16), (get_int8, min_int8, max_int8)] +else: + int_functions_and_boundaries = [(get_int, min_int, max_int), (get_int64, min_int64, max_int64), (get_int32, min_int32, max_int32),\ + (get_int16, min_int16, max_int16), (get_int8, min_int8, max_int8)] + +@pytest.mark.parametrize( 'function_boundaries', int_functions_and_boundaries) def test_numpy_int_scalar(language, function_boundaries): integer8 = randint(min_int8, max_int8, dtype=np.int8) - integer16 = randint(min_int16, max_int16, dtype=np.int16) - integer = randint(min_int, max_int, dtype=int) - integer32 = randint(min_int32, max_int32, dtype=np.int32) - integer64 = randint(min_int64, max_int64, dtype=np.int64) + integer16 = randint(min_int8, max_int8, dtype=np.int16) + integer = randint(min_int8, max_int8, dtype=int) + integer32 = randint(min_int8, max_int8, dtype=np.int32) + integer64 = randint(min_int8, max_int8, dtype=np.int64) get_int = function_boundaries[0] # Modifying a global variable in a scop will change it to a local variable, so it needs to be initialized. @@ -525,7 +652,12 @@ def get_float32(a): b = float32(a) return b -@pytest.mark.parametrize( 'get_float', [get_float64, get_float32, get_float]) +if numpy_basic_types_deprecated: + float_functions = [get_float64, get_float32] +else: + float_functions = [get_float64, get_float32, get_float] + +@pytest.mark.parametrize( 'get_float', float_functions) def test_numpy_float_scalar(language, get_float): integer8 = randint(min_int8, max_int8, dtype=np.int8) @@ -534,10 +666,10 @@ def test_numpy_float_scalar(language, get_float): integer32 = randint(min_int32, max_int32, dtype=np.int32) integer64 = randint(min_int64, max_int64, dtype=np.int64) - fl = uniform(min_float / 2, max_float / 2) + fl = uniform(min_float32 / 2, max_float32 / 2) fl32 = uniform(min_float32 / 2, max_float32 / 2) fl32 = np.float32(fl32) - fl64 = uniform(min_float64 / 2, max_float64 / 2) + fl64 = uniform(min_float32 / 2, max_float32 / 2) epyccel_func = epyccel(get_float, language=language) @@ -660,10 +792,10 @@ def test_numpy_float_array_like_1d(language, get_float): integer32 = randint(min_int32, max_int32, size=size, dtype=np.int32) integer64 = randint(min_int64, max_int64, size=size, dtype=np.int64) - fl = uniform(min_float / 2, max_float / 2, size = size) + fl = uniform(min_float32 / 2, max_float32 / 2, size = size) fl32 = uniform(min_float32 / 2, max_float32 / 2, size = size) fl32 = np.float32(fl32) - fl64 = uniform(min_float64 / 2, max_float64 / 2, size = size) + fl64 = uniform(min_float32 / 2, max_float32 / 2, size = size) epyccel_func = epyccel(get_float, language=language) @@ -734,10 +866,10 @@ def test_numpy_float_array_like_2d(language, get_float): integer32 = randint(min_int32, max_int32, size=size, dtype=np.int32) integer64 = randint(min_int64, max_int64, size=size, dtype=np.int64) - fl = uniform(min_float / 2, max_float / 2, size = size) + fl = uniform(min_float32 / 2, max_float32 / 2, size = size) fl32 = uniform(min_float32 / 2, max_float32 / 2, size = size) fl32 = np.float32(fl32) - fl64 = uniform(min_float64 / 2, max_float64 / 2, size = size) + fl64 = uniform(min_float32 / 2, 
max_float32 / 2, size = size) epyccel_func = epyccel(get_float, language=language) @@ -1232,3 +1364,31 @@ def test_numpy_complex_array_like_2d(language, get_complex): assert epyccel_func(fl) == get_complex(fl) assert epyccel_func(fl64) == get_complex(fl64) assert epyccel_func(fl32) == get_complex(fl32) + +def test_literal_complex64(language): + def get_complex64(): + from numpy import complex64 + compl = complex64(3+4j) + return compl, compl.real, compl.imag + + epyccel_func = epyccel(get_complex64, language=language) + + pyth_res = get_complex64() + pycc_res = epyccel_func() + for pyth, pycc in zip(pyth_res, pycc_res): + assert pyth == pycc + assert isinstance(pycc, type(pyth)) + +def test_literal_complex128(language): + def get_complex128(): + from numpy import complex128 + compl = complex128(3+4j) + return compl, compl.real, compl.imag + + epyccel_func = epyccel(get_complex128, language=language) + + pyth_res = get_complex128() + pycc_res = epyccel_func() + for pyth, pycc in zip(pyth_res, pycc_res): + assert pyth == pycc + assert isinstance(pycc, type(pyth)) diff --git a/tests/epyccel/test_arrays.py b/tests/epyccel/test_arrays.py index 3879892626..d9c80f7c33 100644 --- a/tests/epyccel/test_arrays.py +++ b/tests/epyccel/test_arrays.py @@ -1,11 +1,72 @@ # pylint: disable=missing-function-docstring, missing-module-docstring/ import pytest import numpy as np +from numpy import iinfo from numpy.random import randint from pyccel.epyccel import epyccel from modules import arrays +#============================================================================== +# TEST: VERIFY ARRAY'S DTYPE CORRESPONDENCE TO THE PASSED ELEMENTS +#============================================================================== + +def test_array_assigned_dtype(language): + integer = randint(low = iinfo('int').min, high = iinfo('int').max, dtype=int) + integer8 = randint(low = iinfo('int8').min, high = iinfo('int8').max, dtype=np.int8) + integer16 = randint(low = iinfo('int16').min, high = iinfo('int16').max, dtype=np.int16) + integer32 = randint(low = iinfo('int32').min, high = iinfo('int32').max, dtype=np.int32) + integer64 = randint(low = iinfo('int64').min, high = iinfo('int64').max, dtype=np.int64) + + fl = float(integer) + fl32 = np.float32(fl) + fl64 = np.float64(fl) + + cmplx64 = np.complex64(fl32) + cmplx128 = np.complex128(fl64) + + epyccel_func = epyccel(arrays.array_return_first_element, language=language) + + f_integer_output = epyccel_func(integer, integer) + test_int_output = arrays.array_return_first_element(integer, integer) + assert isinstance(f_integer_output, type(test_int_output)) + + f_integer8_output = epyccel_func(integer8, integer8) + test_int8_output = arrays.array_return_first_element(integer8, integer8) + assert isinstance(f_integer8_output, type(test_int8_output)) + + f_integer16_output = epyccel_func(integer16, integer16) + test_int16_output = arrays.array_return_first_element(integer16, integer16) + assert isinstance(f_integer16_output, type(test_int16_output)) + + f_integer32_output = epyccel_func(integer32, integer32) + test_int32_output = arrays.array_return_first_element(integer32, integer32) + assert isinstance(f_integer32_output, type(test_int32_output)) + + f_integer64_output = epyccel_func(integer64, integer64) + test_int64_output = arrays.array_return_first_element(integer64, integer64) + assert isinstance(f_integer64_output, type(test_int64_output)) + + f_fl_output = epyccel_func(fl, fl) + test_float_output = arrays.array_return_first_element(fl, fl) + assert 
isinstance(f_fl_output, type(test_float_output)) + + f_fl32_output = epyccel_func(fl32, fl32) + test_float32_output = arrays.array_return_first_element(fl32, fl32) + assert isinstance(f_fl32_output, type(test_float32_output)) + + f_fl64_output = epyccel_func(fl64, fl64) + test_float64_output = arrays.array_return_first_element(fl64, fl64) + assert isinstance(f_fl64_output, type(test_float64_output)) + + f_cmplx64_output = epyccel_func(cmplx64, cmplx64) + test_cmplx64_output = arrays.array_return_first_element(cmplx64, cmplx64) + assert isinstance(f_cmplx64_output, type(test_cmplx64_output)) + + f_cmplx128_output = epyccel_func(cmplx128, cmplx128) + test_cmplx128_output = arrays.array_return_first_element(cmplx128, cmplx128) + assert isinstance(f_cmplx128_output, type(test_cmplx128_output)) + #============================================================================== # TEST: 1D ARRAYS OF INT-32 #============================================================================== @@ -873,6 +934,19 @@ def test_array_real_1d_scalar_div(language): assert np.array_equal( x1, x2 ) +def test_array_real_1d_scalar_mod(language): + f1 = arrays.array_real_1d_scalar_mod + f2 = epyccel( f1 , language = language) + + x1 = np.array( [1.,2.,3.] ) + x2 = np.copy(x1) + a = 5. + + f1(x1, a) + f2(x2, a) + + assert np.array_equal( x1, x2 ) + def test_array_real_1d_scalar_idiv(language): f1 = arrays.array_real_1d_scalar_idiv @@ -943,6 +1017,20 @@ def test_array_real_1d_div(language): assert np.array_equal( x1, x2 ) +def test_array_real_1d_mod(language): + + f1 = arrays.array_real_1d_mod + f2 = epyccel( f1 , language = language) + + x1 = np.array( [1.,2.,3.] ) + x2 = np.copy(x1) + a = np.array( [1.,2.,3.] ) + + f1(x1, a) + f2(x2, a) + + assert np.array_equal( x1, x2) + def test_array_real_1d_idiv(language): f1 = arrays.array_real_1d_idiv @@ -1017,6 +1105,20 @@ def test_array_real_2d_C_scalar_div(language): assert np.array_equal( x1, x2 ) +def test_array_real_2d_C_scalar_mod(language): + + f1 = arrays.array_real_2d_C_scalar_mod + f2 = epyccel( f1 , language = language) + + x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) + x2 = np.copy(x1) + a = 5. + + f1(x1, a) + f2(x2, a) + + assert np.array_equal( x1, x2 ) + def test_array_real_2d_C_add(language): f1 = arrays.array_real_2d_C_add @@ -1073,6 +1175,20 @@ def test_array_real_2d_C_div(language): assert np.array_equal( x1, x2 ) +def test_array_real_2d_C_mod(language): + + f1 = arrays.array_real_2d_C_mod + f2 = epyccel( f1 , language = language) + + x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) + x2 = np.copy(x1) + a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]] ) + + f1(x1, a) + f2(x2, a) + + assert np.array_equal( x1, x2 ) + def test_array_real_2d_C_array_initialization(language): f1 = arrays.array_real_2d_C_array_initialization @@ -1206,6 +1322,20 @@ def test_array_real_2d_F_scalar_div(language): assert np.array_equal( x1, x2 ) +def test_array_real_2d_F_scalar_mod(language): + + f1 = arrays.array_real_2d_F_scalar_mod + f2 = epyccel( f1 , language = language) + + x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) + x2 = np.copy(x1) + a = 5. 
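+    # Apply the in-place scalar modulo with the pure Python function and the
+    # translated one on identical copies, then compare the modified arrays.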
+ + f1(x1, a) + f2(x2, a) + + assert np.array_equal( x1, x2 ) + def test_array_real_2d_F_add(language): f1 = arrays.array_real_2d_F_add @@ -1262,6 +1392,20 @@ def test_array_real_2d_F_div(language): assert np.array_equal( x1, x2 ) +def test_array_real_2d_F_mod(language): + + f1 = arrays.array_real_2d_F_mod + f2 = epyccel( f1 , language = language) + + x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) + x2 = np.copy(x1) + a = np.array( [[-1.,-2.,-3.], [-4.,-5.,-6.]], order='F' ) + + f1(x1, a) + f2(x2, a) + + assert np.array_equal( x1, x2 ) + def test_array_real_2d_F_array_initialization(language): f1 = arrays.array_real_2d_F_array_initialization @@ -3823,6 +3967,8 @@ def test_arrs_2d_negative_index(language): #============================================================================== # TEST : NUMPY ARANGE #============================================================================== +RTOL = 1e-12 +ATOL = 1e-16 def test_numpy_arange_one_arg(language): f1 = arrays.arr_arange_1 @@ -3837,7 +3983,12 @@ def test_numpy_arange_two_arg(language): def test_numpy_arange_full_arg(language): f1 = arrays.arr_arange_3 f2 = epyccel(f1, language = language) - np.testing.assert_array_almost_equal(f1(), f2(), decimal=9) + + r_f1 = f1() + r_f2 = f2() + + assert (type(r_f1[1]) is type(r_f2[1])) + np.testing.assert_allclose(f1(), f2(), rtol=RTOL, atol=ATOL) def test_numpy_arange_with_dtype(language): f1 = arrays.arr_arange_4 @@ -3847,17 +3998,38 @@ def test_numpy_arange_with_dtype(language): def test_numpy_arange_negative_step(language): f1 = arrays.arr_arange_5 f2 = epyccel(f1, language = language) - np.testing.assert_array_almost_equal(f1(), f2(), decimal = 9) + + r_f1 = f1() + r_f2 = f2() + + assert (type(r_f1[1]) is type(r_f2[1])) + np.testing.assert_allclose(f1(), f2(), rtol=RTOL, atol=ATOL) def test_numpy_arange_negative_step_2(language): f1 = arrays.arr_arange_6 f2 = epyccel(f1, language = language) - np.testing.assert_array_almost_equal(f1(), f2(), decimal = 9) + + r_f1 = f1() + r_f2 = f2() + + assert (type(r_f1[1]) is type(r_f2[1])) + np.testing.assert_allclose(f1(), f2(), rtol=RTOL, atol=ATOL) + +def test_numpy_arange_into_slice(language): + f1 = arrays.arr_arange_7 + f2 = epyccel(f1, language = language) + n = randint(2, 10) + m = randint(2, 10) + x = np.array(100 * np.random.random((n, m)), dtype=int) + x_expected = x.copy() + f1(x_expected) + f2(x) + np.testing.assert_allclose(x, x_expected, rtol=RTOL, atol=ATOL) def test_iterate_slice(language): f1 = arrays.iterate_slice f2 = epyccel(f1, language = language) - i = randint(2,10) + i = randint(2, 10) assert f1(i) == f2(i) ##============================================================================== diff --git a/tests/epyccel/test_builtins.py b/tests/epyccel/test_builtins.py index cdc4419e2c..0b5790f1ab 100644 --- a/tests/epyccel/test_builtins.py +++ b/tests/epyccel/test_builtins.py @@ -7,6 +7,10 @@ from pyccel.epyccel import epyccel from pyccel.decorators import types, template +ATOL = 1e-15 +RTOL = 2e-14 + + min_int = iinfo('int').min max_int = iinfo('int').max @@ -23,9 +27,9 @@ def f1(x): negative_test = randint(min_int, 0) positive_test = randint(0, max_int) - assert f1(0) == f2(0) - assert f1(negative_test) == f2(negative_test) - assert f1(positive_test) == f2(positive_test) + assert np.isclose(f1(0), f2(0), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(negative_test), f2(negative_test), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(positive_test), f2(positive_test), rtol=RTOL, atol=ATOL) def test_abs_r(language): @types('real') @@ -37,9 
+41,9 @@ def f1(x): negative_test = uniform(min_float, 0.0) positive_test = uniform(0.0, max_float) - assert f1(0.00000) == f2(0.00000) - assert f1(negative_test) == f2(negative_test) - assert f1(positive_test) == f2(positive_test) + assert np.isclose(f1(0.00000), f2(0.00000), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(negative_test), f2(negative_test), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(positive_test), f2(positive_test), rtol=RTOL, atol=ATOL) @@ -60,23 +64,14 @@ def f1(x): zero_rand = 1j*uniform(min_compl_abs, max_compl_abs) rand_zero = uniform(min_compl_abs, max_compl_abs) + 0j - assert f1(pos_pos) == f2(pos_pos) - assert f1(pos_neg) == f2(pos_neg) - assert f1(neg_pos) == f2(neg_pos) - assert f1(neg_neg) == f2(neg_neg) - assert f1(zero_rand) == f2(zero_rand) - assert f1(rand_zero) == f2(rand_zero) - assert f1(0j + 0) == f2(0j + 0) + assert np.isclose(f1(pos_pos), f2(pos_pos), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(pos_neg), f2(pos_neg), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(neg_pos), f2(neg_pos), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(neg_neg), f2(neg_neg), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(zero_rand), f2(zero_rand), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(rand_zero), f2(rand_zero), rtol=RTOL, atol=ATOL) + assert np.isclose(f1(0j + 0), f2(0j + 0), rtol=RTOL, atol=ATOL) -@pytest.mark.parametrize( 'language', ( - pytest.param("fortran", marks = pytest.mark.fortran), - pytest.param("c", marks = [ - pytest.mark.skip(reason="min not implemented in C for integers"), - pytest.mark.c] - ), - pytest.param("python", marks = pytest.mark.python) - ) -) def test_min_2_args_i(language): @types('int','int') def f(x, y): @@ -88,15 +83,6 @@ def f(x, y): assert epyc_f(*int_args) == f(*int_args) -@pytest.mark.parametrize( 'language', ( - pytest.param("fortran", marks = pytest.mark.fortran), - pytest.param("c", marks = [ - pytest.mark.skip(reason="min not implemented in C for integers"), - pytest.mark.c] - ), - pytest.param("python", marks = pytest.mark.python) - ) -) def test_min_2_args_i_adhoc(language): def f(x:int): return min(x, 0) @@ -115,7 +101,7 @@ def f(x:float): float_arg = uniform(min_float /2, max_float/2) - assert epyc_f(float_arg) == f(float_arg) + assert np.isclose(epyc_f(float_arg), f(float_arg), rtol=RTOL, atol=ATOL) def test_min_2_args_f(language): @types('float','float') @@ -126,7 +112,7 @@ def f(x, y): float_args = [uniform(min_float/2, max_float/2) for _ in range(2)] - assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -149,7 +135,7 @@ def f(x, y, z): float_args = [uniform(min_float/2, max_float/2) for _ in range(3)] assert epyc_f(*int_args) == f(*int_args) - assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -172,7 +158,7 @@ def f(x, y, z): float_args = [uniform(min_float/2, max_float/2) for _ in range(3)] assert epyc_f(*int_args) == f(*int_args) - assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -195,17 +181,8 @@ def f(x, y, z): float_args = [uniform(min_float/2, max_float/2) for _ in range(3)] assert epyc_f(*int_args) == f(*int_args) - 
assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) -@pytest.mark.parametrize( 'language', ( - pytest.param("fortran", marks = pytest.mark.fortran), - pytest.param("c", marks = [ - pytest.mark.skip(reason="max not implemented in C for integers"), - pytest.mark.c] - ), - pytest.param("python", marks = pytest.mark.python) - ) -) def test_max_2_args_i(language): @types('int','int') def f(x, y): @@ -226,7 +203,7 @@ def f(x, y): float_args = [uniform(min_float/2, max_float/2) for _ in range(2)] - assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -249,7 +226,7 @@ def f(x, y, z): float_args = [uniform(min_float/2, max_float/2) for _ in range(3)] assert epyc_f(*int_args) == f(*int_args) - assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -272,7 +249,7 @@ def f(x, y, z): float_args = [uniform(min_float/2, max_float/2) for _ in range(3)] assert epyc_f(*int_args) == f(*int_args) - assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -295,7 +272,7 @@ def f(x, y, z): float_args = [uniform(min_float/2, max_float/2) for _ in range(3)] assert epyc_f(*int_args) == f(*int_args) - assert epyc_f(*float_args) == f(*float_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -320,5 +297,5 @@ def f(x, y): for _ in range(2)] assert epyc_f(*int_args) == f(*int_args) - assert epyc_f(*float_args) == f(*float_args) - assert epyc_f(*complex_args) == f(*complex_args) + assert np.isclose(epyc_f(*float_args), f(*float_args), rtol=RTOL, atol=ATOL) + assert np.isclose(epyc_f(*complex_args), f(*complex_args), rtol=RTOL, atol=ATOL) diff --git a/tests/epyccel/test_default_precision_template.py b/tests/epyccel/test_default_precision_template.py new file mode 100644 index 0000000000..8e8139f6ef --- /dev/null +++ b/tests/epyccel/test_default_precision_template.py @@ -0,0 +1,31 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring + +from numpy.random import randint +from numpy import isclose +import numpy as np + +from pyccel.decorators import types +from pyccel.epyccel import epyccel + +RTOL = 1e-12 +ATOL = 1e-16 + +def test_default_precision_template(language): + + @types('int[:]') + @types('float[:]') + @types('complex[:]') + def return_array_element(array): + return array[0] + + test_types = ['int', 'float', 'complex'] + f1 = return_array_element + f2 = epyccel(f1, language=language) + for t in test_types: + d1 = randint(1, 15) + arr = np.ones(d1, dtype=t) + python_result = f1(arr) + pyccel_result = f2(arr) + + assert isinstance(pyccel_result, type(python_result)) + assert isclose(pyccel_result, python_result, rtol=RTOL, atol=ATOL) diff --git a/tests/epyccel/test_epyccel_augassign.py b/tests/epyccel/test_epyccel_augassign.py new file mode 100644 index 0000000000..c654e5971c --- /dev/null +++ b/tests/epyccel/test_epyccel_augassign.py @@ -0,0 +1,212 @@ +# pylint: disable=missing-function-docstring, 
missing-module-docstring/ + +import numpy as np +import modules.augassign as mod + +from pyccel.epyccel import epyccel + + +# += tests + +def test_augassign_add_1d(language): + f_int = mod.augassign_add_1d_int + f_float = mod.augassign_add_1d_float + f_complex = mod.augassign_add_1d_complex + f_int_epyc = epyccel(f_int, language = language) + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_int = np.zeros(5, dtype=int) + x1_float = np.zeros(5, dtype=float) + x1_complex = np.zeros(5, dtype=complex) + x2_int = np.zeros(5, dtype=int) + x2_float = np.zeros(5, dtype=float) + x2_complex = np.zeros(5, dtype=complex) + + y1_int = f_int(x1_int) + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_int = f_int_epyc(x2_int) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_int == y2_int and np.array_equal(x1_int, x2_int) + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) + +def test_augassign_add_2d(language): + f_int = mod.augassign_add_2d_int + f_float = mod.augassign_add_2d_float + f_complex = mod.augassign_add_2d_complex + f_int_epyc = epyccel(f_int, language = language) + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_int = np.zeros((5, 5), dtype=int) + x1_float = np.zeros((5, 5), dtype=float) + x1_complex = np.zeros((5, 5), dtype=complex) + x2_int = np.zeros((5, 5), dtype=int) + x2_float = np.zeros((5, 5), dtype=float) + x2_complex = np.zeros((5, 5), dtype=complex) + + y1_int = f_int(x1_int) + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_int = f_int_epyc(x2_int) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_int == y2_int and np.array_equal(x1_int, x2_int) + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) + + +# -= tests + +def test_augassign_sub_1d(language): + f_int = mod.augassign_sub_1d_int + f_float = mod.augassign_sub_1d_float + f_complex = mod.augassign_sub_1d_complex + f_int_epyc = epyccel(f_int, language = language) + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_int = np.zeros(5, dtype=int) + x1_float = np.zeros(5, dtype=float) + x1_complex = np.zeros(5, dtype=complex) + x2_int = np.zeros(5, dtype=int) + x2_float = np.zeros(5, dtype=float) + x2_complex = np.zeros(5, dtype=complex) + + y1_int = f_int(x1_int) + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_int = f_int_epyc(x2_int) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_int == y2_int and np.array_equal(x1_int, x2_int) + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) + +def test_augassign_sub_2d(language): + f_int = mod.augassign_sub_2d_int + f_float = mod.augassign_sub_2d_float + f_complex = mod.augassign_sub_2d_complex + f_int_epyc = epyccel(f_int, language = language) + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_int = np.zeros((5, 5), dtype=int) + x1_float = np.zeros((5, 5), dtype=float) + x1_complex = np.zeros((5, 5), dtype=complex) + x2_int = np.zeros((5, 5), 
dtype=int) + x2_float = np.zeros((5, 5), dtype=float) + x2_complex = np.zeros((5, 5), dtype=complex) + + y1_int = f_int(x1_int) + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_int = f_int_epyc(x2_int) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_int == y2_int and np.array_equal(x1_int, x2_int) + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) + + +# *= tests + +def test_augassign_mul_1d(language): + f_int = mod.augassign_mul_1d_int + f_float = mod.augassign_mul_1d_float + f_complex = mod.augassign_mul_1d_complex + f_int_epyc = epyccel(f_int, language = language) + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_int = np.zeros(5, dtype=int) + x1_float = np.zeros(5, dtype=float) + x1_complex = np.zeros(5, dtype=complex) + x2_int = np.zeros(5, dtype=int) + x2_float = np.zeros(5, dtype=float) + x2_complex = np.zeros(5, dtype=complex) + + y1_int = f_int(x1_int) + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_int = f_int_epyc(x2_int) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_int == y2_int and np.array_equal(x1_int, x2_int) + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) + +def test_augassign_mul_2d(language): + f_int = mod.augassign_mul_2d_int + f_float = mod.augassign_mul_2d_float + f_complex = mod.augassign_mul_2d_complex + f_int_epyc = epyccel(f_int, language = language) + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_int = np.zeros((5, 5), dtype=int) + x1_float = np.zeros((5, 5), dtype=float) + x1_complex = np.zeros((5, 5), dtype=complex) + x2_int = np.zeros((5, 5), dtype=int) + x2_float = np.zeros((5, 5), dtype=float) + x2_complex = np.zeros((5, 5), dtype=complex) + + y1_int = f_int(x1_int) + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_int = f_int_epyc(x2_int) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_int == y2_int and np.array_equal(x1_int, x2_int) + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) + + +# /= tests + +def test_augassign_div_1d(language): + f_float = mod.augassign_div_1d_float + f_complex = mod.augassign_div_1d_complex + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_float = np.zeros(5, dtype=float) + x1_complex = np.zeros(5, dtype=complex) + x2_float = np.zeros(5, dtype=float) + x2_complex = np.zeros(5, dtype=complex) + + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) + +def test_augassign_div_2d(language): + f_float = mod.augassign_div_2d_float + f_complex = mod.augassign_div_2d_complex + f_float_epyc = epyccel(f_float, language = language) + f_complex_epyc = epyccel(f_complex, language = language) + + x1_float = np.zeros((5, 5), dtype=float) + x1_complex = np.zeros((5, 5), dtype=complex) + x2_float = np.zeros((5, 5), 
dtype=float) + x2_complex = np.zeros((5, 5), dtype=complex) + + y1_float = f_float(x1_float) + y1_complex = f_complex(x1_complex) + y2_float = f_float_epyc(x2_float) + y2_complex = f_complex_epyc(x2_complex) + + assert y1_float == y2_float and np.array_equal(x1_float, x2_float) + assert y1_complex == y2_complex and np.array_equal(x1_complex, x2_complex) diff --git a/tests/epyccel/test_epyccel_complex_func.py b/tests/epyccel/test_epyccel_complex_func.py index 8ad32c038b..67ea1df4c5 100644 --- a/tests/epyccel/test_epyccel_complex_func.py +++ b/tests/epyccel/test_epyccel_complex_func.py @@ -7,6 +7,9 @@ import modules.complex_func as mod from pyccel.epyccel import epyccel +ATOL = 1e-15 +RTOL = 2e-14 + @pytest.mark.parametrize("f", [ mod.create_complex_literal__int_int, mod.create_complex_literal__int_float, mod.create_complex_literal__int_complex, @@ -43,7 +46,7 @@ def test_create_complex_var__complex_float(language): a = complex(randint(100), randint(100)) b = rand()*100 - assert f_epyc(a,b) == f(a,b) + assert np.allclose(f_epyc(a,b), f(a,b), rtol=RTOL, atol=ATOL) def test_create_complex_var__complex_complex(language): f = mod.create_complex_var__complex_complex @@ -51,7 +54,7 @@ def test_create_complex_var__complex_complex(language): a = complex(randint(100), randint(100)) b = complex(randint(100), randint(100)) - assert f_epyc(a,b) == f(a,b) + assert np.allclose(f_epyc(a,b), f(a,b), rtol=RTOL, atol=ATOL) def test_create_complex__int_int(language): f = mod.create_complex__int_int @@ -72,35 +75,35 @@ def test_create_complex__float_float(language): f_epyc = epyccel(f, language = language) a = rand()*100 - assert f_epyc(a) == f(a) + assert np.allclose(f_epyc(a), f(a), rtol=RTOL, atol=ATOL) def test_create_complex_0__float_float(language): f = mod.create_complex_0__float_float f_epyc = epyccel(f, language = language) a = rand()*100 - assert f_epyc(a) == f(a) + assert np.allclose(f_epyc(a), f(a), rtol=RTOL, atol=ATOL) def test_create_complex__complex_complex(language): f = mod.create_complex__complex_complex f_epyc = epyccel(f, language = language) a = complex(randint(100), randint(100)) - assert f_epyc(a) == f(a) + assert np.allclose(f_epyc(a), f(a), rtol=RTOL, atol=ATOL) def test_cast_complex_1(language): f = mod.cast_complex_1 f_epyc = epyccel(f, language = language) a = np.complex64(complex(randint(100), randint(100))) - assert np.isclose(f_epyc(a), f(a), rtol = 1e-7, atol = 1e-8) + assert np.allclose(f_epyc(a), f(a), rtol = 1e-7, atol = 1e-8) def test_cast_complex_2(language): f = mod.cast_complex_2 f_epyc = epyccel(f, language = language) a = np.complex128(complex(randint(100), randint(100))) - assert f_epyc(a) == f(a) + assert np.allclose(f_epyc(a), f(a), rtol=RTOL, atol=ATOL) def test_cast_float_complex(language): f = mod.cast_float_complex @@ -108,4 +111,4 @@ def test_cast_float_complex(language): a = rand()*100 b = complex(randint(100), randint(100)) - assert f_epyc(a,b) == f(a,b) + assert np.allclose(f_epyc(a,b), f(a,b), rtol=RTOL, atol=ATOL) diff --git a/tests/epyccel/test_epyccel_functions.py b/tests/epyccel/test_epyccel_functions.py index 122ebddd5f..52758cd814 100644 --- a/tests/epyccel/test_epyccel_functions.py +++ b/tests/epyccel/test_epyccel_functions.py @@ -68,14 +68,23 @@ def p_func(): with pytest.raises(TypeError): c_func(unexpected_arg) -def test_func_no_args_f1(): +def test_func_no_args_f1(language): def f1(): from numpy import pi value = (2*pi)**(3/2) return value f = epyccel(f1) - assert abs(f()-f1()) < 1e-13 + assert np.isclose(f(), f1(), rtol=RTOL, atol=ATOL) + +def 
test_func_return_constant(language): + def f1(): + from numpy import pi + return pi + + f = epyccel(f1) + assert np.isclose(f(), f1(), rtol=RTOL, atol=ATOL) + #------------------------------------------------------------------------------ def test_decorator_f1(language): @types('int') diff --git a/tests/epyccel/test_epyccel_generators.py b/tests/epyccel/test_epyccel_generators.py index caf1bd4d7b..c81bee0a24 100644 --- a/tests/epyccel/test_epyccel_generators.py +++ b/tests/epyccel/test_epyccel_generators.py @@ -173,4 +173,4 @@ def f(a : 'float[:,:,:,:]'): f_epyc = epyccel(f, language = language) - assert f(x) == f_epyc(x) + assert f(x) == f_epyc(x) \ No newline at end of file diff --git a/tests/epyccel/test_epyccel_modules.py b/tests/epyccel/test_epyccel_modules.py index 061e6c7576..6981333e9a 100644 --- a/tests/epyccel/test_epyccel_modules.py +++ b/tests/epyccel/test_epyccel_modules.py @@ -70,7 +70,18 @@ def test_module_3(language): r = 4.5 x_expected = mod.circle_volume(r) x = modnew.circle_volume(r) - assert np.isclose( x, x_expected, rtol=1e-14, atol=1e-14 ) + assert np.isclose( x, x_expected, rtol=RTOL, atol=ATOL ) + + i = np.random.randint(4,20) + n = np.random.randint(2,8) + arr = np.array(100*np.random.random_sample(n), dtype=int) + x_expected, y_expected = mod.alias(arr, i) + x, y = modnew.alias(arr, i) + + assert np.allclose( x, x_expected, rtol=RTOL, atol=ATOL ) + assert np.allclose( y, y_expected, rtol=RTOL, atol=ATOL ) + assert x.dtype is x_expected.dtype + assert y.dtype is y_expected.dtype def test_module_4(language): import modules.Module_6 as mod diff --git a/tests/epyccel/test_epyccel_optional_args.py b/tests/epyccel/test_epyccel_optional_args.py index ab355cf3de..0b49a70d0f 100644 --- a/tests/epyccel/test_epyccel_optional_args.py +++ b/tests/epyccel/test_epyccel_optional_args.py @@ -6,6 +6,9 @@ from pyccel.epyccel import epyccel from pyccel.decorators import types +RTOL = 2e-14 +ATOL = 1e-15 + @pytest.fixture(scope="module") def Module_5(language): import modules.Module_5 as mod @@ -40,10 +43,10 @@ def f2(x = None): f = epyccel(f2, language = language) # ... - assert f(2.0) == f2(2.0) - assert f() == f2() - assert f(None) == f2(None) - assert f(0.0) == f2(0.0) + assert np.isclose(f(2.0), f2(2.0), rtol=RTOL, atol=ATOL) + assert np.isclose(f(), f2(), rtol=RTOL, atol=ATOL) + assert np.isclose(f(None), f2(None), rtol=RTOL, atol=ATOL) + assert np.isclose(f(0.0), f2(0.0), rtol=RTOL, atol=ATOL) # ... #------------------------------------------------------------------------------ def test_f3(language): @@ -56,9 +59,9 @@ def f3(x = None): f = epyccel(f3, language = language) # ... - assert f(complex(1, 2.2)) == f3(complex(1, 2.2)) - assert f() == f3() - assert f(None) == f3(None) + assert np.isclose(f(complex(1, 2.2)), f3(complex(1, 2.2)), rtol=RTOL, atol=ATOL) + assert np.isclose(f(), f3(), rtol=RTOL, atol=ATOL) + assert np.isclose(f(None), f3(None), rtol=RTOL, atol=ATOL) # ... 
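The hunks above replace exact `==` comparisons with `np.isclose`/`np.allclose` using the shared `RTOL = 2e-14` and `ATOL = 1e-15` constants, since the Fortran or C code generated by `epyccel` is not guaranteed to reproduce CPython's floating-point results bit for bit. A minimal sketch of this comparison pattern, with a hypothetical function `f` standing in for the module functions used by the tests:

```python
import numpy as np
from pyccel.epyccel import epyccel

RTOL = 2e-14  # relative tolerance, as defined in the patched test modules
ATOL = 1e-15  # absolute tolerance, as defined in the patched test modules

def f(x : 'float'):
    # hypothetical example function (not part of the patch)
    from numpy import pi
    return (2 * pi) ** x

f_epyc = epyccel(f, language='fortran')  # compile with the requested backend

# compare the compiled result against the pure-Python reference within tolerances
assert np.isclose(f_epyc(1.5), f(1.5), rtol=RTOL, atol=ATOL)
```

The same tolerances are reused with `np.allclose` for complex-valued and array-valued results elsewhere in these tests.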
#------------------------------------------------------------------------------ def test_f4(language): diff --git a/tests/epyccel/test_epyccel_return_arrays.py b/tests/epyccel/test_epyccel_return_arrays.py index 3b108b936d..ffeed20fa9 100644 --- a/tests/epyccel/test_epyccel_return_arrays.py +++ b/tests/epyccel/test_epyccel_return_arrays.py @@ -228,6 +228,487 @@ def return_array(a, b): assert np.array_equal(f_cmplx128_output, test_cmplx128_output) assert f_cmplx128_output[0].dtype == test_cmplx128_output[0].dtype +def test_return_array_array_op(language): + + @types('int', 'int') + @types('int8', 'int8') + @types('int16', 'int16') + @types('int32', 'int32') + @types('int64', 'int64') + @types('float', 'float') + @types('float32', 'float32') + @types('float64', 'float64') + @types('complex64', 'complex64') + @types('complex128', 'complex128') + def return_array(a, b): + from numpy import array + x = array([a,b], dtype=type(a)) + y = array([a,b], dtype=type(a)) + return x + y + + integer8 = randint(min_int8, max_int8, dtype=np.int8) + integer16 = randint(min_int16, max_int16, dtype=np.int16) + integer = randint(min_int, max_int, dtype=int) + integer32 = randint(min_int32, max_int32, dtype=np.int32) + integer64 = randint(min_int64, max_int64, dtype=np.int64) + + fl = uniform(min_float / 2, max_float / 2) + fl32 = uniform(min_float32 / 2, max_float32 / 2) + fl32 = np.float32(fl32) + fl64 = uniform(min_float64 / 2, max_float64 / 2) + + cmplx128_from_float32 = uniform(low=min_float32 / 2, high=max_float32 / 2) + uniform(low=min_float32 / 2, high=max_float32 / 2) * 1j + # the result of the last operation is a Python complex type which has 8 bytes in the alignment, + # that's why we need to convert it to a numpy.complex64 the needed type. + cmplx64 = np.complex64(cmplx128_from_float32) + cmplx128 = np.complex128(uniform(low=min_float64 / 2, high=max_float64 / 2) + uniform(low=min_float64 / 2, high=max_float64 / 2) * 1j) + + epyccel_func = epyccel(return_array, language=language) + + f_integer_output = epyccel_func(integer, integer) + test_int_output = return_array(integer, integer) + + assert np.array_equal(f_integer_output, test_int_output) + assert f_integer_output[0].dtype == test_int_output[0].dtype + + f_integer8_output = epyccel_func(integer8, integer8) + test_int8_output = return_array(integer8, integer8) + + assert np.array_equal(f_integer8_output, test_int8_output) + assert f_integer8_output[0].dtype == test_int8_output[0].dtype + + f_integer16_output = epyccel_func(integer16, integer16) + test_int16_output = return_array(integer16, integer16) + + assert np.array_equal(f_integer16_output, test_int16_output) + assert f_integer16_output[0].dtype == test_int16_output[0].dtype + + f_integer32_output = epyccel_func(integer32, integer32) + test_int32_output = return_array(integer32, integer32) + + assert np.array_equal(f_integer32_output, test_int32_output) + assert f_integer32_output[0].dtype == test_int32_output[0].dtype + + f_integer64_output = epyccel_func(integer64, integer64) + test_int64_output = return_array(integer64, integer64) + + assert np.array_equal(f_integer64_output, test_int64_output) + assert f_integer64_output[0].dtype == test_int64_output[0].dtype + + f_fl_output = epyccel_func(fl, fl) + test_float_output = return_array(fl, fl) + + assert np.array_equal(f_fl_output, test_float_output) + assert f_fl_output[0].dtype == test_float_output[0].dtype + + f_fl32_output = epyccel_func(fl32, fl32) + test_float32_output = return_array(fl32, fl32) + + assert np.array_equal(f_fl32_output, 
test_float32_output) + assert f_fl32_output[0].dtype == test_float32_output[0].dtype + + f_fl64_output = epyccel_func(fl64, fl64) + test_float64_output = return_array(fl64, fl64) + + assert np.array_equal(f_fl64_output, test_float64_output) + assert f_fl64_output[0].dtype == test_float64_output[0].dtype + + f_cmplx64_output = epyccel_func(cmplx64, cmplx64) + test_cmplx64_output = return_array(cmplx64, cmplx64) + + assert np.array_equal(f_cmplx64_output, test_cmplx64_output) + assert f_cmplx64_output[0].dtype == test_cmplx64_output[0].dtype + + f_cmplx128_output = epyccel_func(cmplx128, cmplx128) + test_cmplx128_output = return_array(cmplx128, cmplx128) + + assert np.array_equal(f_cmplx128_output, test_cmplx128_output) + assert f_cmplx128_output[0].dtype == test_cmplx128_output[0].dtype + +def test_return_multi_array_array_op(language): + + @types('int', 'int') + @types('int8', 'int8') + @types('int16', 'int16') + @types('int32', 'int32') + @types('int64', 'int64') + @types('float', 'float') + @types('float32', 'float32') + @types('float64', 'float64') + @types('complex64', 'complex64') + @types('complex128', 'complex128') + def return_array(a, b): + from numpy import array + x = array([a,b], dtype=type(a)) + y = array([a,b], dtype=type(a)) + return x + y, x - y + + integer8 = randint(min_int8, max_int8, dtype=np.int8) + integer16 = randint(min_int16, max_int16, dtype=np.int16) + integer = randint(min_int, max_int, dtype=int) + integer32 = randint(min_int32, max_int32, dtype=np.int32) + integer64 = randint(min_int64, max_int64, dtype=np.int64) + + fl = uniform(min_float / 2, max_float / 2) + fl32 = uniform(min_float32 / 2, max_float32 / 2) + fl32 = np.float32(fl32) + fl64 = uniform(min_float64 / 2, max_float64 / 2) + + cmplx128_from_float32 = uniform(low=min_float32 / 2, high=max_float32 / 2) + uniform(low=min_float32 / 2, high=max_float32 / 2) * 1j + # the result of the last operation is a Python complex type which has 8 bytes in the alignment, + # that's why we need to convert it to a numpy.complex64 the needed type. 
+ cmplx64 = np.complex64(cmplx128_from_float32) + cmplx128 = np.complex128(uniform(low=min_float64 / 2, high=max_float64 / 2) + uniform(low=min_float64 / 2, high=max_float64 / 2) * 1j) + + epyccel_func = epyccel(return_array, language=language) + + f_integer_output = epyccel_func(integer, integer) + test_int_output = return_array(integer, integer) + + assert np.array_equal(f_integer_output, test_int_output) + assert f_integer_output[0].dtype == test_int_output[0].dtype + + f_integer8_output = epyccel_func(integer8, integer8) + test_int8_output = return_array(integer8, integer8) + + assert np.array_equal(f_integer8_output, test_int8_output) + assert f_integer8_output[0].dtype == test_int8_output[0].dtype + + f_integer16_output = epyccel_func(integer16, integer16) + test_int16_output = return_array(integer16, integer16) + + assert np.array_equal(f_integer16_output, test_int16_output) + assert f_integer16_output[0].dtype == test_int16_output[0].dtype + + f_integer32_output = epyccel_func(integer32, integer32) + test_int32_output = return_array(integer32, integer32) + + assert np.array_equal(f_integer32_output, test_int32_output) + assert f_integer32_output[0].dtype == test_int32_output[0].dtype + + f_integer64_output = epyccel_func(integer64, integer64) + test_int64_output = return_array(integer64, integer64) + + assert np.array_equal(f_integer64_output, test_int64_output) + assert f_integer64_output[0].dtype == test_int64_output[0].dtype + + f_fl_output = epyccel_func(fl, fl) + test_float_output = return_array(fl, fl) + + assert np.array_equal(f_fl_output, test_float_output) + assert f_fl_output[0].dtype == test_float_output[0].dtype + + f_fl32_output = epyccel_func(fl32, fl32) + test_float32_output = return_array(fl32, fl32) + + assert np.array_equal(f_fl32_output, test_float32_output) + assert f_fl32_output[0].dtype == test_float32_output[0].dtype + + f_fl64_output = epyccel_func(fl64, fl64) + test_float64_output = return_array(fl64, fl64) + + assert np.array_equal(f_fl64_output, test_float64_output) + assert f_fl64_output[0].dtype == test_float64_output[0].dtype + + f_cmplx64_output = epyccel_func(cmplx64, cmplx64) + test_cmplx64_output = return_array(cmplx64, cmplx64) + + assert np.array_equal(f_cmplx64_output, test_cmplx64_output) + assert f_cmplx64_output[0].dtype == test_cmplx64_output[0].dtype + + f_cmplx128_output = epyccel_func(cmplx128, cmplx128) + test_cmplx128_output = return_array(cmplx128, cmplx128) + + assert np.array_equal(f_cmplx128_output, test_cmplx128_output) + assert f_cmplx128_output[0].dtype == test_cmplx128_output[0].dtype + +def test_return_array_scalar_op(language): + + @types('int8') + @types('int16') + @types('int32') + @types('int64') + @types('int') + @types('float32') + @types('float64') + @types('float') + @types('complex64') + @types('complex128') + def return_array_scalar_op(a): + from numpy import ones, int8, int16, int32, int64, float32, float64, complex64, complex128 # pylint: disable=unused-import + x = ones(5, dtype=type(a)) + return x * a + + integer8 = randint(min_int8, max_int8, dtype=np.int8) + integer16 = randint(min_int16, max_int16, dtype=np.int16) + integer = randint(min_int, max_int, dtype=int) + integer32 = randint(min_int32, max_int32, dtype=np.int32) + integer64 = randint(min_int64, max_int64, dtype=np.int64) + + fl = uniform(min_float / 2, max_float / 2) + fl32 = uniform(min_float32 / 2, max_float32 / 2) + fl32 = np.float32(fl32) + fl64 = uniform(min_float64 / 2, max_float64 / 2) + + cmplx128_from_float32 = uniform(low=min_float32 / 2, 
high=max_float32 / 2) + uniform(low=min_float32 / 2, high=max_float32 / 2) * 1j + # the result of the last operation is a Python complex type which has 8 bytes in the alignment, + # that's why we need to convert it to a numpy.complex64 the needed type. + cmplx64 = np.complex64(cmplx128_from_float32) + cmplx128 = np.complex128(uniform(low=min_float64 / 2, high=max_float64 / 2) + uniform(low=min_float64 / 2, high=max_float64 / 2) * 1j) + + epyccel_func = epyccel(return_array_scalar_op, language=language) + + f_integer_output = epyccel_func(integer) + test_int_output = return_array_scalar_op(integer) + + assert np.array_equal(f_integer_output, test_int_output) + assert f_integer_output[0].dtype == test_int_output[0].dtype + + f_integer8_output = epyccel_func(integer8) + test_int8_output = return_array_scalar_op(integer8) + + assert np.array_equal(f_integer8_output, test_int8_output) + assert f_integer8_output[0].dtype == test_int8_output[0].dtype + + f_integer16_output = epyccel_func(integer16) + test_int16_output = return_array_scalar_op(integer16) + + assert np.array_equal(f_integer16_output, test_int16_output) + assert f_integer16_output[0].dtype == test_int16_output[0].dtype + + f_integer32_output = epyccel_func(integer32) + test_int32_output = return_array_scalar_op(integer32) + + assert np.array_equal(f_integer32_output, test_int32_output) + assert f_integer32_output[0].dtype == test_int32_output[0].dtype + + f_integer64_output = epyccel_func(integer64) + test_int64_output = return_array_scalar_op(integer64) + + assert np.array_equal(f_integer64_output, test_int64_output) + assert f_integer64_output[0].dtype == test_int64_output[0].dtype + + f_fl_output = epyccel_func(fl) + test_float_output = return_array_scalar_op(fl) + + assert np.array_equal(f_fl_output, test_float_output) + assert f_fl_output[0].dtype == test_float_output[0].dtype + + f_fl32_output = epyccel_func(fl32) + test_float32_output = return_array_scalar_op(fl32) + + assert np.array_equal(f_fl32_output, test_float32_output) + assert f_fl32_output[0].dtype == test_float32_output[0].dtype + + f_fl64_output = epyccel_func(fl64) + test_float64_output = return_array_scalar_op(fl64) + + assert np.array_equal(f_fl64_output, test_float64_output) + assert f_fl64_output[0].dtype == test_float64_output[0].dtype + + f_cmplx64_output = epyccel_func(cmplx64) + test_cmplx64_output = return_array_scalar_op(cmplx64) + + assert np.array_equal(f_cmplx64_output, test_cmplx64_output) + assert f_cmplx64_output[0].dtype == test_cmplx64_output[0].dtype + + f_cmplx128_output = epyccel_func(cmplx128) + test_cmplx128_output = return_array_scalar_op(cmplx128) + + assert np.array_equal(f_cmplx128_output, test_cmplx128_output) + assert f_cmplx128_output[0].dtype == test_cmplx128_output[0].dtype + +def test_multi_return_array_scalar_op(language): + + @types('int8') + @types('int16') + @types('int32') + @types('int64') + @types('int') + @types('float32') + @types('float64') + @types('float') + @types('complex64') + @types('complex128') + def return_multi_array_scalar_op(a): + from numpy import ones, int8, int16, int32, int64, float32, float64, complex64, complex128 #pylint: disable=unused-import + x = ones(5, dtype=type(a)) + y = ones(5, dtype=type(a)) + return x * a, y * a + + integer8 = randint(min_int8, max_int8, dtype=np.int8) + integer16 = randint(min_int16, max_int16, dtype=np.int16) + integer = randint(min_int, max_int, dtype=int) + integer32 = randint(min_int32, max_int32, dtype=np.int32) + integer64 = randint(min_int64, max_int64, dtype=np.int64) 
+ + fl = uniform(min_float / 2, max_float / 2) + fl32 = uniform(min_float32 / 2, max_float32 / 2) + fl32 = np.float32(fl32) + fl64 = uniform(min_float64 / 2, max_float64 / 2) + + cmplx128_from_float32 = uniform(low=min_float32 / 2, high=max_float32 / 2) + uniform(low=min_float32 / 2, high=max_float32 / 2) * 1j + # the result of the last operation is a Python complex type which has 8 bytes in the alignment, + # that's why we need to convert it to a numpy.complex64 the needed type. + cmplx64 = np.complex64(cmplx128_from_float32) + cmplx128 = np.complex128(uniform(low=min_float64 / 2, high=max_float64 / 2) + uniform(low=min_float64 / 2, high=max_float64 / 2) * 1j) + + epyccel_func = epyccel(return_multi_array_scalar_op, language=language) + + f_integer_output = epyccel_func(integer) + test_int_output = return_multi_array_scalar_op(integer) + + assert np.array_equal(f_integer_output, test_int_output) + assert f_integer_output[0].dtype == test_int_output[0].dtype + + f_integer8_output = epyccel_func(integer8) + test_int8_output = return_multi_array_scalar_op(integer8) + + assert np.array_equal(f_integer8_output, test_int8_output) + assert f_integer8_output[0].dtype == test_int8_output[0].dtype + + f_integer16_output = epyccel_func(integer16) + test_int16_output = return_multi_array_scalar_op(integer16) + + assert np.array_equal(f_integer16_output, test_int16_output) + assert f_integer16_output[0].dtype == test_int16_output[0].dtype + + f_integer32_output = epyccel_func(integer32) + test_int32_output = return_multi_array_scalar_op(integer32) + + assert np.array_equal(f_integer32_output, test_int32_output) + assert f_integer32_output[0].dtype == test_int32_output[0].dtype + + f_integer64_output = epyccel_func(integer64) + test_int64_output = return_multi_array_scalar_op(integer64) + + assert np.array_equal(f_integer64_output, test_int64_output) + assert f_integer64_output[0].dtype == test_int64_output[0].dtype + + f_fl_output = epyccel_func(fl) + test_float_output = return_multi_array_scalar_op(fl) + + assert np.array_equal(f_fl_output, test_float_output) + assert f_fl_output[0].dtype == test_float_output[0].dtype + + f_fl32_output = epyccel_func(fl32) + test_float32_output = return_multi_array_scalar_op(fl32) + + assert np.array_equal(f_fl32_output, test_float32_output) + assert f_fl32_output[0].dtype == test_float32_output[0].dtype + + f_fl64_output = epyccel_func(fl64) + test_float64_output = return_multi_array_scalar_op(fl64) + + assert np.array_equal(f_fl64_output, test_float64_output) + assert f_fl64_output[0].dtype == test_float64_output[0].dtype + + f_cmplx64_output = epyccel_func(cmplx64) + test_cmplx64_output = return_multi_array_scalar_op(cmplx64) + + assert np.array_equal(f_cmplx64_output, test_cmplx64_output) + assert f_cmplx64_output[0].dtype == test_cmplx64_output[0].dtype + + f_cmplx128_output = epyccel_func(cmplx128) + test_cmplx128_output = return_multi_array_scalar_op(cmplx128) + + assert np.array_equal(f_cmplx128_output, test_cmplx128_output) + assert f_cmplx128_output[0].dtype == test_cmplx128_output[0].dtype + +def test_multi_return_array_array_op(language): + + @types('int8[:]') + @types('int16[:]') + @types('int32[:]') + @types('int64[:]') + @types('int[:]') + @types('float32[:]') + @types('float64[:]') + @types('float[:]') + @types('complex64[:]') + @types('complex128[:]') + def return_array_arg_array_op(a): + from numpy import ones + x = ones(7) + return x * a + + arr_integer8 = np.ones(7, dtype=np.int8) + arr_integer16 = np.ones(7, dtype=np.int16) + arr_integer = 
np.ones(7, dtype=int) + arr_integer32 = np.ones(7, dtype=np.int32) + arr_integer64 = np.ones(7, dtype=np.int64) + + arr_fl = np.ones(7, dtype=float) + arr_fl32 = np.ones(7, dtype=np.float32) + arr_fl64 = np.ones(7, dtype=np.float64) + + # the result of the last operation is a Python complex type which has 8 bytes in the alignment, + # that's why we need to convert it to a numpy.complex64 the needed type. + cmplx64 = np.ones(7, dtype=np.float32) + np.ones(7, dtype=np.float32) * 1j + cmplx128 = np.ones(7, dtype=np.float64) + np.ones(7, dtype=np.float64) * 1j + + epyccel_func = epyccel(return_array_arg_array_op, language=language) + + f_integer_output = epyccel_func(arr_integer) + test_int_output = return_array_arg_array_op(arr_integer) + + assert np.array_equal(f_integer_output, test_int_output) + assert f_integer_output[0].dtype == test_int_output[0].dtype + + f_integer8_output = epyccel_func(arr_integer8) + test_int8_output = return_array_arg_array_op(arr_integer8) + + assert np.array_equal(f_integer8_output, test_int8_output) + assert f_integer8_output[0].dtype == test_int8_output[0].dtype + + f_integer16_output = epyccel_func(arr_integer16) + test_int16_output = return_array_arg_array_op(arr_integer16) + + assert np.array_equal(f_integer16_output, test_int16_output) + assert f_integer16_output[0].dtype == test_int16_output[0].dtype + + f_integer32_output = epyccel_func(arr_integer32) + test_int32_output = return_array_arg_array_op(arr_integer32) + + assert np.array_equal(f_integer32_output, test_int32_output) + assert f_integer32_output[0].dtype == test_int32_output[0].dtype + + f_integer64_output = epyccel_func(arr_integer64) + test_int64_output = return_array_arg_array_op(arr_integer64) + + assert np.array_equal(f_integer64_output, test_int64_output) + assert f_integer64_output[0].dtype == test_int64_output[0].dtype + + f_fl_output = epyccel_func(arr_fl) + test_float_output = return_array_arg_array_op(arr_fl) + + assert np.array_equal(f_fl_output, test_float_output) + assert f_fl_output[0].dtype == test_float_output[0].dtype + + f_fl32_output = epyccel_func(arr_fl32) + test_float32_output = return_array_arg_array_op(arr_fl32) + + assert np.array_equal(f_fl32_output, test_float32_output) + assert f_fl32_output[0].dtype == test_float32_output[0].dtype + + f_fl64_output = epyccel_func(arr_fl64) + test_float64_output = return_array_arg_array_op(arr_fl64) + + assert np.array_equal(f_fl64_output, test_float64_output) + assert f_fl64_output[0].dtype == test_float64_output[0].dtype + + f_cmplx64_output = epyccel_func(cmplx64) + test_cmplx64_output = return_array_arg_array_op(cmplx64) + + assert np.array_equal(f_cmplx64_output, test_cmplx64_output) + assert f_cmplx64_output[0].dtype == test_cmplx64_output[0].dtype + + f_cmplx128_output = epyccel_func(cmplx128) + test_cmplx128_output = return_array_arg_array_op(cmplx128) + + assert np.array_equal(f_cmplx128_output, test_cmplx128_output) + assert f_cmplx128_output[0].dtype == test_cmplx128_output[0].dtype + @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), pytest.param("c", marks = [ diff --git a/tests/epyccel/test_epyccel_sign.py b/tests/epyccel/test_epyccel_sign.py new file mode 100644 index 0000000000..5655e8554c --- /dev/null +++ b/tests/epyccel/test_epyccel_sign.py @@ -0,0 +1,310 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +import numpy as np +import modules.numpy_sign as mod + +from pyccel.epyccel import epyccel + +def test_sign_complex(language): + f_nul = 
mod.complex_nul + f_pos = mod.complex_pos + f_neg = mod.complex_neg + f_pos_neg = mod.complex_pos_neg + f_neg_pos = mod.complex_neg_pos + f_nul_epyc = epyccel(f_nul, language = language) + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + f_pos_neg_epyc = epyccel(f_pos_neg, language = language) + f_neg_pos_epyc = epyccel(f_neg_pos, language = language) + + x1_nul, x2_nul = f_nul(), f_nul_epyc() + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + x1_pos_neg, x2_pos_neg = f_pos_neg(), f_pos_neg_epyc() + x1_neg_pos, x2_neg_pos = f_neg_pos(), f_neg_pos_epyc() + + assert x1_nul == x2_nul and x1_nul.dtype == x2_nul.dtype + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + assert x1_pos_neg == x2_pos_neg and x1_pos_neg.dtype == x2_pos_neg.dtype + assert x1_neg_pos == x2_neg_pos and x1_neg_pos.dtype == x2_neg_pos.dtype + +def test_sign_complex64(language): + f_nul = mod.complex64_nul + f_pos = mod.complex64_pos + f_neg = mod.complex64_neg + f_pos_neg = mod.complex64_pos_neg + f_neg_pos = mod.complex64_neg_pos + f_nul_epyc = epyccel(f_nul, language = language) + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + f_pos_neg_epyc = epyccel(f_pos_neg, language = language) + f_neg_pos_epyc = epyccel(f_neg_pos, language = language) + + x1_nul, x2_nul = f_nul(), f_nul_epyc() + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + x1_pos_neg, x2_pos_neg = f_pos_neg(), f_pos_neg_epyc() + x1_neg_pos, x2_neg_pos = f_neg_pos(), f_neg_pos_epyc() + + assert x1_nul == x2_nul and x1_nul.dtype == x2_nul.dtype + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + assert x1_pos_neg == x2_pos_neg and x1_pos_neg.dtype == x2_pos_neg.dtype + assert x1_neg_pos == x2_neg_pos and x1_neg_pos.dtype == x2_neg_pos.dtype + +def test_sign_complex128(language): + f_nul = mod.complex128_nul + f_pos = mod.complex128_pos + f_neg = mod.complex128_neg + f_pos_neg = mod.complex128_pos_neg + f_neg_pos = mod.complex128_neg_pos + f_nul_epyc = epyccel(f_nul, language = language) + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + f_pos_neg_epyc = epyccel(f_pos_neg, language = language) + f_neg_pos_epyc = epyccel(f_neg_pos, language = language) + + x1_nul, x2_nul = f_nul(), f_nul_epyc() + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + x1_pos_neg, x2_pos_neg = f_pos_neg(), f_pos_neg_epyc() + x1_neg_pos, x2_neg_pos = f_neg_pos(), f_neg_pos_epyc() + + assert x1_nul == x2_nul and x1_nul.dtype == x2_nul.dtype + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + assert x1_pos_neg == x2_pos_neg and x1_pos_neg.dtype == x2_pos_neg.dtype + assert x1_neg_pos == x2_neg_pos and x1_neg_pos.dtype == x2_neg_pos.dtype + +def test_sign_int16(language): + f_pos = mod.int16_pos + f_neg = mod.int16_neg + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + +def test_sign_int32(language): + f_pos = mod.int32_pos + f_neg = mod.int32_neg + f_pos_epyc = epyccel(f_pos, 
language = language) + f_neg_epyc = epyccel(f_neg, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + +def test_sign_int64(language): + f_pos = mod.int64_pos + f_neg = mod.int64_neg + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + +def test_sign_float(language): + f_pos = mod.float_pos + f_neg = mod.float_neg + f_nul = mod.float_nul + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + f_nul_epyc = epyccel(f_nul, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + x1_nul, x2_nul = f_nul(), f_nul_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + assert x1_nul == x2_nul and x1_nul.dtype == x2_nul.dtype + +def test_sign_float64(language): + f_pos = mod.float64_pos + f_neg = mod.float64_neg + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + +def test_sign_literal_complex(language): + f_pos = mod.literal_complex_pos + f_neg = mod.literal_complex_neg + f_nul = mod.literal_complex_nul_nul + f_nul_imag = mod.literal_complex_nul_imag + f_real_nul = mod.literal_complex_real_nul + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + f_nul_epyc = epyccel(f_nul, language = language) + f_nul_imag_epyc = epyccel(f_nul_imag, language = language) + f_real_nul_epyc = epyccel(f_real_nul, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + x1_nul, x2_nul = f_nul(), f_nul_epyc() + x1_nul_imag, x2_nul_imag = f_nul_imag(), f_nul_imag_epyc() + x1_real_nul, x2_real_nul = f_real_nul(), f_real_nul_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + assert x1_nul == x2_nul and x1_nul.dtype == x2_nul.dtype + assert x1_nul_imag == x2_nul_imag and x1_nul_imag.dtype == x2_nul_imag.dtype + assert x1_real_nul == x2_real_nul and x1_real_nul.dtype == x2_real_nul.dtype + +def test_sign_literal_int(language): + f_pos = mod.literal_int_pos + f_neg = mod.literal_int_neg + f_nul = mod.literal_int_nul + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + f_nul_epyc = epyccel(f_nul, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + x1_nul, x2_nul = f_nul(), f_nul_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + assert x1_nul == x2_nul and x1_nul.dtype == x2_nul.dtype + +def test_sign_literal_float(language): + f_pos = mod.literal_float_pos + f_neg = mod.literal_float_neg + f_nul = mod.literal_float_nul + f_pos_epyc = epyccel(f_pos, language = language) + f_neg_epyc = epyccel(f_neg, language = language) + 
f_nul_epyc = epyccel(f_nul, language = language) + + x1_pos, x2_pos = f_pos(), f_pos_epyc() + x1_neg, x2_neg = f_neg(), f_neg_epyc() + x1_nul, x2_nul = f_nul(), f_nul_epyc() + + assert x1_pos == x2_pos and x1_pos.dtype == x2_pos.dtype + assert x1_neg == x2_neg and x1_neg.dtype == x2_neg.dtype + assert x1_nul == x2_nul and x1_nul.dtype == x2_nul.dtype + +# Tests on arrays + +def test_sign_array_1d_int(language): + f_int8 = mod.array_1d_int8 + f_int16 = mod.array_1d_int16 + f_int32 = mod.array_1d_int32 + f_int64 = mod.array_1d_int64 + f_int8_epyc = epyccel(f_int8, language = language) + f_int16_epyc = epyccel(f_int16, language = language) + f_int32_epyc = epyccel(f_int32, language = language) + f_int64_epyc = epyccel(f_int64, language = language) + + arr8 = np.array([2, 0, -0, -13, 37, 42], dtype=np.int8) + arr16 = np.array([2, 0, -0, -13, 37, 42], dtype=np.int16) + arr32 = np.array([2, 0, -0, -13, 37, 42], dtype=np.int32) + arr64 = np.array([2, 0, -0, -13, 37, 42], dtype=np.int64) + + x_int8, y_int8 = f_int8(arr8), f_int8_epyc(arr8) + x_int16, y_int16 = f_int16(arr16), f_int16_epyc(arr16) + x_int32, y_int32 = f_int32(arr32), f_int32_epyc(arr32) + x_int64, y_int64 = f_int64(arr64), f_int64_epyc(arr64) + + assert np.array_equal(x_int8, y_int8) and x_int8.dtype == y_int8.dtype + assert np.array_equal(x_int16, y_int16) and x_int16.dtype == y_int16.dtype + assert np.array_equal(x_int32, y_int32) and x_int32.dtype == y_int32.dtype + assert np.array_equal(x_int64, y_int64) and x_int64.dtype == y_int64.dtype + +def test_sign_array_2d_int(language): + f_int8 = mod.array_2d_int8 + f_int16 = mod.array_2d_int16 + f_int32 = mod.array_2d_int32 + f_int64 = mod.array_2d_int64 + f_int8_epyc = epyccel(f_int8, language = language) + f_int16_epyc = epyccel(f_int16, language = language) + f_int32_epyc = epyccel(f_int32, language = language) + f_int64_epyc = epyccel(f_int64, language = language) + + arr8 = np.array([[2, 0], [-0, -13], [37, 42]], dtype=np.int8) + arr16 = np.array([[2, 0], [-0, -13], [37, 42]], dtype=np.int16) + arr32 = np.array([[2, 0], [-0, -13], [37, 42]], dtype=np.int32) + arr64 = np.array([[2, 0], [-0, -13], [37, 42]], dtype=np.int64) + + x_int8, y_int8 = f_int8(arr8), f_int8_epyc(arr8) + x_int16, y_int16 = f_int16(arr16), f_int16_epyc(arr16) + x_int32, y_int32 = f_int32(arr32), f_int32_epyc(arr32) + x_int64, y_int64 = f_int64(arr64), f_int64_epyc(arr64) + + assert np.array_equal(x_int8, y_int8) and x_int8.dtype == y_int8.dtype + assert np.array_equal(x_int16, y_int16) and x_int16.dtype == y_int16.dtype + assert np.array_equal(x_int32, y_int32) and x_int32.dtype == y_int32.dtype + assert np.array_equal(x_int64, y_int64) and x_int64.dtype == y_int64.dtype + +def test_sign_array_1d_float(language): + f_float32 = mod.array_1d_float32 + f_float64 = mod.array_1d_float64 + f_float32_epyc = epyccel(f_float32, language = language) + f_float64_epyc = epyccel(f_float64, language = language) + + arr32 = np.array([2., 0., -0., -1.3, 3.7, .42], dtype=np.float32) + arr64 = np.array([2., 0., -0., -1.3, 3.7, .42], dtype=np.float64) + + x_float32, y_float32 = f_float32(arr32), f_float32_epyc(arr32) + x_float64, y_float64 = f_float64(arr64), f_float64_epyc(arr64) + + assert np.array_equal(x_float32, y_float32) and x_float32.dtype == y_float32.dtype + assert np.array_equal(x_float64, y_float64) and x_float64.dtype == y_float64.dtype + +def test_sign_array_2d_float(language): + f_float32 = mod.array_2d_float32 + f_float64 = mod.array_2d_float64 + f_float32_epyc = epyccel(f_float32, language = language) + 
f_float64_epyc = epyccel(f_float64, language = language) + + arr32 = np.array([[2., 0.], [-0., -1.3], [3.7, .42]], dtype=np.float32) + arr64 = np.array([[2., 0.], [-0., -1.3], [3.7, .42]], dtype=np.float64) + + x_float32, y_float32 = f_float32(arr32), f_float32_epyc(arr32) + x_float64, y_float64 = f_float64(arr64), f_float64_epyc(arr64) + + assert np.array_equal(x_float32, y_float32) and x_float32.dtype == y_float32.dtype + assert np.array_equal(x_float64, y_float64) and x_float64.dtype == y_float64.dtype + +def test_sign_array_1d_complex(language): + f_complex64 = mod.array_1d_complex64 + f_complex128 = mod.array_1d_complex128 + f_complex64_epyc = epyccel(f_complex64, language = language) + f_complex128_epyc = epyccel(f_complex128, language = language) + + arr64 = np.array([0.+0j, 0.j, 1.+2.j, -1.+2.j, 1.-2.j, -1.-2.j, 2.j, -2.j], dtype=np.complex64) + arr128 = np.array([0.+0j, 0.j, 1.+2.j, -1.+2.j, 1.-2.j, -1.-2.j, 2.j, -2.j], dtype=np.complex128) + + x_complex64, y_complex64 = f_complex64(arr64), f_complex64_epyc(arr64) + x_complex128, y_complex128 = f_complex128(arr128), f_complex128_epyc(arr128) + + assert np.array_equal(x_complex64, y_complex64) and x_complex64.dtype == y_complex64.dtype + assert np.array_equal(x_complex128, y_complex128) and x_complex128.dtype == y_complex128.dtype + +def test_sign_array_2d_complex(language): + f_complex64 = mod.array_2d_complex64 + f_complex128 = mod.array_2d_complex128 + f_complex64_epyc = epyccel(f_complex64, language = language) + f_complex128_epyc = epyccel(f_complex128, language = language) + + arr64 = np.array([[0.+0j, 0.j], [1.+2.j, -1.+2.j], [1.-2.j, -1.-2.j], [2.j, -2.j]], dtype=np.complex64) + arr128 = np.array([[0.+0j, 0.j], [1.+2.j, -1.+2.j], [1.-2.j, -1.-2.j], [2.j, -2.j]], dtype=np.complex128) + + x_complex64, y_complex64 = f_complex64(arr64), f_complex64_epyc(arr64) + x_complex128, y_complex128 = f_complex128(arr128), f_complex128_epyc(arr128) + + assert np.array_equal(x_complex64, y_complex64) and x_complex64.dtype == y_complex64.dtype + assert np.array_equal(x_complex128, y_complex128) and x_complex128.dtype == y_complex128.dtype diff --git a/tests/epyccel/test_generic_functions.py b/tests/epyccel/test_generic_functions.py index 5f1bcf802b..62dba5d8e7 100644 --- a/tests/epyccel/test_generic_functions.py +++ b/tests/epyccel/test_generic_functions.py @@ -317,7 +317,7 @@ def test_mix_int_array(language): f2(x2, a) assert np.array_equal( x1, x2 ) - x1 = np.array([166,20,-5], dtype=np.int8) + x1 = np.array([126,20,-5], dtype=np.int8) x2 = np.copy(x1) f1(x1, a) f2(x2, a) @@ -418,4 +418,3 @@ def test_zeros_types(language): assert fl_1 == fl_2 assert isinstance(fl_1, type(fl_2)) - diff --git a/tests/epyccel/test_return.py b/tests/epyccel/test_return.py index 07cce2a0f3..e3482b9ffd 100644 --- a/tests/epyccel/test_return.py +++ b/tests/epyccel/test_return.py @@ -174,3 +174,24 @@ def divide_by(a : 'float[:]', b : 'float'): # pylint: disable=inconsistent-retur divide_by(x_copy,b) assert np.allclose(x, x_copy, rtol=1e-13, atol=1e-14) +def test_arg_arr_element_op(language): + def return_mult_arr_arg_element(i: 'int', arg:'float[:]'): + import numpy as np + a = np.ones(i) + return a[0] * arg[0] + def return_add_arr_arg_element(i: 'int', arg:'float[:]'): + import numpy as np + a = np.ones(i) + return a[0] + arg[0] + def return_op_arr_arg_element(i: 'int', arg:'float[:]'): + import numpy as np + a = np.ones(i) + return ((a[2] + arg[0]) * arg[2] - 2) / 4 * 2 + + arr = np.array([1,2,3,4], dtype=float) + epyc_return_mult_arr_arg_element = 
epyccel(return_mult_arr_arg_element, language=language, fflags="-Werror -Wunused-variable") + assert (epyc_return_mult_arr_arg_element(7, arr) == return_mult_arr_arg_element(7, arr)) + epyc_return_add_arr_arg_element = epyccel(return_add_arr_arg_element, language=language, fflags="-Werror -Wunused-variable") + assert (epyc_return_add_arr_arg_element(7, arr) == return_add_arr_arg_element(7, arr)) + epyc_return_op_arr_arg_element = epyccel(return_op_arr_arg_element, language=language, fflags="-Werror -Wunused-variable") + assert (epyc_return_op_arr_arg_element(7, arr) == return_op_arr_arg_element(7, arr)) diff --git a/tests/pyccel/scripts/array_binary_operation.py b/tests/pyccel/scripts/array_binary_operation.py new file mode 100644 index 0000000000..e52c028aa9 --- /dev/null +++ b/tests/pyccel/scripts/array_binary_operation.py @@ -0,0 +1,92 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ +import numpy as np +from pyccel.decorators import types + +@types('int', 'int') +def my_pow(n, m): + return n ** m + +def array_func_mult(): + arr = np.array([1,2,3,4]) + arr1 = arr * my_pow(2, 3) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_func_div(): + arr = np.array([1,2,3,4]) + arr1 = arr / my_pow(2, 3) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_1(): + arr = np.array([1,2,3,4]) + arr1 = np.array(arr * 2) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_2(): + arr = np.array([1,2,3,4]) + arr1 = np.array(arr / 2) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_3(): + arr = np.array([1,2,3,4]) + arr1 = np.array(arr * my_pow(2, 2)) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_4(): + arr = np.array([1,2,3,4]) + arr1 = np.array(arr / my_pow(2, 2) + arr * 2) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_5(): + arr = np.array([1,2,3,4]) + arr1 = np.where(arr > 5, arr, (arr * 2) + arr) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_6(): + arr = np.array([1,2,3,4]) + arr1 = np.where(arr < 5, arr / 2, arr * 2) + shape = np.shape(arr1) + return arr[0], arr1[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_7(): + arr = np.array([1,2,3,4]) + arr1 = np.array([4,3,2,1]) + arr2 = np.array(arr + arr1) + shape = np.shape(arr2) + return arr[0], arr2[0], len(shape), shape[0] + +def array_arithmetic_op_func_call_8(): + arr = np.array([1,2,3,4]) + arr1 = np.array([4,3,2,1]) + arr2 = np.array(arr - arr1) + shape = np.shape(arr2) + return arr[0], arr2[0], len(shape), shape[0] + + +if __name__ == "__main__": + a_0, a1_0, ls_0, s_0 = array_func_mult() + print(a_0, a1_0, ls_0, s_0) + a_1, a1_1, ls_1, s_1 = array_func_div() + print(a_1, a1_1, ls_1, s_1) + a_2, a1_2, ls_2, s_2 = array_arithmetic_op_func_call_1() + print(a_2, a1_2, ls_2, s_2) + a_3, a1_3, ls_3, s_3 = array_arithmetic_op_func_call_2() + print(a_3, a1_3, ls_3, s_3) + a_4, a1_4, ls_4, s_4 = array_arithmetic_op_func_call_3() + print(a_4, a1_4, ls_4, s_4) + a_5, a1_5, ls_5, s_5 = array_arithmetic_op_func_call_4() + print(a_5, a1_5, ls_5, s_5) + a_6, a1_6, ls_6, s_6 = array_arithmetic_op_func_call_5() + print(a_6, a1_6, ls_6, s_6) + a_7, a1_7, ls_7, s_7 = array_arithmetic_op_func_call_6() + print(a_7, a1_7, ls_7, s_7) + 
a_8, a1_8, ls_8, s_8 = array_arithmetic_op_func_call_7() + print(a_8, a1_8, ls_8, s_8) + a_9, a1_9, ls_9, s_9 = array_arithmetic_op_func_call_8() + print(a_9, a1_9, ls_9, s_9) diff --git a/tests/pyccel/scripts/exits/empty_exit.py b/tests/pyccel/scripts/exits/empty_exit.py new file mode 100644 index 0000000000..e1a274c1e4 --- /dev/null +++ b/tests/pyccel/scripts/exits/empty_exit.py @@ -0,0 +1,6 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +import sys + +if __name__ == "__main__": + sys.exit() diff --git a/tests/pyccel/scripts/exits/negative_exit1.py b/tests/pyccel/scripts/exits/negative_exit1.py new file mode 100644 index 0000000000..e848e36738 --- /dev/null +++ b/tests/pyccel/scripts/exits/negative_exit1.py @@ -0,0 +1,6 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +from sys import exit as sys_exit + +if __name__ == "__main__": + sys_exit(-1) diff --git a/tests/pyccel/scripts/exits/negative_exit2.py b/tests/pyccel/scripts/exits/negative_exit2.py new file mode 100644 index 0000000000..096997ed50 --- /dev/null +++ b/tests/pyccel/scripts/exits/negative_exit2.py @@ -0,0 +1,6 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +import sys + +if __name__ == "__main__": + sys.exit(-345) diff --git a/tests/pyccel/scripts/exits/positive_exit1.py b/tests/pyccel/scripts/exits/positive_exit1.py new file mode 100644 index 0000000000..8b13dfc8d4 --- /dev/null +++ b/tests/pyccel/scripts/exits/positive_exit1.py @@ -0,0 +1,6 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +import sys + +if __name__ == "__main__": + sys.exit(1) diff --git a/tests/pyccel/scripts/exits/positive_exit2.py b/tests/pyccel/scripts/exits/positive_exit2.py new file mode 100644 index 0000000000..3d0bfff6dc --- /dev/null +++ b/tests/pyccel/scripts/exits/positive_exit2.py @@ -0,0 +1,6 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +import sys + +if __name__ == "__main__": + sys.exit(1024) diff --git a/tests/pyccel/scripts/exits/positive_exit3.py b/tests/pyccel/scripts/exits/positive_exit3.py new file mode 100644 index 0000000000..b4cf7f9b27 --- /dev/null +++ b/tests/pyccel/scripts/exits/positive_exit3.py @@ -0,0 +1,7 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +from sys import exit as sys_exit + +if __name__ == "__main__": + exit_code = 2147483647 + sys_exit(exit_code) diff --git a/tests/pyccel/scripts/exits/zero_exit.py b/tests/pyccel/scripts/exits/zero_exit.py new file mode 100644 index 0000000000..d8984534c2 --- /dev/null +++ b/tests/pyccel/scripts/exits/zero_exit.py @@ -0,0 +1,6 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ + +import sys + +if __name__ == "__main__": + sys.exit(0) diff --git a/tests/pyccel/scripts/numpy/numpy_sign.py b/tests/pyccel/scripts/numpy/numpy_sign.py new file mode 100644 index 0000000000..c17c806602 --- /dev/null +++ b/tests/pyccel/scripts/numpy/numpy_sign.py @@ -0,0 +1,84 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ +import numpy as np + +if __name__ == "__main__": + print(np.sign(0)) + print(np.sign(-0)) + print(np.sign(np.int8(0))) + print(np.sign(np.int8(-0))) + print(np.sign(np.int16(0))) + print(np.sign(np.int16(-0))) + print(np.sign(np.int32(0))) + print(np.sign(np.int32(-0))) + print(np.sign(np.int64(0))) + print(np.sign(np.int64(-0))) + print(np.sign(42)) + print(np.sign(-42)) + print(np.sign(np.int8(42))) + print(np.sign(np.int8(-42))) + 
print(np.sign(np.int16(42))) + print(np.sign(np.int16(-42))) + print(np.sign(np.int32(42))) + print(np.sign(np.int32(-42))) + print(np.sign(np.int64(42))) + print(np.sign(np.int64(-42))) + print(np.sign(0.0)) + print(np.sign(-0.0)) + print(np.sign(np.float32(0.0))) + print(np.sign(np.float32(-0.0))) + print(np.sign(np.float64(0.0))) + print(np.sign(np.float64(-0.0))) + print(np.sign(4.2)) + print(np.sign(-4.2)) + print(np.sign(np.float32(4.2))) + print(np.sign(np.float32(-4.2))) + print(np.sign(np.float64(4.2))) + print(np.sign(np.float64(-4.2))) + print(np.sign(0-0j)) + print(np.sign(-0-0j)) + print(np.sign(np.complex64(0-0j))) + print(np.sign(np.complex64(-0-0j))) + print(np.sign(np.complex128(0-0j))) + print(np.sign(np.complex128(-0-0j))) + print(np.sign(0+0j)) + print(np.sign(-0+0j)) + print(np.sign(np.complex64(0+0j))) + print(np.sign(np.complex64(-0+0j))) + print(np.sign(np.complex128(0+0j))) + print(np.sign(np.complex128(-0+0j))) + print(np.sign(4+2j)) + print(np.sign(-4+2j)) + print(np.sign(np.complex64(4+2j))) + print(np.sign(np.complex64(-4+2j))) + print(np.sign(np.complex128(4+2j))) + print(np.sign(np.complex128(-4+2j))) + print(np.sign(0+2j)) + print(np.sign(-0+2j)) + print(np.sign(np.complex64(0+2j))) + print(np.sign(np.complex64(-0+2j))) + print(np.sign(np.complex128(0+2j))) + print(np.sign(np.complex128(-0+2j))) + print(np.sign(4+0j)) + print(np.sign(-4+0j)) + print(np.sign(np.complex64(4+0j))) + print(np.sign(np.complex64(-4+0j))) + print(np.sign(np.complex128(4+0j))) + print(np.sign(np.complex128(-4+0j))) + print(np.sign(4-2j)) + print(np.sign(-4-2j)) + print(np.sign(np.complex64(4-2j))) + print(np.sign(np.complex64(-4-2j))) + print(np.sign(np.complex128(4-2j))) + print(np.sign(np.complex128(-4-2j))) + print(np.sign(0-2j)) + print(np.sign(-0-2j)) + print(np.sign(np.complex64(0-2j))) + print(np.sign(np.complex64(-0-2j))) + print(np.sign(np.complex128(0-2j))) + print(np.sign(np.complex128(-0-2j))) + print(np.sign(4-0j)) + print(np.sign(-4-0j)) + print(np.sign(np.complex64(4-0j))) + print(np.sign(np.complex64(-4-0j))) + print(np.sign(np.complex128(4-0j))) + print(np.sign(np.complex128(-4-0j))) diff --git a/tests/pyccel/scripts/print_integers.py b/tests/pyccel/scripts/print_integers.py new file mode 100644 index 0000000000..bad1f54557 --- /dev/null +++ b/tests/pyccel/scripts/print_integers.py @@ -0,0 +1,37 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ +# ------------------------------- Strings ------------------------------------ + +from numpy import int32, int64, int16, int8 +if __name__ == '__main__': + print(0) + print(00) + print(1) + print(-1) + print(-0) + print(10000) + print(-10000) + print(2147483647) + + print(int64(2147483648)) + print(int64(9223372036854775807)) + + print(int32(0)) + print(int32(00)) + print(int32(1)) + print(int32(-1)) + print(int32(-0)) + print(int32(10000)) + print(int32(-10000)) + print(int32(2147483647)) + + print(int16(0)) + print(int16(-10)) + print(int16(1)) + print(int16(32767)) + print(int16(-32768)) + + print(int8(0)) + print(int8(-10)) + print(int8(1)) + print(int8(127)) + print(int8(-128)) diff --git a/tests/pyccel/scripts/print_tuples.py b/tests/pyccel/scripts/print_tuples.py new file mode 100644 index 0000000000..31cb6a453d --- /dev/null +++ b/tests/pyccel/scripts/print_tuples.py @@ -0,0 +1,12 @@ +# pylint: disable=missing-function-docstring, missing-module-docstring/ +# ------------------------------- Strings ------------------------------------ +if __name__ == '__main__': + print(()) + print((1,)) + 
print((1,2,3)) + print(((1,2),3)) + print(((1,2),(3,))) + print((((1,),2),(3,))) + print((1, True)) + print((1, True), sep=",") + print((1, True), end="!\n") diff --git a/tests/pyccel/scripts/runtest_type_print.py b/tests/pyccel/scripts/runtest_type_print.py index 121b3aa1fb..e9ef5fe840 100644 --- a/tests/pyccel/scripts/runtest_type_print.py +++ b/tests/pyccel/scripts/runtest_type_print.py @@ -2,11 +2,13 @@ import numpy as np if __name__ == '__main__': + print(type(int(3))) print(type(np.int16(3))) print(type(np.int32(3))) print(type(np.int64(3))) + print(type(float(3))) print(type(np.float32(3))) print(type(np.float64(3))) - print(type(np.complex(3))) + print(type(complex(3))) print(type(np.complex64(3))) print(type(np.complex128(3))) diff --git a/tests/pyccel/test_pyccel.py b/tests/pyccel/test_pyccel.py index a5134b8f6d..1d09d47a6b 100644 --- a/tests/pyccel/test_pyccel.py +++ b/tests/pyccel/test_pyccel.py @@ -634,6 +634,16 @@ def test_print_strings(language): types = str pyccel_test("scripts/print_strings.py", language=language, output_dtype=types) +#------------------------------------------------------------------------------ +def test_print_integers(language): + types = str + pyccel_test("scripts/print_integers.py", language=language, output_dtype=types) + +#------------------------------------------------------------------------------ +def test_print_tuples(language): + types = str + pyccel_test("scripts/print_tuples.py", language=language, output_dtype=types) + #------------------------------------------------------------------------------ def test_print_sp_and_end(language): types = str @@ -664,6 +674,19 @@ def test_return_numpy_arrays(language): types += [int]*4 # 4 ints for k pyccel_test("scripts/return_numpy_arrays.py", language=language, output_dtype=types) +#------------------------------------------------------------------------------ +def test_array_binary_op(language): + types = [int] * 4 + types += [int, float, int, int] + types += [int] * 4 + types += [int, float, int, int] + types += [int] * 4 + types += [int, float, int, int] + types += [int] * 4 + types += [int, float, int, int] + types += [int] * 8 + pyccel_test("scripts/array_binary_operation.py", language = language, output_dtype=types) + #------------------------------------------------------------------------------ @pytest.mark.parametrize( 'language', ( pytest.param("c", marks = pytest.mark.c), @@ -779,46 +802,8 @@ def test_lapack( test_file ): #------------------------------------------------------------------------------ def test_type_print( language ): - test_file = 'scripts/runtest_type_print.py' - - test_file = os.path.normpath(test_file) - - cwd = os.path.dirname(test_file) - cwd = get_abs_path(cwd) - - test_file = get_abs_path(test_file) - - pyccel_commands = "--language="+language - - if language=="python": - output_dir = get_abs_path('__pyccel__') - pyccel_commands += " --output=" + output_dir - output_test_file = os.path.join(output_dir, os.path.basename(test_file)) - else: - output_test_file = test_file - - compile_pyccel(cwd, test_file, pyccel_commands) - - lang_out = get_lang_output(output_test_file, language) - lang_out = lang_out.split('\n') - assert len(lang_out)>=5 - - if language=="python": - assert 'int16' in lang_out[0] - if sys.platform == "win32": - assert 'int' in lang_out[1] - assert 'int64' in lang_out[2] - else: - assert 'int32' in lang_out[1] - assert 'int' in lang_out[2] - assert 'float32' in lang_out[3] - assert 'float' in lang_out[4] - else: - assert 'int16' in lang_out[0] - assert 
'int32' in lang_out[1] - assert 'int64' in lang_out[2] - assert 'float32' in lang_out[3] - assert 'float64' in lang_out[4] + pyccel_test("scripts/runtest_type_print.py", + language = language, output_dtype=str) @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), @@ -898,6 +883,35 @@ def test_assert(language, test_file): pyth_out = get_lang_exit_value(test_file, "python") assert (not lang_out and not pyth_out) or (lang_out and pyth_out) +#------------------------------------------------------------------------------ +@pytest.mark.parametrize( "test_file", ["scripts/exits/empty_exit.py", + "scripts/exits/negative_exit1.py", + "scripts/exits/negative_exit2.py", + "scripts/exits/positive_exit1.py", + "scripts/exits/positive_exit2.py", + "scripts/exits/positive_exit3.py", + "scripts/exits/zero_exit.py", + ] ) + +def test_exit(language, test_file): + test_dir = os.path.dirname(test_file) + test_file = get_abs_path(os.path.normpath(test_file)) + + output_dir = os.path.join(get_abs_path(test_dir),'__pyccel__') + output_test_file = os.path.join(output_dir, os.path.basename(test_file)) + + cwd = get_abs_path(test_dir) + + if not language: + language = "fortran" + pyccel_commands = " --language="+language + pyccel_commands += " --output="+ output_dir + + compile_pyccel(cwd, test_file, pyccel_commands) + lang_out = get_lang_exit_value(output_test_file, language) + pyth_out = get_lang_exit_value(test_file, "python") + assert lang_out == pyth_out + #------------------------------------------------------------------------------ @pytest.mark.parametrize( 'language', ( pytest.param("fortran", marks = pytest.mark.fortran), diff --git a/tutorial/builtin-functions.md b/tutorial/builtin-functions.md index 1f31e74a8f..ee0bb46f64 100644 --- a/tutorial/builtin-functions.md +++ b/tutorial/builtin-functions.md @@ -4,72 +4,72 @@ Python contains a limited number of builtin functions defined [here](https://doc | Function | Supported | |----------|-----------| -| **abs** | **Yes** | -| all | No | -| any | No | -| ascii | No | -| bin | No | -| **bool** | **Yes** | -| breakpoint | No | -| bytearray | No | -| bytes | No | -| callable | No | -| chr | No | -| classmethod | No | -| compile | No | -| **complex** | **Yes** | -| delattr | No | -| dict | No | -| dir | No | -| divmod | No | -| **enumerate** | **Yes** | -| eval | No | -| exec | No | -| filter | No | -| **float** | **Yes** | -| format | No | -| frozenset | No | -| getattr | No | -| globals | No | -| hasattr | No | -| hash | No | -| help | No | -| hex | No | -| id | No | -| input | No | -| **int** | **Yes** | -| isinstance | No | -| issubclass | No | -| iter | No | -| **len** | **Yes** | -| *list* | implemented as a tuple | -| locals | No | -| **map** | **Yes** | -| **max** | Fortran-only | -| memoryview | No | -| **min** | Fortran-only | -| next | No | -| object | No | -| oct | No | -| open | No | -| ord | No | -| pow | No | -| **print** | **Yes** | -| property | No | -| **range** | **Yes** | -| repr | No | -| reversed | No | -| round | No | -| set | No | -| setattr | No | -| slice | No | -| sorted | No | -| staticmethod | No | -| str | No | -| **sum** | Fortran-only | -| super | No | -| **tuple** | **Yes** | -| **type** | **Yes** | -| vars | No | -| **zip** | **Yes** | -| \_\_import\_\_ | No +| **`abs`** | **Yes** | +| `all` | No | +| `any` | No | +| `ascii` | No | +| `bin` | No | +| **`bool`** | **Yes** | +| `breakpoint` | No | +| `bytearray` | No | +| `bytes` | No | +| `callable` | No | +| `chr` | No | +| `classmethod` | 
No | +| `compile` | No | +| **`complex`** | **Yes** | +| `delattr` | No | +| `dict` | No | +| `dir` | No | +| `divmod` | No | +| **`enumerate`** | **Yes** | +| `eval` | No | +| `exec` | No | +| `filter` | No | +| **`float`** | **Yes** | +| `format` | No | +| `frozenset` | No | +| `getattr` | No | +| `globals` | No | +| `hasattr` | No | +| `hash` | No | +| `help` | No | +| `hex` | No | +| `id` | No | +| `input` | No | +| **`int`** | **Yes** | +| `isinstance` | No | +| `issubclass` | No | +| `iter` | No | +| **`len`** | **Yes** | +| *`list`* | implemented as a tuple | +| `locals` | No | +| **`map`** | **Yes** | +| **`max`** | Fortran-only | +| `memoryview` | No | +| **`min`** | Fortran-only | +| `next` | No | +| `object` | No | +| `oct` | No | +| `open` | No | +| `ord` | No | +| `pow` | No | +| **`print`** | **Yes** | +| `property` | No | +| **`range`** | **Yes** | +| `repr` | No | +| `reversed` | No | +| `round` | No | +| `set` | No | +| `setattr` | No | +| `slice` | No | +| `sorted` | No | +| `staticmethod` | No | +| `str` | No | +| **`sum`** | Fortran-only | +| `super` | No | +| **`tuple`** | **Yes** | +| **`type`** | **Yes** | +| `vars` | No | +| **`zip`** | **Yes** | +| \_\_`import`\_\_ | No diff --git a/tutorial/compiler.md b/tutorial/compiler.md index e61fb506fc..b12fa4ecfe 100644 --- a/tutorial/compiler.md +++ b/tutorial/compiler.md @@ -2,16 +2,16 @@ ## Compilers supported by Pyccel Pyccel provides default compiler settings for 4 different compiler families: -- **GNU** : gcc / gfortran -- **intel** : icc / ifort -- **PGI** : pgcc / pgfortran -- **nvidia** : nvc / nvfort +- **GNU** : `gcc` / `gfortran` +- **intel** : `icc` / `ifort` +- **PGI** : `pgcc` / `pgfortran` +- **nvidia** : `nvc` / `nvfort` **Warning** : The **GNU** compiler is currently the only compiler which is tested regularly ## Specifying a compiler -The default compiler family is **GNU**. To use a different compiler, the compiler family should be passed to either pyccel or epyccel. +The default compiler family is **GNU**. To use a different compiler, the compiler family should be passed to either `pyccel` or `epyccel`. E.g. ```shell pyccel example.py --compiler=intel @@ -23,30 +23,30 @@ epyccel(my_func, compiler='intel') ## User-defined compiler -The user can also define their own compiler in a json file. To use this definition, the location of the json file must be passed to the _compiler_ argument. The json file must define the following: - -- **exec** : The name of the executable -- **mpi\_exec** : The name of the mpi executable -- **language** : The language handled by this compiler -- **module\_output\_flag** : This flag is only required when the language is fortran. It specifies the flag which indicates where .mod files should be saved (e.g. '-J' for gfortran) -- **debug\_flags** : A list of flags used when compiling in debug mode \[optional\] -- **release\_flags** : A list of flags used when compiling in release mode \[optional\] -- **general\_flags** : A list of flags used when compiling in any mode \[optional\] -- **standard\_flags** : A list of flags used to impose the expected language standard \[optional\] -- **libs** : A list of libraries necessary for compiling \[optional\] -- **libdirs** : A list of library directories necessary for compiling \[optional\] -- **includes** : A list of include directories necessary for compiling \[optional\] +The user can also define their own compiler in a JSON file. To use this definition, the location of the JSON file must be passed to the _compiler_ argument. 
The JSON file must define the following: + +- `exec` : The name of the executable +- `mpi_exec` : The name of the MPI executable +- `language` : The language handled by this compiler +- `module_output_flag` : This flag is only required when the language is Fortran. It specifies the flag which indicates where .mod files should be saved (e.g. '-J' for `gfortran`) +- `debug_flags` : A list of flags used when compiling in debug mode \[optional\] +- `release_flags` : A list of flags used when compiling in release mode \[optional\] +- `general_flags` : A list of flags used when compiling in any mode \[optional\] +- `standard_flags` : A list of flags used to impose the expected language standard \[optional\] +- `libs` : A list of libraries necessary for compiling \[optional\] +- `libdirs` : A list of library directories necessary for compiling \[optional\] +- `includes` : A list of include directories necessary for compiling \[optional\] -In addition, for each accelerator (mpi/openmp/openacc/python) that you will use the json file must define the following: +In addition, for each accelerator (`mpi`/`openmp`/`openacc`/`python`) that you will use the JSON file must define the following: -- **flags** : A list of flags used to impose the expected language standard \[optional\] -- **libs** : A list of libraries necessary for compiling \[optional\] -- **libdirs** : A list of library directories necessary for compiling \[optional\] -- **includes** : A list of include directories necessary for compiling \[optional\] +- `flags` : A list of flags used to impose the expected language standard \[optional\] +- `libs` : A list of libraries necessary for compiling \[optional\] +- `libdirs` : A list of library directories necessary for compiling \[optional\] +- `includes` : A list of include directories necessary for compiling \[optional\] Python is considered to be an accelerator and must additionally specify shared\_suffix. -The default compilers can provide examples compatible with your system once pyccel has been executed at least. To export the json file describing your setup, use the `--export-compile-info` flag and provide a target file name. +The default compilers can provide examples compatible with your system once Pyccel has been executed at least. To export the JSON file describing your setup, use the `--export-compile-info` flag and provide a target file name. E.g. ```shell pyccel --compiler=PGI --language=c --export-compile-info=icc.json diff --git a/tutorial/const_keyword.md b/tutorial/const_keyword.md index fa58ddad1d..5a17ab99c4 100644 --- a/tutorial/const_keyword.md +++ b/tutorial/const_keyword.md @@ -1,4 +1,4 @@ -# Const keyword +# `const` keyword In order to make sure that a function argument is not modified by the function call, Pyccel provides the `const` keyword, which is converted to an equivalent datatype qualifier in the target language. 
Here is a simple example of its usage: @@ -28,7 +28,7 @@ int64_t func1(t_ndarray arr) /*........................................*/ ``` -The fortran equivalent: +The Fortran equivalent: ```fortran module boo @@ -57,7 +57,7 @@ module boo end module boo ``` -Now we will see what happens if we try to modify a const array: +Now we will see what happens if we try to modify a constant array: ```Python def func1(arr: 'const int[:]', i: 'int', v: 'int', z:'int'): @@ -67,7 +67,7 @@ def func1(arr: 'const int[:]', i: 'int', v: 'int', z:'int'): return 0 ``` -Pyccel will recognize that a const array cannot be changed and will raise an error similar to: +Pyccel will recognise that a constant array cannot be changed and will raise an error similar to: ```sh ERROR at annotation (semantic) stage @@ -77,10 +77,10 @@ pyccel: ## Getting Help -If you face problems with pyccel, please take the following steps: +If you face problems with Pyccel, please take the following steps: -1. Consult our documention in the tutorial directory; +1. Consult our documentation in the tutorial directory; 2. Send an email message to pyccel@googlegroups.com; 3. Open an issue on GitHub. -Thank you! \ No newline at end of file +Thank you! diff --git a/tutorial/decorators.md b/tutorial/decorators.md index c6ee77171a..cf7edb3589 100644 --- a/tutorial/decorators.md +++ b/tutorial/decorators.md @@ -117,7 +117,7 @@ positive or negative. As a result an if block must be added. This implies a (pot cost. Non-literal negative indexes are not especially common, therefore Pyccel does not add this costly if block unless it is specifically requested. This can be done using the `allow_negative_index` decorator. -An example shows how Pyccel handles negative indexes beween Python and C: +An example shows how Pyccel handles negative indexes between Python and C: ```python from pyccel.decorators import allow_negative_index @@ -209,7 +209,7 @@ end module boo ## Elemental -In Python it is often the case that a function with scalar arguments and a single scalar output (if any) is also able to accept Numpy arrays with identical rank and shape - in such a case the scalar function is simply applied element-wise to the input arrays. In order to mimic this behavior in the generated C or Fortran code, Pyccel provides the decorator `elemental`. +In Python it is often the case that a function with scalar arguments and a single scalar output (if any) is also able to accept NumPy arrays with identical rank and shape - in such a case the scalar function is simply applied element-wise to the input arrays. In order to mimic this behaviour in the generated C or Fortran code, Pyccel provides the decorator `elemental`. Important note: applying the `elemental` decorator to a function will not make a difference to the C translation of the function definition itself since C doesn't have the elementwise feature. However, Pyccel implements that functionality by calling the function in a `for` loop when an array argument is passed. In the following example, we will use the function `square` where `@elemental` will be useful: @@ -602,9 +602,9 @@ The generated C code: ## Getting Help -If you face problems with pyccel, please take the following steps: +If you face problems with Pyccel, please take the following steps: -1. Consult our documention in the tutorial directory; +1. Consult our documentation in the tutorial directory; 2. Send an email message to pyccel@googlegroups.com; 3. Open an issue on GitHub. 
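As a minimal sketch of the `elemental` behaviour described above (assuming the `square` function that the decorators tutorial refers to), a scalar function decorated with `@elemental` keeps a purely scalar definition while remaining usable on arrays once pyccelised:

```python
# Minimal sketch, assuming the `square` example mentioned in the decorators tutorial.
from pyccel.decorators import elemental

@elemental
def square(x: float) -> float:
    # Scalar body only; for array arguments Pyccel generates the element-wise loop
    # (in C) or relies on the elemental feature (in Fortran).
    return x * x
```

After pyccelisation (for example with `epyccel`), such a function can be applied to a scalar or, element-wise, to a `float[:]` array without any change to its body.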
diff --git a/tutorial/function-pointers-as-arguments.md b/tutorial/function-pointers-as-arguments.md index aab0614257..4d269fde05 100644 --- a/tutorial/function-pointers-as-arguments.md +++ b/tutorial/function-pointers-as-arguments.md @@ -155,11 +155,11 @@ module boo end module boo ``` -## Pyccel Optimization Case +## Pyccel Optimisation Case -Now, we will see a special case that is optimized by Pyccel (not optimized in C yet): +Now, we will see a special case that is optimised by Pyccel (not optimised in C yet): -In this example, Pyccel will recognize that foo doesn't change `x`, so it will automatically add `const` or `intent(in)` (depending on the language: C/Fortran) to the data type of `x`. This provides useful information for C/Fortran compilers to make optimizations to the code: +In this example, Pyccel will recognise that foo doesn't change `x`, so it will automatically add `const` or `intent(in)` (depending on the language: C/Fortran) to the data type of `x`. This provides useful information for C/Fortran compilers to make optimisations to the code: ```python def foo(x: 'int[:]', i: 'int'): @@ -245,7 +245,7 @@ if __name__ == '__main__': func1(a, b, foo) ``` -After trying to pyccelize the Python code above, here are the generated codes: +After trying to pyccelise the Python code above, here are the generated codes: The generated code of the Fortran module: @@ -433,9 +433,9 @@ end program prog_prog_boo ## Getting Help -If you face problems with pyccel, please take the following steps: +If you face problems with Pyccel, please take the following steps: -1. Consult our documention in the tutorial directory; +1. Consult our documentation in the tutorial directory; 2. Send an email message to pyccel@googlegroups.com; 3. Open an issue on GitHub. diff --git a/tutorial/header-files.md b/tutorial/header-files.md index 09ff4f9140..4791131ebd 100644 --- a/tutorial/header-files.md +++ b/tutorial/header-files.md @@ -8,8 +8,8 @@ Header files serve two purposes: - Accelerate the parsing process of an imported Python module by parsing only its header file (automatically generated) instead of the full module. 
### Examples -#### Link with openmp -We create the file `header.pyh` that contains an openmp function definition: +#### Link with OpenMP +We create the file `header.pyh` that contains an OpenMP function definition: ```python #$ header metavar module_name = 'omp_lib' @@ -59,8 +59,8 @@ end module funcs ``` We then create a static library using these commands: -- gfortran -c funcs.f90 -- ar rcs libfuncs.a funcs.o +- `gfortran -c funcs.f90` +- `ar rcs libfuncs.a funcs.o` In order to use this library the user needs to create a header file, we call it `funcs_headers.pyh` ```python @@ -69,7 +69,7 @@ In order to use this library the user needs to create a header file, we call it #$ header function fib(int) results(int) ``` -After that we can create a Python file `test_funcs.py`,where we can import the fortran functions and use them +After that we can create a Python file `test_funcs.py`,where we can import the Fortran functions and use them ```python from pyccel.decorators import types @@ -82,7 +82,7 @@ def print_fib(x): To compile this file we execute the following command `pyccel test_funcs.py --libs=funcs --libdir=$PWD`, this will create the shared library `test_funcs.so` ## Pickling header files -Parsing a large Pyccel header file with hundreds of function declarations may require a significant amount of time, therefore it is important that this process is only done once when pyccelizing multiple Python source files in a large project. +Parsing a large Pyccel header file with hundreds of function declarations may require a significant amount of time, therefore it is important that this process is only done once when pyccelising multiple Python source files in a large project. To this end, Pyccel uses the [pickle](https://docs.python.org/3/library/pickle.html) Python module to store the result of the parser to a `.pyccel` binary file, which is created in the same directory as the header file. Afterwards Pyccel will load the precompiled parser from the `.pyccel` file, instead of parsing the header file again. diff --git a/tutorial/ndarrays.md b/tutorial/ndarrays.md index 2a2a602b34..08f2dc15fa 100644 --- a/tutorial/ndarrays.md +++ b/tutorial/ndarrays.md @@ -9,7 +9,7 @@ Different ndarrays can share the same data, so that changes made in one ndarray ## Pyccel ndarrays ## -Pyccel uses the same implementation as Numpy ndarrays with some rules due to the difference between the host language (Python) "dynamically typed / internal garbage collector" and the target languages such as C and Fortran which are statically typed languages and don't have a garbage collector. +Pyccel uses the same implementation as NumPy ndarrays with some rules due to the difference between the host language (Python) "dynamically typed / internal garbage collector" and the target languages such as C and Fortran which are statically typed languages and don't have a garbage collector. Below we will show some rules that Pyccel has set to handles those differences. @@ -311,18 +311,18 @@ Some examples: end program prog_ex ``` -## Numpy [ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) functions/properties progress in Pyccel ## +## NumPy [ndarray](https://numpy.org/doc/stable/reference/generated/numpy.ndarray.html) functions/properties progress in Pyccel ## - Supported [types](https://numpy.org/devdocs/user/basics.types.html): - bool, int, int8, int16, int32, int64, float, float32, float64, complex64 and complex128. They can be used as cast functions too. 
+ `bool`, `int`, `int8`, `int16`, `int32`, `int64`, `float`, `float32`, `float64`, `complex`, `complex64` and `complex128`. They can be used as cast functions too. - Note: np.bool, np.int and np.float are just aliases to the Python native types, and are considered as a deprecated way to work with Python built-in types in NumPy. + Note: `np.bool`, `np.int`, `np.float` and `np.complex` are just aliases to the Python native types, and are considered as a deprecated way to work with Python built-in types in NumPy. - Properties: - - real, imag, shape, amax, amin + - `real`, `imag`, `shape`, `amax`, `amin` - Methods: - - sum \ No newline at end of file + - `sum` diff --git a/tutorial/numpy-functions.md b/tutorial/numpy-functions.md index 44d6916673..8899a88860 100644 --- a/tutorial/numpy-functions.md +++ b/tutorial/numpy-functions.md @@ -1,11 +1,12 @@ -# Supported Numpy function by Pyccel +# Supported NumPy function by Pyccel -In Pyccel we try to support the Numpy functions which developers use the most.. Here are some of them: +In Pyccel we try to support the NumPy functions which developers use the most.. Here are some of them: ## [norm](https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html) - Supported parameters: + ```python x: array_like Input array. If axis is None, x must be 1-D or 2-D, unless ord is None. If both axis and ord are None, the 2-norm of x.ravel will be returned. @@ -15,10 +16,11 @@ In Pyccel we try to support the Numpy functions which developers use the most.. If axis is a 2-tuple, it specifies the axes that hold 2-D matrices, and the matrix norms of these matrices are computed. If axis is None then either a vector norm (when x is 1-D) or a matrix norm (when x is 2-D) is returned. The default is None. New in version 1.8.0. + ``` - Supported languages: Fortran (2-norm) -- python code: +- Python code: ```python from numpy.linalg import norm @@ -32,7 +34,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print(nrm) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_test_norm @@ -63,9 +65,9 @@ In Pyccel we try to support the Numpy functions which developers use the most.. ## [real](https://numpy.org/doc/stable/reference/generated/numpy.real.html) and [imag](https://numpy.org/doc/stable/reference/generated/numpy.imag.html) functions -- Supported languages: C, fortran +- Supported languages: C, Fortran -- python code: +- Python code: ```python from numpy import imag, real, array @@ -75,7 +77,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print("real part for arr1: " , real_part, "\nimag part for arr1: ", imag_part) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_test_imag_real @@ -150,7 +152,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. } ``` -- python code with arrays: +- Python code with arrays: ```python from numpy import imag, real, array @@ -160,7 +162,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print("real part for arr1: " , real_part, "\nimag part for arr1: ", imag_part) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_test_imag_real @@ -239,12 +241,14 @@ In Pyccel we try to support the Numpy functions which developers use the most.. - Supported parameters: + ```python a: array_like, Input data. 
+ ``` -- Supported languages: fortran +- Supported languages: Fortran -- python code: +- Python code: ```python from numpy import array, prod @@ -254,7 +258,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print("prd: ", prd) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_test_prod @@ -278,16 +282,18 @@ In Pyccel we try to support the Numpy functions which developers use the most.. - Supported parameters: + ```python x1: array_like Dividend array. x2: array_like, Divisor array. If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). + ``` -- Supported language: fortran. +- Supported language: Fortran. -- python code: +- Python code: ```python from numpy import array, mod @@ -297,7 +303,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print("res: ", res) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_test_mod @@ -322,12 +328,14 @@ In Pyccel we try to support the Numpy functions which developers use the most.. - Supported parameters: + ```python x1, x2: array_like, Input arrays (must be 1d or 2d), scalars not allowed. + ``` -- Supported languages: fortran (1d or 2d arrays only). +- Supported languages: Fortran (1d or 2d arrays only). -- python code: +- Python code: ```python from numpy import array, matmul @@ -337,7 +345,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print("res: ", res) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_test_matmul @@ -361,10 +369,11 @@ In Pyccel we try to support the Numpy functions which developers use the most.. ## [linspace](https://numpy.org/doc/stable/reference/generated/numpy.linspace.html) -- Supported languages: C, fortran +- Supported languages: C, Fortran - Supported parameters: + ```python start, stop: array_like, num: int, optional (Default is 50) @@ -372,8 +381,9 @@ In Pyccel we try to support the Numpy functions which developers use the most.. endpoint: bool, optional (Default is True) dtype: dtype, optional + ``` -- python code: +- Python code: ```python from numpy import linspace @@ -383,7 +393,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print(x) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_prog_test @@ -441,13 +451,15 @@ In Pyccel we try to support the Numpy functions which developers use the most.. ## [Transpose](https://numpy.org/doc/stable/reference/generated/numpy.transpose.html) -- Supported languages: C, fortran +- Supported languages: C, Fortran - Supported parameters: + ```python a: array_like, + ``` -- python code: +- Python code: ```python from numpy import transpose @@ -457,7 +469,7 @@ In Pyccel we try to support the Numpy functions which developers use the most.. print(y.T) ``` -- fortran equivalent: +- Fortran equivalent: ```fortran program prog_prog_tmp @@ -545,19 +557,19 @@ In Pyccel we try to support the Numpy functions which developers use the most.. - Supported [math functions](https://numpy.org/doc/stable/reference/routines.math.html) (optional parameters are not supported): - sqrt, abs, sin, cos, exp, log, tan, arcsin, arccos, arctan, arctan2, sinh, cosh, tanh, arcsinh, arccosh and - arctanh. + `sqrt`, `abs`, `sin`, `cos`, `exp`, `log`, `tan`, `arcsin`, `arccos`, `arctan`, `arctan2`, `sinh`, `cosh`, `tanh`, `arcsinh`, `arccosh` and + `arctanh`. 
- Supported [array creation routines](https://numpy.org/doc/stable/reference/routines.array-creation.html) (fully supported): - - empty, full, ones, zeros, arange (`like` parameter is not supported). - - empty_like, full_like, and zeros_like, ones_like (`subok` parameter is not supported). - - rand, randint - - where, count_nonzero (fortran only) - - nonzero (fortran only, 1D only) + - `empty`, `full`, `ones`, `zeros`, `arange` (`like` parameter is not supported). + - `empty_like`, `full_like`, `zeros_like`, and `ones_like` (`subok` parameter is not supported). + - `rand`, `randint` + - `where`, `count_nonzero` (Fortran only) + - `nonzero` (Fortran only, 1D only) - others: - - amax, amin, sum, shape, size, floor + - `amax`, `amin`, `sum`, `shape`, `size`, `floor`, `sign` -If discrepancies beyond round-off error are found between [Numpy](https://numpy.org/doc/stable/reference/)'s and [Pyccel](https://github.com/pyccel/pyccel)'s results, please create an issue at and provide a small example of your problem. Do not forget to specify your target language. +If discrepancies beyond round-off error are found between [NumPy](https://numpy.org/doc/stable/reference/)'s and [Pyccel](https://github.com/pyccel/pyccel)'s results, please create an issue at and provide a small example of your problem. Do not forget to specify your target language. diff --git a/tutorial/openmp.md b/tutorial/openmp.md index c7ebc1f8a5..3a55e21633 100644 --- a/tutorial/openmp.md +++ b/tutorial/openmp.md @@ -2,9 +2,9 @@ ## Using the Runtime Library Routines -OpenMP Runtime Library Routines for Pyccel work by importing the OpenMP routine needed from the Pyccel stdlib: +OpenMP Runtime Library Routines for Pyccel work by importing the OpenMP routine needed from the `pyccel.stdlib`: -Please note that files using the OpenMP Runtime library routines will only work when compiled with pyccel (i.e. they won't work in pure python mode). +Please note that files using the OpenMP Runtime library routines will only work when compiled with Pyccel (i.e. they won't work in pure python mode). ```python from pyccel.stdlib.internal.openmp import omp_set_num_threads @@ -65,9 +65,9 @@ Other references: [*OpenMP Clauses*](https://docs.microsoft.com/en-us/cpp/parallel/openmp/reference/openmp-clauses) -### parallel Construct +### `parallel` Construct -#### Syntax of *parallel* +#### Syntax of `parallel` ```python #$ omp parallel [clause[ [,] clause] ... ] @@ -96,9 +96,9 @@ hello from thread: 0 hello from thread: 1 ``` -### loop Construct +### `loop` Construct -#### Syntax of *loop* +#### Syntax of `loop` ```python #$ omp for [nowait] [clause[ [,] clause] ... ] @@ -127,9 +127,9 @@ The output of this program is: 893116 ``` -### single Construct +### `single` Construct -#### Syntax of *single* +#### Syntax of `single` ```python #$ omp single [nowait] [clause[ [,] clause] ... 
] @@ -163,9 +163,9 @@ hello from thread number: 3 hello from thread number: 0 ``` -### critical Construct +### `critical` Construct -#### Syntax of *critical* +#### Syntax of `critical` ```python #$ omp critical [(name) [ [,] hint (hint-expression)]] @@ -195,9 +195,9 @@ The output of this program is: 893116 ``` -### barrier Construct +### `barrier` Construct -#### Syntax of *barrier* +#### Syntax of `barrier` ```python #$ omp barrier @@ -233,9 +233,9 @@ The output of this program is: 1786232 ``` -### masked Construct +### `masked` Construct -#### Syntax of *masked* +#### Syntax of `masked` ```python #$ omp masked [ filter(integer-expression) ] @@ -263,16 +263,16 @@ The output of this program is: result : 1 ``` -### taskloop/atomic Construct +### `taskloop`/`atomic` Construct -#### Syntax of *taskloop* +#### Syntax of `taskloop` ```python #$ omp taskloop [clause[ [,]clause] ... ] for-loops ``` -#### Syntax of *atomic* +#### Syntax of `atomic` ```python #$ omp atomic [clause[ [,]clause] ... ] @@ -315,9 +315,9 @@ x1 : 200 x2 : 100 ``` -### simd Construct +### `simd` Construct -#### Syntax of *simd* +#### Syntax of `simd` ```python #$ omp simd [clause[ [,]clause] ... ] @@ -349,9 +349,9 @@ The output of this program is: Result: 893116 ``` -### task / taskwait Construct +### `task` / `taskwait` Construct -#### Syntax of *task* +#### Syntax of `task` ```python #$ omp task [clause[ [,]clause] ... ] @@ -359,7 +359,7 @@ structured-block #$ omp end task ``` -#### Syntax *taskwait* +#### Syntax `taskwait` ```python #$ omp taskwait @@ -368,7 +368,7 @@ structured-block #### Example The ``` #$ omp task ``` pragma is used here to define an explicit task.\ -The ``` #$ omp taskwait ``` pragma is used here to specify that the current task region remains suspended until all child tasks that it generated before the taskwait construct complete execution. +The ``` #$ omp taskwait ``` pragma is used here to specify that the current task region remains suspended until all child tasks that it generated before the `taskwait` construct complete execution. ```python @types('int', results='int') def fib(n): @@ -397,9 +397,9 @@ The output of this program is: 55 ``` -### taskyield Construct +### `taskyield` Construct -#### Syntax of *taskyield* +#### Syntax of `taskyield` ```python #$ omp taskyield @@ -407,7 +407,7 @@ The output of this program is: #### Example -The ``` #$ omp taskyield ``` pragma specifies that the current task can be suspended at this point, in favor of execution of a different task. +The ``` #$ omp taskyield ``` pragma specifies that the current task can be suspended at this point, in favour of execution of a different task. ```python #$ omp task @@ -417,9 +417,9 @@ long_function_2() #$ omp end task ``` -### flush Construct +### `flush` Construct -#### Syntax of *flush* +#### Syntax of `flush` ```python #$ omp flush @@ -454,9 +454,9 @@ Thread 1 released flag: 2 ``` -### cancel Construct +### `cancel` Construct -#### Syntax of *cancel* +#### Syntax of `cancel` ```python #$ omp cancel construct-type-clause[ [ , ] if-clause] @@ -479,9 +479,9 @@ for i in range(len(v)): #$ omp end parallel ``` -### teams/target/distribute Constructs +### `teams`/`target`/`distribute` Constructs -#### Syntax *target* +#### Syntax of `target` ```python #$ omp target [clause[ [,]clause] ... ] @@ -489,7 +489,7 @@ structured-block #$ omp end target ``` -#### Syntax of *teams* +#### Syntax of `teams` ```python #$ omp teams [clause[ [,]clause] ... 
] @@ -541,9 +541,9 @@ Team num : 1 Team num : 1 ``` -### sections Construct +### `sections` Construct -#### Syntax of *sections* +#### Syntax of `sections` ```python #$ omp sections [nowait] [clause[ [,]clause] ... ] @@ -605,9 +605,9 @@ sum3 : 28, thread : 1 ## Combined Constructs Usage on Pyccel -### parallel for +### `parallel for` -#### Syntax of *parallel for* +#### Syntax of `parallel for` ```python #$ omp parallel for [clause[ [,]clause] ... ] @@ -616,7 +616,7 @@ loop-nest #### Example -The ```#$ omp parallel for``` construct specifies a parallel construct containing a worksharingloop construct with a canonical loop nest. +The ```#$ omp parallel for``` construct specifies a parallel construct containing a work sharing loop construct with a canonical loop nest. ```python import numpy as np @@ -635,9 +635,9 @@ The output of this program is : result: 28 ``` -### parallel for simd +### `parallel for simd` -#### Syntax of *parallel for simd* +#### Syntax of `parallel for simd` ```python #$ omp parallel for simd [clause[ [,]clause] ... ] @@ -646,7 +646,7 @@ loop-nest #### Example -The ```#$ omp parallel for simd``` construct specifies a parallel construct containing only one worksharing-loop SIMD construct. +The ```#$ omp parallel for simd``` construct specifies a parallel construct containing only one work sharing loop SIMD construct. ```python import numpy as np @@ -675,9 +675,9 @@ z[ 5 ] : 3 z[ 6 ] : 3 z[ 7 ] : 3 ``` -### for simd +### `for simd` -#### Syntax of *for simd* +#### Syntax of `for simd` ```python @@ -685,9 +685,9 @@ z[ 7 ] : 3 for-loops ``` -### teams distribute +### `teams distribute` -#### Syntax of *teams distribute* +#### Syntax of `teams distribute` ```python #$ omp teams distribute [clause[ [,]clause] ... ] @@ -727,27 +727,27 @@ z[ 7 ] : 3 ``` -### teams distribute simd +### `teams distribute simd` -#### Syntax of *teams distribut simd* +#### Syntax of `teams distribute simd` ```python #$ omp teams distribute simd [clause[ [,]clause] ... ] loop-nest ``` -### teams distribute parallel for +### `teams distribute parallel for` -#### Syntax of *teams distribute parallel for* +#### Syntax of `teams distribute parallel for` ```python #$ omp teams distribute parallel for [clause[ [,]clause] ... ] loop-nest ``` -### target parallel +### `target parallel` -#### Syntax of *target parallel* +#### Syntax of `target parallel` ```python #$ omp target parallel [clause[ [,]clause] ... ] @@ -755,27 +755,27 @@ structured-block #$ omp end target parallel ``` -### target parallel for +### `target parallel for` -#### Syntax of *target parallel for* +#### Syntax of `target parallel for` ```python #$ omp target parallel for [clause[ [,]clause] ... ] loop-nest ``` -### target parallel for simd +### `target parallel for simd` -#### Syntax of *target parallel for simd* +#### Syntax of `target parallel for simd` ```python #$ omp target parallel for simd [clause[ [,]clause] ... ] loop-nest ``` -### target teams +### `target teams` -#### Syntax of *target teams* +#### Syntax of `target teams` ```python #$ omp target teams [clause[ [,]clause] ... ] @@ -783,36 +783,36 @@ structured-block #$ omp end target teams ``` -### target teams distribute +### `target teams distribute` -#### Syntax of *target teams distribute* +#### Syntax of `target teams distribute` ```python #$ omp target teams distribute [clause[ [,]clause] ... 
] loop-nest ``` -### target teams distribute simd +### `target teams distribute simd` -#### Syntax of *target teams distribute simd* +#### Syntax of `target teams distribute simd` ```python #$ omp target teams distribute simd [clause[ [,]clause] ... ] loop-nest ``` -### target teams distribute parallel for +### `target teams distribute parallel for` -#### Syntax of *target teams distribute parallel for* +#### Syntax of `target teams distribute parallel for` ```python #$ omp target teams distribute parallel for [clause[ [,]clause] ... ] loop-nest ``` -### target teams distribute parallel for simd +### `target teams distribute parallel for simd` -#### Syntax of *target teams distribute parallel for simd* +#### Syntax of `target teams distribute parallel for simd` ```python #$ omp target teams distribute parallel for simd [clause[ [,]clause] ... ] @@ -821,7 +821,7 @@ loop-nest #### Example -The ```#$ omp parallel for simd``` construct specifies a parallel construct containing only one worksharing-loop SIMD construct. +The ```#$ omp parallel for simd``` construct specifies a parallel construct containing only one work sharing loop SIMD construct. ```python r = 0 @@ -842,7 +842,7 @@ result: 49995000 ## Supported Constructs All constructs in the OpenMP 5.1 standard are supported except: -- scope -- workshare -- scan -- interop +- `scope` +- `workshare` +- `scan` +- `interop` diff --git a/tutorial/quickstart.md b/tutorial/quickstart.md index b329c45650..603b2027e6 100644 --- a/tutorial/quickstart.md +++ b/tutorial/quickstart.md @@ -4,10 +4,10 @@ Pyccel is a **static compiler** for Python 3, using Fortran or C as a backend la Pyccel's main goal is to resolve the principal bottleneck in scientific computing: the transition from **prototype** to **production**. Programmers usually develop their prototype code in a user-friendly interactive language like Python, but their final application requires an HPC implementation and therefore a new production code. -In most cases this is written in a statically compiled language like Fortran/C/C++, and it uses SIMD vectorization, parallel multi-threading, MPI parallelization, GPU offloading, etc. +In most cases this is written in a statically compiled language like Fortran/C/C++, and it uses SIMD vectorisation, parallel multi-threading, MPI parallelisation, GPU offloading, etc. We believe that this expensive process can be avoided, or at least drastically reduced, by using Pyccel to accelerate the most computationally intensive parts of the Python prototype. -Not only is the Pyccel-generated Fortran or C code very fast, but it is **human-readable**; hence the expert programmer can easily profile the code on the target machine and further optimize it. +Not only is the Pyccel-generated Fortran or C code very fast, but it is **human-readable**; hence the expert programmer can easily profile the code on the target machine and further optimise it. 
## Some Useful Background @@ -96,7 +96,7 @@ We recommend using Python-style annotations, which have the syntax: ```python def fun(arg1: 'type1', arg2: 'type2', ..., argN: 'typeN') -> 'return_type': ``` -or to declare Numpy arrays +or to declare NumPy arrays ```python def fun(arg1: 'type1[:]', arg2: 'type2[:,:]', ..., argN: 'typeN[dimensions]') -> 'return_type': ``` @@ -106,10 +106,10 @@ In general string type hints must be used to provide Pyccel with information abo For scalar variables and arrays Pyccel supports the following data types: -- built-in datatypes: `bool`, `int`, `float`, `complex` -- Numpy integer types: `int8`, `int16`, `int32`, `int64` -- Numpy real types: `float32`, `float64`, `double` -- Numpy complex types: `complex64`, `complex128` +- built-in data types: `bool`, `int`, `float`, `complex` +- NumPy integer types: `int8`, `int16`, `int32`, `int64` +- NumPy real types: `float32`, `float64`, `double` +- NumPy complex types: `complex64`, `complex128` ## How to use Pyccel @@ -121,7 +121,7 @@ Detailed installation instructions are found in the [README](https://github.com/ ### Command Line Usage -After installation, the `pyccel` command will be available on a terminal app (iterm or terminal for MacOs, terminal for Linux). +After installation, the `pyccel` command will be available on a terminal app (iterm or terminal for macOS, terminal for Linux). After typing `pyccel`, the usage should be displayed on the terminal; if this is the case then the installation has succeeded. In essence the `pyccel` command translates the given Python file to a Fortran or C file, and then compiles the generated code to a Python C extension module or a simple executable. @@ -185,7 +185,7 @@ $ pyccel mod.py --language c By default Pyccel also compiles the C code into a Python C extension module named `mod..so`, which is placed in the same directory as `mod.py`. To achieve this Pyccel generates the additional files `mod_wrapper.c` (which interacts directly with the CPython API) and `setup_mod.py` (which defines the build procedure for the extension module), as well as a `build` directory. -If the command `import mod` is now given to the Python interpreter, this will import the Python C extention module `mod..so` instead of the pure Python module `mod.py`. +If the command `import mod` is now given to the Python interpreter, this will import the Python C extension module `mod..so` instead of the pure Python module `mod.py`. 
These are the contents of the current directory: ```bash @@ -264,7 +264,7 @@ int64_t binomial_coefficient(int64_t n, int64_t k); Let's now see a more complicated example, where the Python module `mod.py` contains a function that performs the matrix-matrix multiplication between two arrays `a` and `b`, and writes the result into the array `c`: -- The three function's arguments are 2D Numpy arrays of double-precision floating point numbers +- The three function's arguments are 2D NumPy arrays of double-precision floating point numbers - Matrices `a` and `c` use C ordering (row-major), matrix `b` uses Fortran ordering (column-major) - Since matrix `c` is modified by the function, it has `intent(inout)` in Fortran - Comments starting with `#$ omp` are translated to OpenMP pragmas @@ -380,7 +380,7 @@ end module mod ### Interactive Usage with `epyccel` -In addition to the `pyccel` command, the Pyccel library provides the `epyccel` Python function, whose name stands for "embedded Pyccel": given a pure Python function `f` with type annotations, `epyccel` returns a "pyccelized" function `f_fast` that can be used in the same Python session. +In addition to the `pyccel` command, the Pyccel library provides the `epyccel` Python function, whose name stands for "embedded Pyccel": given a pure Python function `f` with type annotations, `epyccel` returns a "pyccelised" function `f_fast` that can be used in the same Python session. For example: ```python from pyccel.epyccel import epyccel @@ -390,7 +390,7 @@ f_fast = epyccel(f) ``` In practice `epyccel` copies the contents of `f` into a temporary python file in the `__epyccel__` directory. As a result it is important that all imports are written inside the function when using `epyccel`. -Once the file has been copied, `epyccel` calls the `pyccel` command to generate a Python C extension module that contains a single pyccelized function. +Once the file has been copied, `epyccel` calls the `pyccel` command to generate a Python C extension module that contains a single pyccelised function. Then finally, it imports this function and returns it to the caller. #### Example 4: quicksort algorithm @@ -418,8 +418,8 @@ def quicksort(a: 'float[:]', lo: int, hi: int): lo = i j = hi ``` -We now import this function from an interactive IPython terminal and pyccelize it with the `epyccel` command. -We then use the two functions (original and pyccelized) to sort a random array of 100 elements. +We now import this function from an interactive IPython terminal and pyccelise it with the `epyccel` command. +We then use the two functions (original and pyccelised) to sort a random array of 100 elements. Finally we compare the timings obtained on an Intel Core 3 architecture. ```bash In [1]: from numpy.random import random @@ -441,7 +441,7 @@ In [8]: %timeit y = x.copy(); quicksort_fast(y, 0, 99) In [9]: (280 - 0.435) / (1.76 - 0.435) Out[9]: 210.99245283018868 ``` -After subtracting the amount of time required to create an array copy from the given times, we can conclude that the pyccelized function is approximately 210 times faster than the original Python function. +After subtracting the amount of time required to create an array copy from the given times, we can conclude that the pyccelised function is approximately 210 times faster than the original Python function. 
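The same comparison can also be scripted outside IPython; a minimal sketch (assuming `quicksort` is defined in `mod.py` exactly as above, and measuring the array-copy overhead separately as discussed) could look like:

```python
# Minimal sketch, assuming quicksort lives in mod.py as shown above.
import timeit
import numpy as np
from pyccel.epyccel import epyccel
from mod import quicksort

quicksort_fast = epyccel(quicksort)

x = np.random.random(100)
# Time many runs of each version; copying keeps the input unsorted between runs.
t_copy = timeit.timeit(lambda: x.copy(), number=1000)
t_py   = timeit.timeit(lambda: quicksort(x.copy(), 0, 99), number=1000)
t_fast = timeit.timeit(lambda: quicksort_fast(x.copy(), 0, 99), number=1000)
print((t_py - t_copy) / (t_fast - t_copy))
```

The printed ratio estimates the speed-up of the pyccelised function once the copy overhead is subtracted, mirroring the calculation shown above.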
## Other Features @@ -453,9 +453,9 @@ In the future we plan to support GPU programming with [CUDA](https://en.wikipedi ## Getting Help -If you face problems with pyccel, please take the following steps: +If you face problems with Pyccel, please take the following steps: -1. Consult our documention in the tutorial directory; +1. Consult our documentation in the tutorial directory; 2. Send an email message to pyccel@googlegroups.com; 3. Open an issue on GitHub. diff --git a/tutorial/templates.md b/tutorial/templates.md index b15887df88..386aa7d3f5 100644 --- a/tutorial/templates.md +++ b/tutorial/templates.md @@ -2,7 +2,7 @@ ## Template ### Templates using header comments -A **template** in pyccel, is used to allow the same function to take arguments of different types from a selection of types the user specifies. +A **template** in Pyccel, is used to allow the same function to take arguments of different types from a selection of types the user specifies. #### The usage In this example the argument **a**, could either be an integer or float, and the same for the argument **b**: ```python @@ -32,7 +32,7 @@ def f1(): pass pass ``` -In this example the arguments of **f2** can either be bool or complex, they can not be int or float: +In this example the arguments of **f2** can either be boolean or complex, they can not be integer or float: ```python #$ header template T(int|real) def f1(): @@ -53,7 +53,7 @@ def f(a,b): ``` Arguments: - name: the name of the template -- types: the types the tamplate represents. +- types: the types the template represents. --- *Note:* The arguments **name** and **types** could also be passed of the form @@ -63,9 +63,9 @@ The arguments **name** and **types** could also be passed of the form When a function is decorated with the template decorator: - The templates are only available to the decorated function. - The templates overrides any existing templates with the same name (declared as header comment). -- If the function is decorated with two templates with the same name, the first one gets overrided. +- If the function is decorated with two templates with the same name, the first one gets overridden. ##### Examples -In this example the arguments of **f** can either be bool or complex, they can not be int or float. +In this example the arguments of **f** can either be boolean or complex, they can not be integer or float. ```python from pyccel.decorators import types, template #$ header template T(int|real) From 6a11942a452a4808f245fbbe66eb4cddd7b3b4ce Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Tue, 14 Mar 2023 14:00:45 +0100 Subject: [PATCH 02/53] Fix commit --- pyccel/ast/numpyext.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pyccel/ast/numpyext.py b/pyccel/ast/numpyext.py index 81efbc0bef..e4dc4d214a 100644 --- a/pyccel/ast/numpyext.py +++ b/pyccel/ast/numpyext.py @@ -369,13 +369,6 @@ def process_dtype(dtype): ------ TypeError: In the case of unrecognized argument type. TypeError: In the case of passed string argument not recognized as valid dtype. - - Returns: - ---------- - dtype: Datatype - The Datatype corresponding to the passed dtype. - precision: int - The precision corresponding to the passed dtype. 
""" if isinstance(dtype, PythonType): From e52035daa0114aee60289412c3b3f4e005f63ec3 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Tue, 14 Mar 2023 14:34:33 +0100 Subject: [PATCH 03/53] pip_installation no longer exists --- .github/workflows/Github_pytest.yml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index f67217668a..d10b4802fd 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -118,8 +118,11 @@ jobs: uses: ./.github/actions/linux_install - name: Install python (setup-python action doesn't work with containers) uses: ./.github/actions/python_install - - name: Install python dependencies - uses: ./.github/actions/pip_installation + - name: Install Pyccel with tests + run: | + python -m pip install --upgrade pip + python -m pip install .[test] + shell: bash - name: Coverage install uses: ./.github/actions/coverage_install - name: Ccuda tests with pytest @@ -148,8 +151,11 @@ jobs: python-version: 3.7 - name: Install dependencies uses: ./.github/actions/linux_install - - name: Install python dependencies - uses: ./.github/actions/pip_installation + - name: Install Pyccel with tests + run: | + python -m pip install --upgrade pip + python -m pip install .[test] + shell: bash - name: Coverage install uses: ./.github/actions/coverage_install - name: Collect coverage information From 4f538e28f83177cbd467c3c40dffde077c30bf55 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Tue, 14 Mar 2023 14:51:59 +0100 Subject: [PATCH 04/53] Add Code owners to protect merges --- .github/CODEOWNERS | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..63440e5f3b --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,6 @@ +# These owners will be the default owners for everything in +# the repo. Unless a later match takes precedence, +# @global-owner1 and @global-owner2 will be requested for +# review when someone opens a pull request. 
+* @EmilyBourne @bauom + From 7dd51f37d9af661e39cb6effb79295a8c7b3088b Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Tue, 14 Mar 2023 15:36:44 +0100 Subject: [PATCH 05/53] Debug coverage problem --- .github/workflows/Github_pytest.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index d10b4802fd..b276a7a24a 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -175,6 +175,8 @@ jobs: - name: Generate coverage report run: | coverage combine + ls + coverage xml -i coverage xml - name: Run codacy-coverage-reporter uses: codacy/codacy-coverage-reporter-action@master From d1f61c54dd761d17784600c1e1b18be1fc362947 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Tue, 14 Mar 2023 16:16:25 +0100 Subject: [PATCH 06/53] Remove debugging --- .github/workflows/Github_pytest.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index b276a7a24a..d10b4802fd 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -175,8 +175,6 @@ jobs: - name: Generate coverage report run: | coverage combine - ls - coverage xml -i coverage xml - name: Run codacy-coverage-reporter uses: codacy/codacy-coverage-reporter-action@master From b8e7b8c309649ba5ba626ee2ac5f020c3a4322d4 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Wed, 15 Mar 2023 12:57:25 +0100 Subject: [PATCH 07/53] Trigger codacy From a7c3fc99d7669bb958d12963f95a314e668b87f5 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Thu, 16 Mar 2023 11:25:49 +0100 Subject: [PATCH 08/53] Trigger codacy From 79c22938ac3f1bc0e94e3c9d734988156ce69a29 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Fri, 17 Mar 2023 10:09:47 +0100 Subject: [PATCH 09/53] Correct folder --- .github/actions/pytest_run_cuda/action.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/pytest_run_cuda/action.yml b/.github/actions/pytest_run_cuda/action.yml index 59c8b5b916..52092a6e02 100644 --- a/.github/actions/pytest_run_cuda/action.yml +++ b/.github/actions/pytest_run_cuda/action.yml @@ -11,7 +11,7 @@ runs: - name: Ccuda tests with pytest run: | # Catch exit 5 (no tests found) - sh -c 'python -m pytest -n auto -rx -m "not (parallel or xdist_incompatible) and ccuda" --ignore=tests/symbolic --ignore=tests/ndarrays; ret=$?; [ $ret = 5 ] && exit 0 || exit $ret' + sh -c 'python -m pytest -n auto -rx -m "not (parallel or xdist_incompatible) and ccuda" --ignore=symbolic --ignore=ndarrays; ret=$?; [ $ret = 5 ] && exit 0 || exit $ret' pyccel-clean shell: ${{ inputs.shell_cmd }} - working-directory: ./ + working-directory: ./tests From 2dbf5f9bbd8a7bd0389cc1597d8b032094542372 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Fri, 17 Mar 2023 10:10:17 +0100 Subject: [PATCH 10/53] Include cuda files in MANIFEST --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 656cdd153a..bf873bea31 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -6,6 +6,7 @@ include README.md recursive-include pyccel *.tx recursive-include pyccel *.pyh recursive-include pyccel *.c +recursive-include pyccel *.cu recursive-include pyccel *.f90 recursive-include pyccel *.h recursive-include pyccel *.pyccel From 92155b4ccef7d130833f730b997932286caeaa6f Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Fri, 17 Mar 2023 10:46:26 +0100 Subject: [PATCH 11/53] Update checkout version --- .github/workflows/Github_pytest.yml | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index d10b4802fd..9ad70e2a21 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -104,7 +104,7 @@ jobs: container: nvidia/cuda:11.7.1-devel-ubuntu20.04 if: github.event.pull_request.base.ref != 'master' steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Prepare docker run: | apt update && apt install sudo From e83d00751d3d5e21f7e9b66f9ba928adff3ee6d0 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Fri, 17 Mar 2023 15:54:32 +0100 Subject: [PATCH 12/53] Import coverage fixes --- .github/workflows/Github_pytest.yml | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index 9ad70e2a21..e15e41581c 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -120,8 +120,11 @@ jobs: uses: ./.github/actions/python_install - name: Install Pyccel with tests run: | + PATH=${PATH}:$HOME/.local/bin + echo $PATH + echo "PATH=${PATH}" >> $GITHUB_ENV python -m pip install --upgrade pip - python -m pip install .[test] + python -m pip install --user .[test] shell: bash - name: Coverage install uses: ./.github/actions/coverage_install @@ -154,10 +157,9 @@ jobs: - name: Install Pyccel with tests run: | python -m pip install --upgrade pip - python -m pip install .[test] + python -m pip install . + python -m pip install coverage shell: bash - - name: Coverage install - uses: ./.github/actions/coverage_install - name: Collect coverage information uses: actions/download-artifact@v3 with: @@ -174,6 +176,8 @@ jobs: run: mv .coverage .coverage.cuda - name: Generate coverage report run: | + INSTALL_DIR=$(cd tests; python -c "import pyccel; print(pyccel.__path__[0])") + echo -e "[paths]\nsource =\n ${INSTALL_DIR}\n */site-packages/pyccel\n[xml]\noutput = cobertura.xml" > .coveragerc coverage combine coverage xml - name: Run codacy-coverage-reporter From 0c639276a4bf926f1295800e799ed7284158713d Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Fri, 17 Mar 2023 15:55:46 +0100 Subject: [PATCH 13/53] Remove unnecessary echo --- .github/workflows/Github_pytest.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index e15e41581c..45c46c9fb1 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -121,7 +121,6 @@ jobs: - name: Install Pyccel with tests run: | PATH=${PATH}:$HOME/.local/bin - echo $PATH echo "PATH=${PATH}" >> $GITHUB_ENV python -m pip install --upgrade pip python -m pip install --user .[test] From 6e066fe2cb9a5c98b04c423b3e003adac29141ad Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Mon, 20 Mar 2023 09:53:41 +0100 Subject: [PATCH 14/53] Make faster as done in cuda_main_temp --- .github/workflows/Github_pytest.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index 45c46c9fb1..dfe800f778 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -153,10 +153,9 @@ jobs: python-version: 3.7 - name: Install dependencies uses: ./.github/actions/linux_install - - name: Install Pyccel with tests + - name: Install coverage run: | python -m pip install --upgrade pip - python -m pip install . 
python -m pip install coverage shell: bash - name: Collect coverage information @@ -175,8 +174,7 @@ jobs: run: mv .coverage .coverage.cuda - name: Generate coverage report run: | - INSTALL_DIR=$(cd tests; python -c "import pyccel; print(pyccel.__path__[0])") - echo -e "[paths]\nsource =\n ${INSTALL_DIR}\n */site-packages/pyccel\n[xml]\noutput = cobertura.xml" > .coveragerc + echo -e "[paths]\nsource =\n $(pwd)/pyccel\n */site-packages/pyccel\n[xml]\noutput = cobertura.xml" > .coveragerc coverage combine coverage xml - name: Run codacy-coverage-reporter From 82d85772855f9aa103a3593b5bfcfb19ff416ded Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Mon, 20 Mar 2023 10:08:32 +0100 Subject: [PATCH 15/53] Trigger codacy From beb44ad468974257257931813d737bf2d820e19c Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Mon, 20 Mar 2023 10:14:48 +0100 Subject: [PATCH 16/53] Try with coverage secret From 7638bc6e0a85870e2f8c670c4540c5e64239830f Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Mon, 20 Mar 2023 13:48:05 +0100 Subject: [PATCH 17/53] Add first commit to provide codacy baseline From b0ad67408039d8598d89f1f464dc162913588b68 Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Mon, 20 Mar 2023 14:09:19 +0100 Subject: [PATCH 18/53] Only run benchmarks in Pyccel repo --- .github/workflows/bench.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml index 72b927ff70..014c78fc59 100644 --- a/.github/workflows/bench.yml +++ b/.github/workflows/bench.yml @@ -2,7 +2,7 @@ name: Benchmarks on: push: - branches: [ master ] + branches: [ pyccel/master ] jobs: From 70a9590945684f7fc6e9c9cdfcd0bdfb09600a4c Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Tue, 21 Mar 2023 08:55:37 +0100 Subject: [PATCH 19/53] Trigger codacy with correct branch name From abf004bd2aec46fa488666b4e7499e33b87531fa Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Mon, 3 Apr 2023 18:22:28 +0200 Subject: [PATCH 20/53] Fix triggers --- .github/workflows/Github_pytest.yml | 2 +- .github/workflows/doc_coverage.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index dfe800f778..20baee0f4b 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -2,7 +2,7 @@ name: Pyccel tests on: pull_request: - branches: [ master, cuda_main, cuda_devel ] + branches: [ master, development ] jobs: Linux: diff --git a/.github/workflows/doc_coverage.yml b/.github/workflows/doc_coverage.yml index ef63106271..57e05f7e91 100644 --- a/.github/workflows/doc_coverage.yml +++ b/.github/workflows/doc_coverage.yml @@ -2,7 +2,7 @@ name: Doc Coverage Action on: pull_request: - branches: [ master ] + branches: [ master, development ] jobs: From 713ee6b682c4d7ebdfc73613fa13b0525039a958 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Thu, 13 Apr 2023 14:00:49 +0200 Subject: [PATCH 21/53] Set up bot for cuda --- .github/workflows/Github_pytest.yml | 14 +++++++- .github/workflows/coverage.yml | 49 ++++++++-------------------- ci_tools/bot_interaction.py | 4 ++- ci_tools/bot_messages/show_tests.txt | 5 +-- 4 files changed, 33 insertions(+), 39 deletions(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index 0606e04685..bd9b297e48 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -89,9 +89,21 @@ jobs: python_version: ${{ (needs.Bot.outputs.python_version == '') && '3.10' || 
needs.Bot.outputs.python_version }} ref: ${{ needs.Bot.outputs.REF }} - coverage: + coverage_collection: needs: [Bot, linux, cuda] + if: ${{ always() && needs.Bot.outputs.run_coverage == 'True' && needs.Linux.result == 'success' && needs.Cuda.result != 'failure' }} + if: ${{ needs.Bot.outputs.run_coverage == 'True' }} + uses: + ./.github/workflows/coverage.yml + with: + python_version: ${{ (needs.Bot.outputs.python_version == '') && '3.7' || needs.Bot.outputs.python_version }} + ref: ${{ needs.Bot.outputs.REF }} + cuda_done: ${{ needs.Cuda.result }} + + coverage: + needs: [Bot, linux, cuda, coverage_collection] if: ${{ needs.Bot.outputs.run_coverage == 'True' }} + if: ${{ always() && needs.Bot.outputs.run_coverage == 'True' && needs.CoverageCollection.result == 'success' && needs.Cuda.result != 'failure' }} uses: ./.github/workflows/coverage.yml with: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index ac7944967c..b6c90506c1 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,9 +9,6 @@ on: ref: required: false type: string - cuda_done: - required: true - type: bool jobs: CoverageChecker: @@ -27,41 +24,23 @@ jobs: uses: actions/setup-python@v4 with: python-version: ${{ inputs.python_version }} - - name: Install dependencies - uses: ./.github/actions/linux_install - - name: Install coverage + - name: Install Python dependencies run: | - python -m pip install --upgrade pip - python -m pip install coverage + python -m pip install --upgrade pip + python -m pip install defusedxml shell: bash - name: Collect coverage information uses: actions/download-artifact@v3 - with: - name: coverage-artifact - - name: Rename coverage file - run: mv .coverage .coverage.linux - - name: Collect coverage information - uses: actions/download-artifact@v3 - if: ${{ inputs.cuda_done }} == 'success' - with: - name: cuda-coverage-artifact - - name: Rename coverage file - if: ${{ inputs.cuda_done }} == 'success' - run: mv .coverage .coverage.cuda - - name: Generate coverage report - run: | - echo -e "[paths]\nsource =\n $(pwd)/pyccel\n */site-packages/pyccel\n[xml]\noutput = cobertura.xml" > .coveragerc - coverage combine - coverage xml - - name: Run codacy-coverage-reporter - uses: codacy/codacy-coverage-reporter-action@master - continue-on-error: True - with: - project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} - coverage-reports: cobertura.xml - - name: Save code coverage xml report - uses: actions/upload-artifact@v3 with: name: coverage-artifact-xml - path: cobertura.xml - retention-days: 1 + - name: Collect diff information + run: | + BASE_BRANCH=$GITHUB_BASE_REF + git fetch + git diff origin/${BASE_BRANCH}..HEAD --no-indent-heuristic --unified=0 --output=pull_diff.txt --no-color + ls + shell: bash + - name: Check coverage + run: | + python ci_tools/check_new_coverage.py pull_diff.txt cobertura.xml $GITHUB_EVENT_PATH $GITHUB_STEP_SUMMARY + shell: bash diff --git a/ci_tools/bot_interaction.py b/ci_tools/bot_interaction.py index e3de344ac0..405c8750bc 100644 --- a/ci_tools/bot_interaction.py +++ b/ci_tools/bot_interaction.py @@ -13,7 +13,7 @@ senior_reviewer = ['yguclu', 'EmilyBourne'] trusted_reviewers = ['yguclu', 'EmilyBourne', 'ratnania', 'saidctb', 'bauom'] -pr_test_keys = ['linux', 'windows', 'macosx', 'coverage', 'docs', 'pylint', +pr_test_keys = ['linux', 'windows', 'macosx', 'cuda', 'coverage', 'docs', 'pylint', 'lint', 'spelling'] review_labels = ('needs_initial_review', 'Ready_for_review', 'Ready_to_merge') @@ -76,6 +76,7 @@ def 
run_tests(pr_id, tests, outputs, event): if outputs['run_coverage']: outputs['run_linux'] = True + outputs['run_cuda'] = True outputs['status_url'] = event['repository']['statuses_url'].format(sha=ref_sha) @@ -347,6 +348,7 @@ def flagged_as_trusted(pr_id, user): outputs = {'run_linux': False, 'run_windows': False, 'run_macosx': False, + 'run_cuda': False, 'run_coverage': False, 'run_docs': False, 'run_pylint': False, diff --git a/ci_tools/bot_messages/show_tests.txt b/ci_tools/bot_messages/show_tests.txt index a8e3322c6e..6dca265a85 100644 --- a/ci_tools/bot_messages/show_tests.txt +++ b/ci_tools/bot_messages/show_tests.txt @@ -1,7 +1,8 @@ The following is a list of keywords which can be used to run tests. Tests in bold are run by pull requests marked as ready for review: - **linux** : Runs the unit tests on a linux system. -- **windows** : Runs the unit tests on a linux system. -- **macosx** : Runs the unit tests on a linux system. +- **windows** : Runs the unit tests on a windows system. +- **macosx** : Runs the unit tests on a macosx system. +- **cuda** : Runs the cuda unit tests on a linux system. - **coverage** : Runs the unit tests on a linux system and checks the coverage of the tests. - **docs** : Checks if the documentation follows the numpydoc format. - **pylint** : Runs pylint on files which are too big to be handled by codacy. From e406bc5038356f39f861939f65bd018a1da12d23 Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Thu, 13 Apr 2023 14:05:59 +0200 Subject: [PATCH 22/53] Correct double if --- .github/workflows/Github_pytest.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index bd9b297e48..dd0b3fb92a 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -92,7 +92,6 @@ jobs: coverage_collection: needs: [Bot, linux, cuda] if: ${{ always() && needs.Bot.outputs.run_coverage == 'True' && needs.Linux.result == 'success' && needs.Cuda.result != 'failure' }} - if: ${{ needs.Bot.outputs.run_coverage == 'True' }} uses: ./.github/workflows/coverage.yml with: @@ -102,7 +101,6 @@ jobs: coverage: needs: [Bot, linux, cuda, coverage_collection] - if: ${{ needs.Bot.outputs.run_coverage == 'True' }} if: ${{ always() && needs.Bot.outputs.run_coverage == 'True' && needs.CoverageCollection.result == 'success' && needs.Cuda.result != 'failure' }} uses: ./.github/workflows/coverage.yml From 7f4b669bcc015209b60ebe97008b61792c93090a Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Thu, 13 Apr 2023 14:14:54 +0200 Subject: [PATCH 23/53] Wrong file --- .github/workflows/Github_pytest.yml | 2 +- .github/workflows/coverage_collect.yml | 67 ++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) create mode 100644 .github/workflows/coverage_collect.yml diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index dd0b3fb92a..4237bfac82 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -93,7 +93,7 @@ jobs: needs: [Bot, linux, cuda] if: ${{ always() && needs.Bot.outputs.run_coverage == 'True' && needs.Linux.result == 'success' && needs.Cuda.result != 'failure' }} uses: - ./.github/workflows/coverage.yml + ./.github/workflows/coverage_collect.yml with: python_version: ${{ (needs.Bot.outputs.python_version == '') && '3.7' || needs.Bot.outputs.python_version }} ref: ${{ needs.Bot.outputs.REF }} diff --git a/.github/workflows/coverage_collect.yml b/.github/workflows/coverage_collect.yml new file mode 
100644 index 0000000000..e0c4a0c3bc --- /dev/null +++ b/.github/workflows/coverage_collect.yml @@ -0,0 +1,67 @@ +name: Unit test coverage collection + +on: + workflow_call: + inputs: + python_version: + required: true + type: string + ref: + required: false + type: string + cuda_result: + required: true + type: string + +jobs: + CoverageChecker: + + runs-on: ubuntu-latest + name: Unit tests + + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ inputs.ref }} + - name: Set up Python ${{ inputs.python_version }} + uses: actions/setup-python@v4 + with: + python-version: ${{ inputs.python_version }} + - name: Install dependencies + uses: ./.github/actions/linux_install + - name: Install coverage + run: | + python -m pip install --upgrade pip + python -m pip install coverage + shell: bash + - name: Collect coverage information + uses: actions/download-artifact@v3 + with: + name: coverage-artifact + - name: Rename coverage file + run: mv .coverage .coverage.linux + - name: Collect coverage information + uses: actions/download-artifact@v3 + if: ${{ inputs.cuda_result }} == 'success' + with: + name: cuda-coverage-artifact + - name: Rename coverage file + if: ${{ inputs.cuda_result }} == 'success' + run: mv .coverage .coverage.cuda + - name: Generate coverage report + run: | + echo -e "[paths]\nsource =\n $(pwd)/pyccel\n */site-packages/pyccel\n[xml]\noutput = cobertura.xml" > .coveragerc + coverage combine + coverage xml + - name: Run codacy-coverage-reporter + uses: codacy/codacy-coverage-reporter-action@master + continue-on-error: True + with: + project-token: ${{ secrets.CODACY_PROJECT_TOKEN }} + coverage-reports: cobertura.xml + - name: Save code coverage xml report + uses: actions/upload-artifact@v3 + with: + name: coverage-artifact-xml + path: cobertura.xml + retention-days: 1 From d77057cda95cf6b9408af8d7672a882a39e462ca Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Sun, 16 Apr 2023 10:50:35 +0200 Subject: [PATCH 24/53] Correct typo --- .github/workflows/Github_pytest.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index b23be5d32f..9a54c2ed18 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -98,7 +98,7 @@ jobs: with: python_version: ${{ (needs.Bot.outputs.python_version == '') && '3.7' || needs.Bot.outputs.python_version }} ref: ${{ needs.Bot.outputs.REF }} - cuda_done: ${{ needs.Cuda.result }} + cuda_result: ${{ needs.Cuda.result }} coverage: needs: [Bot, linux, cuda, coverage_collection] From 1a2542deabd01b888a789c64c3b38641f01bbd40 Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Mon, 17 Apr 2023 17:40:31 +0200 Subject: [PATCH 25/53] Missing bot output --- .github/workflows/Github_pytest.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/Github_pytest.yml b/.github/workflows/Github_pytest.yml index 9a54c2ed18..005e163ec3 100644 --- a/.github/workflows/Github_pytest.yml +++ b/.github/workflows/Github_pytest.yml @@ -18,6 +18,7 @@ jobs: run_windows: ${{ steps.run_bot.outputs.run_windows }} run_macosx: ${{ steps.run_bot.outputs.run_macosx }} run_coverage: ${{ steps.run_bot.outputs.run_coverage }} + run_cuda: ${{ steps.run_bot.outputs.run_cuda }} run_docs: ${{ steps.run_bot.outputs.run_docs }} run_pylint: ${{ steps.run_bot.outputs.run_pylint }} run_lint: ${{ steps.run_bot.outputs.run_lint }} From dfef06563444f8e9bdf14f7fe3a598eff1e4bc65 Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 16 Jan 2024 13:01:19 +0100 
Subject: [PATCH 26/53] Fix indenting --- .github/actions/pytest_run/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/pytest_run/action.yml b/.github/actions/pytest_run/action.yml index d2500fd30a..1c4deecb57 100644 --- a/.github/actions/pytest_run/action.yml +++ b/.github/actions/pytest_run/action.yml @@ -39,7 +39,7 @@ runs: # Test ndarray folder update (requires parallel tests to avoid clean) touch ${SITE_DIR}/pyccel/stdlib/cwrapper/cwrapper.h python -m pytest -n auto -rXx ${FLAGS} -m c -k test_array_int32_1d_scalar epyccel/test_arrays.py 2>&1 | tee s2_outfile.out - fi + fi shell: ${{ inputs.shell_cmd }} working-directory: ./tests id: pytest_2 From 9e256c2a6ee6030672a7d53b5ee4eb0a6e4d6160 Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 16 Jan 2024 13:13:29 +0100 Subject: [PATCH 27/53] Add missing description --- ci_tools/bot_tools/bot_funcs.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ci_tools/bot_tools/bot_funcs.py b/ci_tools/bot_tools/bot_funcs.py index f8e0972e9f..adfcd70caf 100644 --- a/ci_tools/bot_tools/bot_funcs.py +++ b/ci_tools/bot_tools/bot_funcs.py @@ -41,7 +41,8 @@ 'pyccel_lint': "Pyccel best practices", 'pylint': "Python linting", 'spelling': "Spelling verification", - 'windows': "Unit tests on Windows" + 'windows': "Unit tests on Windows", + 'cuda': "Unit tests on Linux with cuda" } test_dependencies = {'coverage':['linux', 'cuda']} From 8a80dcd605a0c8df60276a79f0f769ebf7d2531e Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 16 Jan 2024 13:14:59 +0100 Subject: [PATCH 28/53] Reorder to ensure Python is available --- .github/workflows/cuda.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index cf9af468c1..f6dcaf1221 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -42,17 +42,17 @@ jobs: ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata shell: bash - - name: CUDA Version - run: nvcc --version # cuda install check + - name: Install python (setup-python action doesn't work with containers) + uses: ./.github/actions/python_install - name: "Setup" id: token run: | pip install jwt requests python ci_tools/setup_check_run.py + - name: CUDA Version + run: nvcc --version # cuda install check - name: Install dependencies uses: ./.github/actions/linux_install - - name: Install python (setup-python action doesn't work with containers) - uses: ./.github/actions/python_install - name: Install Pyccel with tests run: | PATH=${PATH}:$HOME/.local/bin From f1fc7937838edc66f40415f73dd950299f0a41bd Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 16 Jan 2024 13:17:28 +0100 Subject: [PATCH 29/53] Specify test name --- .github/workflows/cuda.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index f6dcaf1221..b8ae3396e2 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -48,7 +48,7 @@ jobs: id: token run: | pip install jwt requests - python ci_tools/setup_check_run.py + python ci_tools/setup_check_run.py cuda - name: CUDA Version run: nvcc --version # cuda install check - name: Install dependencies From 47d661c2b1d82a2930e736491255c6f56e463e9b Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 16 Jan 2024 13:35:28 +0100 Subject: [PATCH 30/53] Report cuda_pytest outcome --- 
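Note on this change: giving the pytest step an explicit `id` exposes its result
as `steps.<id>.outcome` (one of success, failure, cancelled or skipped), which
the reporting step can forward to `ci_tools/complete_check_run.py`. Running the
reporter under `if: always()` ensures the check run is closed even when the
tests fail. A minimal sketch of the pattern, assuming the step id, action path
and script name used in the workflow below; all other values are illustrative:

    - name: Cuda tests with pytest
      id: cuda_pytest                      # makes the step outcome addressable
      uses: ./.github/actions/pytest_run_cuda
    - name: "Post completed"
      if: always()                         # report even if the tests failed
      run: |
        python ci_tools/complete_check_run.py ${{ steps.cuda_pytest.outcome }}
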
.github/workflows/cuda.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index b8ae3396e2..9ca829c52b 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -63,6 +63,7 @@ jobs: - name: Coverage install uses: ./.github/actions/coverage_install - name: Ccuda tests with pytest + id: cuda_pytest uses: ./.github/actions/pytest_run_cuda - name: Collect coverage information continue-on-error: True @@ -76,5 +77,5 @@ jobs: - name: "Post completed" if: always() run: - python ci_tools/complete_check_run.py ${{ steps.f_c_pytest.outcome }} ${{ steps.python_pytest.outcome }} ${{ steps.parallel.outcome }} ${{ steps.valgrind.outcome }} + python ci_tools/complete_check_run.py ${{ steps.cuda_pytest.outcome }} From daf19de0431cf9b2ebbd0d471d9d86d0f82f5e78 Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 16 Jan 2024 13:35:39 +0100 Subject: [PATCH 31/53] Filter when cuda runs --- ci_tools/bot_tools/bot_funcs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ci_tools/bot_tools/bot_funcs.py b/ci_tools/bot_tools/bot_funcs.py index adfcd70caf..90ded0e42d 100644 --- a/ci_tools/bot_tools/bot_funcs.py +++ b/ci_tools/bot_tools/bot_funcs.py @@ -419,7 +419,7 @@ def is_test_required(self, commit_log, name, key, state): True if the test should be run, False otherwise. """ print("Checking : ", name) - if key in ('linux', 'windows', 'macosx', 'anaconda_linux', 'anaconda_windows', 'coverage', 'intel'): + if key in ('linux', 'windows', 'macosx', 'anaconda_linux', 'anaconda_windows', 'coverage', 'intel', 'cuda'): has_relevant_change = lambda diff: any((f.startswith('pyccel/') or f.startswith('tests/')) #pylint: disable=unnecessary-lambda-assignment and f.endswith('.py') and f != 'pyccel/version.py' for f in diff) From 32fb825a36cb7b0fdc7b94a4b6516e903b004dbf Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 16 Jan 2024 13:35:49 +0100 Subject: [PATCH 32/53] Run cuda on devel --- ci_tools/devel_branch_tests.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ci_tools/devel_branch_tests.py b/ci_tools/devel_branch_tests.py index 1102ef9e92..ec67b6c49a 100644 --- a/ci_tools/devel_branch_tests.py +++ b/ci_tools/devel_branch_tests.py @@ -15,3 +15,4 @@ bot.run_tests(['anaconda_linux'], '3.10', force_run = True) bot.run_tests(['anaconda_windows'], '3.10', force_run = True) bot.run_tests(['intel'], '3.9', force_run = True) + bot.run_tests(['cuda'], '-', force_run = True) From b140e88882296a898b937a7f77cf51c4b0ba13df Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Thu, 15 Feb 2024 15:38:52 +0100 Subject: [PATCH 33/53] Fix bot PR state calculation --- ci_tools/bot_tools/bot_funcs.py | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/ci_tools/bot_tools/bot_funcs.py b/ci_tools/bot_tools/bot_funcs.py index a330b3c0d0..1621d1d089 100644 --- a/ci_tools/bot_tools/bot_funcs.py +++ b/ci_tools/bot_tools/bot_funcs.py @@ -290,15 +290,15 @@ def run_tests(self, tests, python_version = None, force_run = False): self._GAI.create_comment(self._pr_id, "There are unrecognised tests.\n"+message_from_file('show_tests.txt')) return [] else: - check_runs = self._GAI.get_check_runs(self._ref)['check_runs'] - already_triggered = [c["name"] for c in check_runs if c['status'] in ('completed', 'in_progress') and \ - c['conclusion'] != 'cancelled' and \ - c['name'] not in ('coverage',)] + check_runs = {self.get_name_key(c["name"]): c for c in self._GAI.get_check_runs(self._ref)['check_runs']} + 
already_triggered = [c["name"] for n,c in check_runs.items() if c['status'] in ('completed', 'in_progress') and \ + c['conclusion'] != 'cancelled' and \ + n != 'coverage'] already_triggered_names = [self.get_name_key(t) for t in already_triggered] - already_programmed = {c["name"]:c for c in check_runs if c['status'] == 'queued'} - success_names = [self.get_name_key(c["name"]) for c in check_runs if c['status'] == 'completed' and c['conclusion'] == 'success'] + already_programmed = {c["name"]:c for c in check_runs.values() if c['status'] == 'queued'} + success_names = [n for n,c in check_runs.items() if c['status'] == 'completed' and c['conclusion'] == 'success'] print(already_triggered) - states = [c['conclusion'] for c in check_runs if c['status'] == 'completed'] + states = [] if not force_run: # Get a list of all commits on this branch @@ -317,6 +317,7 @@ def run_tests(self, tests, python_version = None, force_run = False): pv = python_version or default_python_versions[t] key = f"({t}, {pv})" if any(key in a for a in already_triggered): + states.append(check_runs[t]['conclusion']) continue name = f"{test_names[t]} {key}" if not force_run and not self.is_test_required(commit_log, name, t, states): @@ -332,8 +333,8 @@ def run_tests(self, tests, python_version = None, force_run = False): if all(d in success_names for d in deps): workflow_ids = None if t == 'coverage': - print([r['details_url'] for r in check_runs if r['conclusion'] == "success"]) - workflow_ids = [int(r['details_url'].split('/')[-1]) for r in check_runs if r['conclusion'] == "success" and '(' in r['name']] + print([r['details_url'] for r in check_runs.values() if r['conclusion'] == "success"]) + workflow_ids = [int(r['details_url'].split('/')[-1]) for r in check_runs.values() if r['conclusion'] == "success" and '(' in r['name']] print("Running test") self.run_test(t, pv, posted["id"], workflow_ids) return states @@ -555,6 +556,7 @@ def request_mark_as_ready(self): _, err = p.communicate() print(err) + print(states) if all(s == 'success' for s in states): self.mark_as_ready(False) From 5295ee243fdfe9fcb09f9632a8719d64126b9758 Mon Sep 17 00:00:00 2001 From: bauom <40796259+bauom@users.noreply.github.com> Date: Wed, 28 Feb 2024 18:11:50 +0100 Subject: [PATCH 34/53] [init] Adding CUDA language/compiler and CodePrinter (#32) This PR aims to make the C code compilable using nvcc. The cuda language was added as well as a CudaCodePrinter. 
Changes to stdlib: Wrapped expressions using complex types in an `ifndef __NVCC__` to avoid processing them with the nvcc compiler --------- Co-authored-by: Mouad Elalj, EmilyBourne --- .dict_custom.txt | 1 + .github/actions/pytest_parallel/action.yml | 4 +- .github/actions/pytest_run/action.yml | 4 +- .github/actions/pytest_run_cuda/action.yml | 11 +- CHANGELOG.md | 6 + ci_tools/json_pytest_output.py | 2 +- pyccel/codegen/codegen.py | 43 +++++-- pyccel/codegen/compiling/compilers.py | 5 +- pyccel/codegen/pipeline.py | 5 +- pyccel/codegen/printing/cucode.py | 74 +++++++++++ pyccel/commands/console.py | 2 +- pyccel/compilers/default_compilers.py | 13 +- pyccel/naming/__init__.py | 4 +- pyccel/naming/cudanameclashchecker.py | 92 ++++++++++++++ pyccel/stdlib/numpy/numpy_c.c | 2 + pyccel/stdlib/numpy/numpy_c.h | 2 + pytest.ini | 1 + tests/conftest.py | 11 ++ tests/epyccel/test_base.py | 136 ++++++++++----------- 19 files changed, 324 insertions(+), 94 deletions(-) create mode 100644 pyccel/codegen/printing/cucode.py create mode 100644 pyccel/naming/cudanameclashchecker.py diff --git a/.dict_custom.txt b/.dict_custom.txt index b25b47f277..1ad66b6914 100644 --- a/.dict_custom.txt +++ b/.dict_custom.txt @@ -106,5 +106,6 @@ Valgrind variadic subclasses oneAPI +Cuda getter setter diff --git a/.github/actions/pytest_parallel/action.yml b/.github/actions/pytest_parallel/action.yml index c7c77d99c7..f91d84915b 100644 --- a/.github/actions/pytest_parallel/action.yml +++ b/.github/actions/pytest_parallel/action.yml @@ -10,8 +10,8 @@ runs: steps: - name: Test with pytest run: | - mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel/test_parallel_epyccel.py -v -m parallel -rXx - #mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel -v -m parallel -rXx + mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel/test_parallel_epyccel.py -v -m "parallel and not cuda" -rXx + #mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel -v -m "parallel and not cuda" -rXx shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/pytest_run/action.yml b/.github/actions/pytest_run/action.yml index 668b4a4fd8..ce95c0cb7c 100644 --- a/.github/actions/pytest_run/action.yml +++ b/.github/actions/pytest_run/action.yml @@ -51,13 +51,13 @@ runs: working-directory: ./tests id: pytest_3 - name: Test Fortran translations - run: python -m pytest -n auto -rX ${FLAGS} -m "not (parallel or xdist_incompatible) and not (c or python or ccuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s4_outfile.out + run: python -m pytest -n auto -rX ${FLAGS} -m "not (parallel or xdist_incompatible) and not (c or python or cuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s4_outfile.out shell: ${{ inputs.shell_cmd }} working-directory: ./tests id: pytest_4 - name: Test multi-file Fortran translations run: | - python -m pytest -rX ${FLAGS} -m "xdist_incompatible and not parallel and not (c or python or ccuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays | tee s5_outfile.out + python -m pytest -rX ${FLAGS} -m "xdist_incompatible and not parallel and not (c or python or cuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays | tee s5_outfile.out pyccel-clean shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/pytest_run_cuda/action.yml b/.github/actions/pytest_run_cuda/action.yml index 52092a6e02..46f90552ed 100644 --- a/.github/actions/pytest_run_cuda/action.yml +++ b/.github/actions/pytest_run_cuda/action.yml @@ -1,4 
+1,4 @@ -name: 'Pyccel pytest commands generating Ccuda' +name: 'Pyccel pytest commands generating Cuda' inputs: shell_cmd: description: 'Specifies the shell command (different for anaconda)' @@ -11,7 +11,14 @@ runs: - name: Ccuda tests with pytest run: | # Catch exit 5 (no tests found) - sh -c 'python -m pytest -n auto -rx -m "not (parallel or xdist_incompatible) and ccuda" --ignore=symbolic --ignore=ndarrays; ret=$?; [ $ret = 5 ] && exit 0 || exit $ret' + python -m pytest -rX ${FLAGS} -m "not (xdist_incompatible or parallel) and cuda ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s1_outfile.out pyccel-clean shell: ${{ inputs.shell_cmd }} working-directory: ./tests + - name: Final step + if: always() + id: status + run: + python ci_tools/json_pytest_output.py -t "Cuda Test Summary" --tests "Cuda tests:${{ steps.pytest_1.outcome }}:tests/s1_outfile.out" + + shell: ${{ inputs.shell_cmd }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 37192de886..e341e45b98 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,12 @@ # Change Log All notable changes to this project will be documented in this file. +## \[Cuda - UNRELEASED\] + +### Added + +- #32 : add support for `nvcc` Compiler and `cuda` language as a possible option. + ## \[UNRELEASED\] ### Added diff --git a/ci_tools/json_pytest_output.py b/ci_tools/json_pytest_output.py index 409ae76d72..b84f4a4c09 100644 --- a/ci_tools/json_pytest_output.py +++ b/ci_tools/json_pytest_output.py @@ -61,7 +61,7 @@ def mini_md_summary(title, outcome, failed_tests): summary = "" failed_pattern = re.compile(r".*FAILED.*") - languages = ('c', 'fortran', 'python') + languages = ('c', 'fortran', 'python', 'cuda') pattern = {lang: re.compile(r".*\["+lang+r"\]\ \_.*") for lang in languages} for i in p_args.tests: diff --git a/pyccel/codegen/codegen.py b/pyccel/codegen/codegen.py index 01303cc393..7c8b210136 100644 --- a/pyccel/codegen/codegen.py +++ b/pyccel/codegen/codegen.py @@ -9,34 +9,41 @@ from pyccel.codegen.printing.fcode import FCodePrinter from pyccel.codegen.printing.ccode import CCodePrinter from pyccel.codegen.printing.pycode import PythonCodePrinter +from pyccel.codegen.printing.cucode import CudaCodePrinter from pyccel.ast.core import FunctionDef, Interface, ModuleHeader from pyccel.errors.errors import Errors from pyccel.utilities.stage import PyccelStage -_extension_registry = {'fortran': 'f90', 'c':'c', 'python':'py'} -_header_extension_registry = {'fortran': None, 'c':'h', 'python':None} +_extension_registry = {'fortran': 'f90', 'c':'c', 'python':'py', 'cuda':'cu'} +_header_extension_registry = {'fortran': None, 'c':'h', 'python':None, 'cuda':'h'} printer_registry = { 'fortran':FCodePrinter, 'c':CCodePrinter, - 'python':PythonCodePrinter + 'python':PythonCodePrinter, + 'cuda':CudaCodePrinter } pyccel_stage = PyccelStage() class Codegen(object): - """Abstract class for code generator.""" + """ + Class which handles the generation of code. - def __init__(self, parser, name): - """Constructor for Codegen. - - parser: pyccel parser + The class which handles the generation of code. This is done by creating an appropriate class + inheriting from `CodePrinter` and using it to create strings describing the code that should + be printed. This class then takes care of creating the necessary files. + Parameters + ---------- + parser : SemanticParser + The Pyccel Semantic parser node. + name : str + Name of the generated module or program. + """ - name: str - name of the generated module or program. 
- """ + def __init__(self, parser, name): pyccel_stage.set_stage('codegen') self._parser = parser self._ast = parser.ast @@ -135,12 +142,22 @@ def language(self): return self._language def set_printer(self, **settings): - """ Set the current codeprinter instance""" + """ + Set the current codeprinter instance. + + Getting the language that will be used (default language used is fortran), + Then instantiating the codePrinter with the corresponding language. + + Parameters + ---------- + **settings : dict + Any additional arguments which are necessary for CCodePrinter. + """ # Get language used (default language used is fortran) language = settings.pop('language', 'fortran') # Set language - if not language in ['fortran', 'c', 'python']: + if not language in ['fortran', 'c', 'python', 'cuda']: raise ValueError('{} language is not available'.format(language)) self._language = language diff --git a/pyccel/codegen/compiling/compilers.py b/pyccel/codegen/compiling/compilers.py index feafa4bea8..48e225cb83 100644 --- a/pyccel/codegen/compiling/compilers.py +++ b/pyccel/codegen/compiling/compilers.py @@ -441,7 +441,10 @@ def compile_shared_library(self, compile_obj, output_folder, verbose = False, sh # Collect compile information exec_cmd, includes, libs_flags, libdirs_flags, m_code = \ self._get_compile_components(compile_obj, accelerators) - linker_libdirs_flags = ['-Wl,-rpath' if l == '-L' else l for l in libdirs_flags] + if self._info['exec'] == 'nvcc': + linker_libdirs_flags = ['-Xcompiler' if l == '-L' else f'"-Wl,-rpath,{l}"' for l in libdirs_flags] + else: + linker_libdirs_flags = ['-Wl,-rpath' if l == '-L' else l for l in libdirs_flags] flags.insert(0,"-shared") diff --git a/pyccel/codegen/pipeline.py b/pyccel/codegen/pipeline.py index 4d4770a491..6730b9bb21 100644 --- a/pyccel/codegen/pipeline.py +++ b/pyccel/codegen/pipeline.py @@ -179,9 +179,10 @@ def handle_error(stage): if language is None: language = 'fortran' - # Choose Fortran compiler + # Choose Default compiler if compiler is None: - compiler = os.environ.get('PYCCEL_DEFAULT_COMPILER', 'GNU') + default_compiler_family = 'nvidia' if language == 'cuda' else 'GNU' + compiler = os.environ.get('PYCCEL_DEFAULT_COMPILER', default_compiler_family) fflags = [] if fflags is None else fflags.split() wrapper_flags = [] if wrapper_flags is None else wrapper_flags.split() diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py new file mode 100644 index 0000000000..e70b57bc20 --- /dev/null +++ b/pyccel/codegen/printing/cucode.py @@ -0,0 +1,74 @@ +# coding: utf-8 +#------------------------------------------------------------------------------------------# +# This file is part of Pyccel which is released under MIT License. See the LICENSE file or # +# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. # +#------------------------------------------------------------------------------------------# +""" +Provide tools for generating and handling CUDA code. +This module is designed to interface Pyccel's Abstract Syntax Tree (AST) with CUDA, +enabling the direct translation of high-level Pyccel expressions into CUDA code. +""" + +from pyccel.codegen.printing.ccode import CCodePrinter, c_library_headers + +from pyccel.ast.core import Import, Module + +from pyccel.errors.errors import Errors + + +errors = Errors() + +__all__ = ["CudaCodePrinter"] + +class CudaCodePrinter(CCodePrinter): + """ + Print code in CUDA format. 
+ + This printer converts Pyccel's Abstract Syntax Tree (AST) into strings of CUDA code. + Navigation through this file utilizes _print_X functions, + as is common with all printers. + + Parameters + ---------- + filename : str + The name of the file being pyccelised. + prefix_module : str + A prefix to be added to the name of the module. + """ + language = "cuda" + + def __init__(self, filename, prefix_module = None): + + errors.set_target(filename, 'file') + + super().__init__(filename) + + def _print_Module(self, expr): + self.set_scope(expr.scope) + self._current_module = expr.name + body = ''.join(self._print(i) for i in expr.body) + + global_variables = ''.join(self._print(d) for d in expr.declarations) + + # Print imports last to be sure that all additional_imports have been collected + imports = [Import(expr.name, Module(expr.name,(),())), *self._additional_imports.values()] + c_headers_imports = '' + local_imports = '' + + for imp in imports: + if imp.source in c_library_headers: + c_headers_imports += self._print(imp) + else: + local_imports += self._print(imp) + + imports = f'{c_headers_imports}\ + extern "C"{{\n\ + {local_imports}\ + }}' + + code = f'{imports}\n\ + {global_variables}\n\ + {body}\n' + + self.exit_scope() + return code diff --git a/pyccel/commands/console.py b/pyccel/commands/console.py index aa9c1aadc5..ea23dd6f8b 100644 --- a/pyccel/commands/console.py +++ b/pyccel/commands/console.py @@ -80,7 +80,7 @@ def pyccel(files=None, mpi=None, openmp=None, openacc=None, output_dir=None, com # ... backend compiler options group = parser.add_argument_group('Backend compiler options') - group.add_argument('--language', choices=('fortran', 'c', 'python'), help='Generated language') + group.add_argument('--language', choices=('fortran', 'c', 'python', 'cuda'), help='Generated language') group.add_argument('--compiler', help='Compiler family or json file containing a compiler description {GNU,intel,PGI}') diff --git a/pyccel/compilers/default_compilers.py b/pyccel/compilers/default_compilers.py index 166085d22e..d47856773c 100644 --- a/pyccel/compilers/default_compilers.py +++ b/pyccel/compilers/default_compilers.py @@ -185,6 +185,15 @@ }, 'family': 'nvidia', } +#------------------------------------------------------------ +nvcc_info = {'exec' : 'nvcc', + 'language' : 'cuda', + 'debug_flags' : ("-g",), + 'release_flags': ("-O3",), + 'general_flags': ('--compiler-options', '-fPIC',), + 'family' : 'nvidia' + } + #------------------------------------------------------------ def change_to_lib_flag(lib): @@ -288,6 +297,7 @@ def change_to_lib_flag(lib): pgfortran_info.update(python_info) nvc_info.update(python_info) nvfort_info.update(python_info) +nvcc_info.update(python_info) available_compilers = {('GNU', 'c') : gcc_info, ('GNU', 'fortran') : gfort_info, @@ -296,6 +306,7 @@ def change_to_lib_flag(lib): ('PGI', 'c') : pgcc_info, ('PGI', 'fortran') : pgfortran_info, ('nvidia', 'c') : nvc_info, - ('nvidia', 'fortran') : nvfort_info} + ('nvidia', 'fortran') : nvfort_info, + ('nvidia', 'cuda'): nvcc_info} vendors = ('GNU','intel','PGI','nvidia') diff --git a/pyccel/naming/__init__.py b/pyccel/naming/__init__.py index a71d841c8e..1b8514703b 100644 --- a/pyccel/naming/__init__.py +++ b/pyccel/naming/__init__.py @@ -10,7 +10,9 @@ from .fortrannameclashchecker import FortranNameClashChecker from .cnameclashchecker import CNameClashChecker from .pythonnameclashchecker import PythonNameClashChecker +from .cudanameclashchecker import CudaNameClashChecker name_clash_checkers = 
{'fortran':FortranNameClashChecker(), 'c':CNameClashChecker(), - 'python':PythonNameClashChecker()} + 'python':PythonNameClashChecker(), + 'cuda':CudaNameClashChecker()} diff --git a/pyccel/naming/cudanameclashchecker.py b/pyccel/naming/cudanameclashchecker.py new file mode 100644 index 0000000000..971204e912 --- /dev/null +++ b/pyccel/naming/cudanameclashchecker.py @@ -0,0 +1,92 @@ +# coding: utf-8 +#------------------------------------------------------------------------------------------# +# This file is part of Pyccel which is released under MIT License. See the LICENSE file or # +# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. # +#------------------------------------------------------------------------------------------# +""" +Handles name clash problems in Cuda +""" +from .languagenameclashchecker import LanguageNameClashChecker + +class CudaNameClashChecker(LanguageNameClashChecker): + """ + Class containing functions to help avoid problematic names in Cuda. + + A class which provides functionalities to check or propose variable names and + verify that they do not cause name clashes. Name clashes may be due to + new variables, or due to the use of reserved keywords. + """ + # Keywords as mentioned on https://en.cppreference.com/w/c/keyword + keywords = set(['isign', 'fsign', 'csign', 'auto', 'break', 'case', 'char', 'const', + 'continue', 'default', 'do', 'double', 'else', 'enum', + 'extern', 'float', 'for', 'goto', 'if', 'inline', 'int', + 'long', 'register', 'restrict', 'return', 'short', 'signed', + 'sizeof', 'static', 'struct', 'switch', 'typedef', 'union', + 'unsigned', 'void', 'volatile', 'whie', '_Alignas', + '_Alignof', '_Atomic', '_Bool', '_Complex', 'Decimal128', + '_Decimal32', '_Decimal64', '_Generic', '_Imaginary', + '_Noreturn', '_Static_assert', '_Thread_local', 't_ndarray', + 'array_create', 'new_slice', 'array_slicing', 'alias_assign', + 'transpose_alias_assign', 'array_fill', 't_slice', + 'GET_INDEX_EXP1', 'GET_INDEX_EXP2', 'GET_INDEX_EXP2', + 'GET_INDEX_EXP3', 'GET_INDEX_EXP4', 'GET_INDEX_EXP5', + 'GET_INDEX_EXP6', 'GET_INDEX_EXP7', 'GET_INDEX_EXP8', + 'GET_INDEX_EXP9', 'GET_INDEX_EXP10', 'GET_INDEX_EXP11', + 'GET_INDEX_EXP12', 'GET_INDEX_EXP13', 'GET_INDEX_EXP14', + 'GET_INDEX_EXP15', 'NUM_ARGS_H1', 'NUM_ARGS', + 'GET_INDEX_FUNC_H2', 'GET_INDEX_FUNC', 'GET_INDEX', + 'INDEX', 'GET_ELEMENT', 'free_array', 'free_pointer', + 'get_index', 'numpy_to_ndarray_strides', + 'numpy_to_ndarray_shape', 'get_size', 'order_f', 'order_c', 'array_copy_data']) + + def has_clash(self, name, symbols): + """ + Indicate whether the proposed name causes any clashes. + + Checks if a suggested name conflicts with predefined + keywords or specified symbols,returning true for a clash. + This method is crucial for maintaining namespace integrity and + preventing naming conflicts in code generation processes. + + Parameters + ---------- + name : str + The suggested name. + symbols : set + Symbols which should be considered as collisions. + + Returns + ------- + bool + True if the name is a collision. + False if the name is collision free. + """ + return any(name == k for k in self.keywords) or \ + any(name == s for s in symbols) + + def get_collisionless_name(self, name, symbols): + """ + Get a valid name which doesn't collision with symbols or Cuda keywords. + + Find a new name based on the suggested name which will not cause + conflicts with Cuda keywords, does not appear in the provided symbols, + and is a valid name in Cuda code. 
+ + Parameters + ---------- + name : str + The suggested name. + symbols : set + Symbols which should be considered as collisions. + + Returns + ------- + str + A new name which is collision free. + """ + if len(name)>4 and all(name[i] == '_' for i in (0,1,-1,-2)): + # Ignore magic methods + return name + if name[0] == '_': + name = 'private'+name + return self._get_collisionless_name(name, symbols) diff --git a/pyccel/stdlib/numpy/numpy_c.c b/pyccel/stdlib/numpy/numpy_c.c index 36e4a205ec..1b5a1bf017 100644 --- a/pyccel/stdlib/numpy/numpy_c.c +++ b/pyccel/stdlib/numpy/numpy_c.c @@ -17,8 +17,10 @@ double fsign(double x) return SIGN(x); } +#ifndef __NVCC__ /* numpy.sign for complex */ double complex csign(double complex x) { return x ? ((!creal(x) && cimag(x) < 0) || (creal(x) < 0) ? -1 : 1) : 0; } +#endif diff --git a/pyccel/stdlib/numpy/numpy_c.h b/pyccel/stdlib/numpy/numpy_c.h index 4133e9dbe9..326ec3a549 100644 --- a/pyccel/stdlib/numpy/numpy_c.h +++ b/pyccel/stdlib/numpy/numpy_c.h @@ -15,6 +15,8 @@ long long int isign(long long int x); double fsign(double x); +#ifndef __NVCC__ double complex csign(double complex x); +#endif #endif diff --git a/pytest.ini b/pytest.ini index 42eb0d72ba..3792ab65f9 100644 --- a/pytest.ini +++ b/pytest.ini @@ -9,3 +9,4 @@ markers = python: test to generate python code xdist_incompatible: test which compiles a file also compiled by another test external: test using an external dll (problematic with conda on Windows) + cuda: test to generate cuda code diff --git a/tests/conftest.py b/tests/conftest.py index 79144b6978..a5082ef6e8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,6 +21,17 @@ def language(request): return request.param +@pytest.fixture( params=[ + pytest.param("fortran", marks = pytest.mark.fortran), + pytest.param("c", marks = pytest.mark.c), + pytest.param("python", marks = pytest.mark.python), + pytest.param("cuda", marks = pytest.mark.cuda) + ], + scope = "session" +) +def language_with_cuda(request): + return request.param + def move_coverage(path_dir): for root, _, files in os.walk(path_dir): for name in files: diff --git a/tests/epyccel/test_base.py b/tests/epyccel/test_base.py index c22064d321..413f79eef1 100644 --- a/tests/epyccel/test_base.py +++ b/tests/epyccel/test_base.py @@ -7,128 +7,128 @@ from utilities import epyccel_test -def test_is_false(language): - test = epyccel_test(base.is_false, lang=language) +def test_is_false(language_with_cuda): + test = epyccel_test(base.is_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_is_true(language): - test = epyccel_test(base.is_true, lang=language) +def test_is_true(language_with_cuda): + test = epyccel_test(base.is_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_compare_is(language): - test = epyccel_test(base.compare_is, lang=language) +def test_compare_is(language_with_cuda): + test = epyccel_test(base.compare_is, lang=language_with_cuda) test.compare_epyccel( True, True ) test.compare_epyccel( True, False ) test.compare_epyccel( False, True ) test.compare_epyccel( False, False ) -def test_compare_is_not(language): - test = epyccel_test(base.compare_is_not, lang=language) +def test_compare_is_not(language_with_cuda): + test = epyccel_test(base.compare_is_not, lang=language_with_cuda) test.compare_epyccel( True, True ) test.compare_epyccel( True, False ) test.compare_epyccel( False, True ) test.compare_epyccel( False, False ) -def test_compare_is_int(language): - test = 
epyccel_test(base.compare_is_int, lang=language) +def test_compare_is_int(language_with_cuda): + test = epyccel_test(base.compare_is_int, lang=language_with_cuda) test.compare_epyccel( True, 1 ) test.compare_epyccel( True, 0 ) test.compare_epyccel( False, 1 ) test.compare_epyccel( False, 0 ) -def test_compare_is_not_int(language): - test = epyccel_test(base.compare_is_not_int, lang=language) +def test_compare_is_not_int(language_with_cuda): + test = epyccel_test(base.compare_is_not_int, lang=language_with_cuda) test.compare_epyccel( True, 1 ) test.compare_epyccel( True, 0 ) test.compare_epyccel( False, 1 ) test.compare_epyccel( False, 0 ) -def test_not_false(language): - test = epyccel_test(base.not_false, lang=language) +def test_not_false(language_with_cuda): + test = epyccel_test(base.not_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_not_true(language): - test = epyccel_test(base.not_true, lang=language) +def test_not_true(language_with_cuda): + test = epyccel_test(base.not_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_eq_false(language): - test = epyccel_test(base.eq_false, lang=language) +def test_eq_false(language_with_cuda): + test = epyccel_test(base.eq_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_eq_true(language): - test = epyccel_test(base.eq_true, lang=language) +def test_eq_true(language_with_cuda): + test = epyccel_test(base.eq_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_neq_false(language): - test = epyccel_test(base.eq_false, lang=language) +def test_neq_false(language_with_cuda): + test = epyccel_test(base.eq_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_neq_true(language): - test = epyccel_test(base.eq_true, lang=language) +def test_neq_true(language_with_cuda): + test = epyccel_test(base.eq_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_not(language): - test = epyccel_test(base.not_val, lang=language) +def test_not(language_with_cuda): + test = epyccel_test(base.not_val, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_not_int(language): - test = epyccel_test(base.not_int, lang=language) +def test_not_int(language_with_cuda): + test = epyccel_test(base.not_int, lang=language_with_cuda) test.compare_epyccel( 0 ) test.compare_epyccel( 4 ) -def test_compare_is_nil(language): - test = epyccel_test(base.is_nil, lang=language) +def test_compare_is_nil(language_with_cuda): + test = epyccel_test(base.is_nil, lang=language_with_cuda) test.compare_epyccel( None ) -def test_compare_is_not_nil(language): - test = epyccel_test(base.is_not_nil, lang=language) +def test_compare_is_not_nil(language_with_cuda): + test = epyccel_test(base.is_not_nil, lang=language_with_cuda) test.compare_epyccel( None ) -def test_cast_int(language): - test = epyccel_test(base.cast_int, lang=language) +def test_cast_int(language_with_cuda): + test = epyccel_test(base.cast_int, lang=language_with_cuda) test.compare_epyccel( 4 ) - test = epyccel_test(base.cast_float_to_int, lang=language) + test = epyccel_test(base.cast_float_to_int, lang=language_with_cuda) test.compare_epyccel( 4.5 ) -def test_cast_bool(language): - test = epyccel_test(base.cast_bool, lang=language) +def test_cast_bool(language_with_cuda): + test = 
epyccel_test(base.cast_bool, lang=language_with_cuda) test.compare_epyccel( True ) -def test_cast_float(language): - test = epyccel_test(base.cast_float, lang=language) +def test_cast_float(language_with_cuda): + test = epyccel_test(base.cast_float, lang=language_with_cuda) test.compare_epyccel( 4.5 ) - test = epyccel_test(base.cast_int_to_float, lang=language) + test = epyccel_test(base.cast_int_to_float, lang=language_with_cuda) test.compare_epyccel( 4 ) -def test_if_0_int(language): - test = epyccel_test(base.if_0_int, lang=language) +def test_if_0_int(language_with_cuda): + test = epyccel_test(base.if_0_int, lang=language_with_cuda) test.compare_epyccel( 22 ) test.compare_epyccel( 0 ) -def test_if_0_real(language): - test = epyccel_test(base.if_0_real, lang=language) +def test_if_0_real(language_with_cuda): + test = epyccel_test(base.if_0_real, lang=language_with_cuda) test.compare_epyccel( 22.3 ) test.compare_epyccel( 0.0 ) -def test_same_int(language): - test = epyccel_test(base.is_same_int, lang=language) +def test_same_int(language_with_cuda): + test = epyccel_test(base.is_same_int, lang=language_with_cuda) test.compare_epyccel( 22 ) - test = epyccel_test(base.isnot_same_int, lang=language) + test = epyccel_test(base.isnot_same_int, lang=language_with_cuda) test.compare_epyccel( 22 ) -def test_same_float(language): - test = epyccel_test(base.is_same_float, lang=language) +def test_same_float(language_with_cuda): + test = epyccel_test(base.is_same_float, lang=language_with_cuda) test.compare_epyccel( 22.2 ) - test = epyccel_test(base.isnot_same_float, lang=language) + test = epyccel_test(base.isnot_same_float, lang=language_with_cuda) test.compare_epyccel( 22.2 ) @pytest.mark.parametrize( 'language', [ @@ -150,28 +150,28 @@ def test_same_complex(language): test = epyccel_test(base.isnot_same_complex, lang=language) test.compare_epyccel( complex(2,3) ) -def test_is_types(language): - test = epyccel_test(base.is_types, lang=language) +def test_is_types(language_with_cuda): + test = epyccel_test(base.is_types, lang=language_with_cuda) test.compare_epyccel( 1, 1.0 ) -def test_isnot_types(language): - test = epyccel_test(base.isnot_types, lang=language) +def test_isnot_types(language_with_cuda): + test = epyccel_test(base.isnot_types, lang=language_with_cuda) test.compare_epyccel( 1, 1.0 ) -def test_none_is_none(language): - test = epyccel_test(base.none_is_none, lang=language) +def test_none_is_none(language_with_cuda): + test = epyccel_test(base.none_is_none, lang=language_with_cuda) test.compare_epyccel() -def test_none_isnot_none(language): - test = epyccel_test(base.none_isnot_none, lang=language) +def test_none_isnot_none(language_with_cuda): + test = epyccel_test(base.none_isnot_none, lang=language_with_cuda) test.compare_epyccel() -def test_pass_if(language): - test = epyccel_test(base.pass_if, lang=language) +def test_pass_if(language_with_cuda): + test = epyccel_test(base.pass_if, lang=language_with_cuda) test.compare_epyccel(2) -def test_pass2_if(language): - test = epyccel_test(base.pass2_if, lang=language) +def test_pass2_if(language_with_cuda): + test = epyccel_test(base.pass2_if, lang=language_with_cuda) test.compare_epyccel(0.2) test.compare_epyccel(0.0) @@ -192,15 +192,15 @@ def test_use_optional(language): test.compare_epyccel() test.compare_epyccel(6) -def test_none_equality(language): - test = epyccel_test(base.none_equality, lang=language) +def test_none_equality(language_with_cuda): + test = epyccel_test(base.none_equality, lang=language_with_cuda) 
test.compare_epyccel() test.compare_epyccel(6) -def test_none_none_equality(language): - test = epyccel_test(base.none_none_equality, lang=language) +def test_none_none_equality(language_with_cuda): + test = epyccel_test(base.none_none_equality, lang=language_with_cuda) test.compare_epyccel() -def test_none_literal_equality(language): - test = epyccel_test(base.none_literal_equality, lang=language) +def test_none_literal_equality(language_with_cuda): + test = epyccel_test(base.none_literal_equality, lang=language_with_cuda) test.compare_epyccel() From b92703786ee88812cbcb9b9fd70430c223aef3a2 Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Mon, 4 Mar 2024 11:01:40 +0100 Subject: [PATCH 35/53] Ensure tests are run on push --- .github/workflows/anaconda_linux.yml | 2 +- .github/workflows/anaconda_windows.yml | 2 +- .github/workflows/intel.yml | 2 +- .github/workflows/linux.yml | 2 +- .github/workflows/macosx.yml | 2 +- .github/workflows/pickle.yml | 2 +- .github/workflows/pickle_wheel.yml | 2 +- .github/workflows/windows.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/anaconda_linux.yml b/.github/workflows/anaconda_linux.yml index c7e04a37da..e79bf11572 100644 --- a/.github/workflows/anaconda_linux.yml +++ b/.github/workflows/anaconda_linux.yml @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/anaconda_windows.yml b/.github/workflows/anaconda_windows.yml index 70bf150b1a..f02eb9bdbc 100644 --- a/.github/workflows/anaconda_windows.yml +++ b/.github/workflows/anaconda_windows.yml @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: windows-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index d45beb3d8b..1c4f11c33d 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -29,7 +29,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 29239db9b6..ab31641a8e 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -28,7 +28,7 @@ env: jobs: matrix_prep: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: diff --git a/.github/workflows/macosx.yml b/.github/workflows/macosx.yml index 1d4d4fd562..3196f44d29 100644 --- a/.github/workflows/macosx.yml +++ b/.github/workflows/macosx.yml @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: macos-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ 
steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/pickle.yml b/.github/workflows/pickle.yml index b20fe1a93f..8e7613c055 100644 --- a/.github/workflows/pickle.yml +++ b/.github/workflows/pickle.yml @@ -31,7 +31,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-matrix.outputs.python_version }} matrix: ${{ steps.set-matrix.outputs.matrix }} diff --git a/.github/workflows/pickle_wheel.yml b/.github/workflows/pickle_wheel.yml index ce530a238f..d543547d5d 100644 --- a/.github/workflows/pickle_wheel.yml +++ b/.github/workflows/pickle_wheel.yml @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 03cecaab64..8e965882bf 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: windows-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: From 2c9ccad30764b14fa9b4de35a33d7453ca656e99 Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Mon, 4 Mar 2024 11:03:05 +0100 Subject: [PATCH 36/53] Ensure tests are triggered on main branch --- .github/workflows/anaconda_linux.yml | 2 +- .github/workflows/anaconda_windows.yml | 2 +- .github/workflows/intel.yml | 2 +- .github/workflows/linux.yml | 2 +- .github/workflows/macosx.yml | 2 +- .github/workflows/pickle.yml | 2 +- .github/workflows/pickle_wheel.yml | 2 +- .github/workflows/windows.yml | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/anaconda_linux.yml b/.github/workflows/anaconda_linux.yml index e79bf11572..8173b4f7a4 100644 --- a/.github/workflows/anaconda_linux.yml +++ b/.github/workflows/anaconda_linux.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} diff --git a/.github/workflows/anaconda_windows.yml b/.github/workflows/anaconda_windows.yml index f02eb9bdbc..5cd222511d 100644 --- a/.github/workflows/anaconda_windows.yml +++ b/.github/workflows/anaconda_windows.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 1c4f11c33d..96b614f0fb 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index ab31641a8e..3fbf76d70a 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} diff 
--git a/.github/workflows/macosx.yml b/.github/workflows/macosx.yml index 3196f44d29..c5396fbe7c 100644 --- a/.github/workflows/macosx.yml +++ b/.github/workflows/macosx.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} diff --git a/.github/workflows/pickle.yml b/.github/workflows/pickle.yml index 8e7613c055..47663b6b2f 100644 --- a/.github/workflows/pickle.yml +++ b/.github/workflows/pickle.yml @@ -19,7 +19,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} diff --git a/.github/workflows/pickle_wheel.yml b/.github/workflows/pickle_wheel.yml index d543547d5d..450cb250be 100644 --- a/.github/workflows/pickle_wheel.yml +++ b/.github/workflows/pickle_wheel.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 8e965882bf..14ba9ddf31 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} From 1f9ca8c0c5752f61167594ad1af42b4403711c77 Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 13:42:05 +0100 Subject: [PATCH 37/53] use __cplusplus macro to remove complex type --- pyccel/stdlib/ndarrays/ndarrays.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/pyccel/stdlib/ndarrays/ndarrays.h b/pyccel/stdlib/ndarrays/ndarrays.h index fa113c1811..1e5739746f 100644 --- a/pyccel/stdlib/ndarrays/ndarrays.h +++ b/pyccel/stdlib/ndarrays/ndarrays.h @@ -92,8 +92,10 @@ typedef struct s_ndarray float *nd_float; double *nd_double; bool *nd_bool; + #ifndef __cplusplus double complex *nd_cdouble; float complex *nd_cfloat; + #endif }; /* number of dimensions */ int32_t nd; @@ -128,8 +130,10 @@ void _array_fill_int64(int64_t c, t_ndarray arr); void _array_fill_float(float c, t_ndarray arr); void _array_fill_double(double c, t_ndarray arr); void _array_fill_bool(bool c, t_ndarray arr); +#ifndef __cplusplus void _array_fill_cfloat(float complex c, t_ndarray arr); void _array_fill_cdouble(double complex c, t_ndarray arr); +#endif /* slicing */ /* creating a Slice object */ @@ -164,8 +168,10 @@ int64_t numpy_sum_int32(t_ndarray arr); int64_t numpy_sum_int64(t_ndarray arr); float numpy_sum_float32(t_ndarray arr); double numpy_sum_float64(t_ndarray arr); +#ifndef __cplusplus float complex numpy_sum_complex64(t_ndarray arr); double complex numpy_sum_complex128(t_ndarray arr); +#endif /*numpy max/amax */ @@ -176,8 +182,10 @@ int64_t numpy_amax_int32(t_ndarray arr); int64_t numpy_amax_int64(t_ndarray arr); float numpy_amax_float32(t_ndarray arr); double numpy_amax_float64(t_ndarray arr); +#ifndef __cplusplus float complex numpy_amax_complex64(t_ndarray arr); double complex numpy_amax_complex128(t_ndarray arr); +#endif /* numpy min/amin */ @@ -188,7 +196,9 @@ int64_t numpy_amin_int32(t_ndarray arr); int64_t numpy_amin_int64(t_ndarray arr); float numpy_amin_float32(t_ndarray arr); double numpy_amin_float64(t_ndarray arr); +#ifndef __cplusplus float complex numpy_amin_complex64(t_ndarray arr); double complex numpy_amin_complex128(t_ndarray arr); +#endif #endif From 54c4423ba793a08bd07f23adc197c3d151c8d23a Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 
Mar 2024 14:31:36 +0100 Subject: [PATCH 38/53] made array create works with cuda - seperated the compound literal from func_call / add extern C to program includes --- pyccel/codegen/printing/cucode.py | 68 ++++++++++++++++++++++++++++++- 1 file changed, 66 insertions(+), 2 deletions(-) diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py index e70b57bc20..92916c65d8 100644 --- a/pyccel/codegen/printing/cucode.py +++ b/pyccel/codegen/printing/cucode.py @@ -9,9 +9,10 @@ enabling the direct translation of high-level Pyccel expressions into CUDA code. """ -from pyccel.codegen.printing.ccode import CCodePrinter, c_library_headers +from pyccel.codegen.printing.ccode import CCodePrinter, c_library_headers, c_imports +from pyccel.ast.datatypes import NativeInteger -from pyccel.ast.core import Import, Module +from pyccel.ast.core import Import, Module, Declare from pyccel.errors.errors import Errors @@ -43,6 +44,38 @@ def __init__(self, filename, prefix_module = None): super().__init__(filename) + def _print_Program(self, expr): + self.set_scope(expr.scope) + body = self._print(expr.body) + variables = self.scope.variables.values() + decs = ''.join(self._print(Declare(v)) for v in variables) + + imports = [*expr.imports, *self._additional_imports.values()] + c_headers_imports = '' + local_imports = '' + + for imp in imports: + if imp.source in c_library_headers: + c_headers_imports += self._print(imp) + else: + local_imports += self._print(imp) + + imports = f'{c_headers_imports}\ + extern "C"{{\n\ + {local_imports}\ + }}' + # imports = ''.join(self._print(i) for i in imports) + + self.exit_scope() + return ('{imports}' + 'int main()\n{{\n' + '{decs}' + '{body}' + 'return 0;\n' + '}}').format(imports=imports, + decs=decs, + body=body) + def _print_Module(self, expr): self.set_scope(expr.scope) self._current_module = expr.name @@ -72,3 +105,34 @@ def _print_Module(self, expr): self.exit_scope() return code + + def _print_Allocate(self, expr): + free_code = '' + variable = expr.variable + if variable.rank > 0: + #free the array if its already allocated and checking if its not null if the status is unknown + if (expr.status == 'unknown'): + shape_var = DottedVariable(NativeVoid(), 'shape', lhs = variable) + free_code = f'if ({self._print(shape_var)} != NULL)\n' + free_code += "{{\n{}}}\n".format(self._print(Deallocate(variable))) + elif (expr.status == 'allocated'): + free_code += self._print(Deallocate(variable)) + self.add_import(c_imports['ndarrays']) + shape = ", ".join(self._print(i) for i in expr.shape) + dtype = self.find_in_ndarray_type_registry(variable.dtype, variable.precision) + shape_dtype = self.find_in_dtype_registry(NativeInteger(), 8) + tmp_shape = self.scope.get_new_name(f'tmp_shape_{self._print(variable)}') + shape_Assign = f'{shape_dtype} {tmp_shape}[] = {{{shape}}};\n' + is_view = 'false' if variable.on_heap else 'true' + order = "order_f" if expr.order == "F" else "order_c" + alloc_code = f"{self._print(variable)} = array_create({variable.rank}, {tmp_shape}, {dtype}, {is_view}, {order});\n" + return '{}{}{}'.format(free_code, shape_Assign,alloc_code) + elif variable.is_alias: + var_code = self._print(ObjectAddress(variable)) + if expr.like: + declaration_type = self.get_declare_type(expr.like) + return f'{var_code} = malloc(sizeof({declaration_type}));\n' + else: + raise NotImplementedError(f"Allocate not implemented for {variable}") + else: + raise NotImplementedError(f"Allocate not implemented for {variable}") \ No newline at end of file From 
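The `_print_Program` method above wraps the non-system includes of the generated main program in `extern "C"`, so the C ndarray runtime keeps C linkage when the file is compiled as CUDA/C++. A sketch of the output shape it aims to emit (header names are illustrative, not taken from the patch):

#include <stdlib.h>
extern "C" {
#include "ndarrays.h"   /* C runtime kept with C linkage under nvcc */
}

int main()
{
    /* declarations */
    /* body */
    return 0;
}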
82006d5b3576e5562088882b5edce8884a02425d Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 17:02:27 +0100 Subject: [PATCH 39/53] removed _Generic Macros --- pyccel/stdlib/ndarrays/ndarrays.h | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/pyccel/stdlib/ndarrays/ndarrays.h b/pyccel/stdlib/ndarrays/ndarrays.h index 1e5739746f..23817129ca 100644 --- a/pyccel/stdlib/ndarrays/ndarrays.h +++ b/pyccel/stdlib/ndarrays/ndarrays.h @@ -11,17 +11,6 @@ # include # include -/* mapping the function array_fill to the correct type */ -# define array_fill(c, arr) _Generic((c), int64_t : _array_fill_int64,\ - int32_t : _array_fill_int32,\ - int16_t : _array_fill_int16,\ - int8_t : _array_fill_int8,\ - float : _array_fill_float,\ - double : _array_fill_double,\ - bool : _array_fill_bool,\ - float complex : _array_fill_cfloat,\ - double complex : _array_fill_cdouble)(c, arr) - typedef enum e_slice_type { ELEMENT, RANGE } t_slice_type; typedef struct s_slice From 0ae88a3a19d5e291877cdceae04c2141d1814804 Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 17:07:34 +0100 Subject: [PATCH 40/53] added required imports --- pyccel/codegen/printing/cucode.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py index 92916c65d8..e9b8427f39 100644 --- a/pyccel/codegen/printing/cucode.py +++ b/pyccel/codegen/printing/cucode.py @@ -10,7 +10,10 @@ """ from pyccel.codegen.printing.ccode import CCodePrinter, c_library_headers, c_imports -from pyccel.ast.datatypes import NativeInteger +from pyccel.ast.datatypes import NativeInteger, NativeVoid + +from pyccel.ast.core import Deallocate +from pyccel.ast.variable import DottedVariable from pyccel.ast.core import Import, Module, Declare From 16f5a1ba593e4218ec9ee671422e6e579cae5d6a Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 17:08:23 +0100 Subject: [PATCH 41/53] continuation of removing _Generic printing the correct array_fill --- pyccel/codegen/printing/ccode.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/pyccel/codegen/printing/ccode.py b/pyccel/codegen/printing/ccode.py index e071b0f429..1a89fed6b0 100644 --- a/pyccel/codegen/printing/ccode.py +++ b/pyccel/codegen/printing/ccode.py @@ -484,12 +484,14 @@ def arrayFill(self, expr): lhs = expr.lhs code_init = '' declare_dtype = self.find_in_dtype_registry(rhs.dtype, rhs.precision) - + dtype = self.find_in_ndarray_type_registry(rhs.dtype, rhs.precision) + dtype = dtype[3:] + fill_value = self._print(rhs.fill_value) if rhs.fill_value is not None: if isinstance(rhs.fill_value, Literal): - code_init += 'array_fill(({0}){1}, {2});\n'.format(declare_dtype, self._print(rhs.fill_value), self._print(lhs)) + code_init += f'_array_fill_{dtype}(({declare_dtype}){fill_value}, {self._print(lhs)});\n' else: - code_init += 'array_fill({0}, {1});\n'.format(self._print(rhs.fill_value), self._print(lhs)) + code_init += f'_array_fill_{dtype}({fill_value}, {self._print(lhs)});\n' return code_init def _init_stack_array(self, expr): From 536e1f02575b926b93ae2681216baad293a50f27 Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 17:48:33 +0100 Subject: [PATCH 42/53] fixed stack arrays in cuda printing --- pyccel/codegen/printing/cucode.py | 45 +++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py index e9b8427f39..b57f6e239c 100644 --- 
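The `_Generic` selection removed above is a C11 feature that the C++ front end used for CUDA sources does not provide, so the dispatch moves out of the header and into the printer, which now names the typed helper directly. A small sketch of the emitted call before and after the change (variable names illustrative):

/* before: dispatched by the C11 _Generic macro, unavailable under C++/nvcc */
array_fill((int64_t)0, arr);

/* after: the printer resolves the type itself and calls the typed helper */
_array_fill_int64((int64_t)0, arr);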
a/pyccel/codegen/printing/cucode.py +++ b/pyccel/codegen/printing/cucode.py @@ -9,6 +9,8 @@ enabling the direct translation of high-level Pyccel expressions into CUDA code. """ +import functools + from pyccel.codegen.printing.ccode import CCodePrinter, c_library_headers, c_imports from pyccel.ast.datatypes import NativeInteger, NativeVoid @@ -16,6 +18,7 @@ from pyccel.ast.variable import DottedVariable from pyccel.ast.core import Import, Module, Declare +from pyccel.ast.operators import PyccelMul from pyccel.errors.errors import Errors @@ -109,6 +112,48 @@ def _print_Module(self, expr): self.exit_scope() return code + def _init_stack_array(self, expr): + """ + Return a string which handles the assignment of a stack ndarray. + + Print the code necessary to initialise a ndarray on the stack. + + Parameters + ---------- + expr : TypedAstNode + The Assign Node used to get the lhs and rhs. + + Returns + ------- + buffer_array : str + String initialising the stack (C) array which stores the data. + array_init : str + String containing the rhs of the initialization of a stack array. + """ + var = expr + dtype = self.find_in_dtype_registry(var.dtype, var.precision) + np_dtype = self.find_in_ndarray_type_registry(var.dtype, var.precision) + shape = ", ".join(self._print(i) for i in var.alloc_shape) + tot_shape = self._print(functools.reduce( + lambda x,y: PyccelMul(x,y,simplify=True), var.alloc_shape)) + declare_dtype = self.find_in_dtype_registry(NativeInteger(), 8) + + dummy_array_name = self.scope.get_new_name('array_dummy') + buffer_array = "{dtype} {name}[{size}];\n".format( + dtype = dtype, + name = dummy_array_name, + size = tot_shape) + tmp_shape = self.scope.get_new_name(f'tmp_shape_{var.name}') + shape_init = f'{declare_dtype} {tmp_shape}[] = {{{shape}}};\n' + tmp_strides = self.scope.get_new_name(f'tmp_strides_{var.name}') + strides_init = f'{declare_dtype} {tmp_strides}[{var.rank}] = {{0}};\n' + array_init = f' = (t_ndarray){{\n.{np_dtype}={dummy_array_name},\n .nd={var.rank},\n ' + array_init += f'.shape={tmp_shape},\n .strides={tmp_strides},\n .type={np_dtype},\n .is_view=false\n}};\n' + array_init += 'stack_array_init(&{})'.format(self._print(var)) + preface = buffer_array + shape_init + strides_init + self.add_import(c_imports['ndarrays']) + return preface, array_init + def _print_Allocate(self, expr): free_code = '' variable = expr.variable From cd67474f7e7d081d7541a74b20cdcc6ce45a185c Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 17:54:07 +0100 Subject: [PATCH 43/53] activated CPU cuda array tests --- tests/epyccel/test_array_as_func_args.py | 16 ++--- tests/epyccel/test_arrays.py | 64 +++++++++---------- .../test_arrays_multiple_assignments.py | 49 +++++++------- 3 files changed, 63 insertions(+), 66 deletions(-) diff --git a/tests/epyccel/test_array_as_func_args.py b/tests/epyccel/test_array_as_func_args.py index 194d269dba..b8990300af 100644 --- a/tests/epyccel/test_array_as_func_args.py +++ b/tests/epyccel/test_array_as_func_args.py @@ -12,13 +12,13 @@ int_types = ['int8', 'int16', 'int32', 'int64'] float_types = ['float32', 'float64'] -def test_array_int_1d_scalar_add(language): +def test_array_int_1d_scalar_add(language_with_cuda): @template('T', ['int8', 'int16', 'int32', 'int64']) def array_int_1d_scalar_add(x : 'T[:]', a : 'T', x_len : int): for i in range(x_len): x[i] += a f1 = array_int_1d_scalar_add - f2 = epyccel(f1, language=language) + f2 = epyccel(f1, language=language_with_cuda) for t in int_types: size = randint(1, 30) @@ -31,13 +31,13 @@ def 
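The `_init_stack_array` override above hoists the shape and strides into named stack temporaries instead of writing them as inline compound literals, which C++ front ends generally reject, and then gathers everything into the `t_ndarray` descriptor. A sketch of the code it aims to produce for a 2x3 stack array of doubles (identifiers illustrative, not taken from the patch output):

double  array_dummy[6];
int64_t tmp_shape_x[]    = {2, 3};
int64_t tmp_strides_x[2] = {0};
t_ndarray x = (t_ndarray){.nd_double=array_dummy, .nd=2,
                          .shape=tmp_shape_x, .strides=tmp_strides_x,
                          .type=nd_double, .is_view=false};
stack_array_init(&x);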
array_int_1d_scalar_add(x : 'T[:]', a : 'T', x_len : int): assert np.array_equal( x1, x2 ) -def test_array_real_1d_scalar_add(language): +def test_array_real_1d_scalar_add(language_with_cuda): @template('T', ['float32', 'double']) def array_real_1d_scalar_add(x : 'T[:]', a : 'T', x_len : int): for i in range(x_len): x[i] += a f1 = array_real_1d_scalar_add - f2 = epyccel(f1, language=language) + f2 = epyccel(f1, language=language_with_cuda) for t in float_types: size = randint(1, 30) @@ -71,14 +71,14 @@ def array_complex_1d_scalar_add(x : 'T[:]', a : 'T', x_len : int): assert np.array_equal( x1, x2 ) -def test_array_int_2d_scalar_add(language): +def test_array_int_2d_scalar_add(language_with_cuda): @template('T', ['int8', 'int16', 'int32', 'int64']) def array_int_2d_scalar_add( x : 'T[:,:]', a : 'T', d1 : int, d2 : int): for i in range(d1): for j in range(d2): x[i, j] += a f1 = array_int_2d_scalar_add - f2 = epyccel(f1, language=language) + f2 = epyccel(f1, language=language_with_cuda) for t in int_types: d1 = randint(1, 15) @@ -92,14 +92,14 @@ def array_int_2d_scalar_add( x : 'T[:,:]', a : 'T', d1 : int, d2 : int): assert np.array_equal( x1, x2 ) -def test_array_real_2d_scalar_add(language): +def test_array_real_2d_scalar_add(language_with_cuda): @template('T', ['float32', 'double']) def array_real_2d_scalar_add(x : 'T[:,:]', a : 'T', d1 : int, d2 : int): for i in range(d1): for j in range(d2): x[i, j] += a f1 = array_real_2d_scalar_add - f2 = epyccel(f1, language=language) + f2 = epyccel(f1, language=language_with_cuda) for t in float_types: d1 = randint(1, 15) diff --git a/tests/epyccel/test_arrays.py b/tests/epyccel/test_arrays.py index f279137ff0..4ea556f3fa 100644 --- a/tests/epyccel/test_arrays.py +++ b/tests/epyccel/test_arrays.py @@ -85,10 +85,10 @@ def test_array_assigned_dtype(language): # TEST: 1D ARRAYS OF INT-32 #============================================================================== -def test_array_int32_1d_scalar_add(language): +def test_array_int32_1d_scalar_add(language_with_cuda): f1 = arrays.array_int32_1d_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -99,10 +99,10 @@ def test_array_int32_1d_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_add_stride(language): +def test_array_int32_1d_scalar_add_stride(language_with_cuda): f1 = arrays.array_int32_1d_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3,4,5,6,7,8], dtype=np.int32 ) x2 = np.copy(x1) @@ -113,10 +113,10 @@ def test_array_int32_1d_scalar_add_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_sub(language): +def test_array_int32_1d_scalar_sub(language_with_cuda): f1 = arrays.array_int32_1d_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -127,10 +127,10 @@ def test_array_int32_1d_scalar_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_sub_stride(language): +def test_array_int32_1d_scalar_sub_stride(language_with_cuda): f1 = arrays.array_int32_1d_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3,4,5,6,7,8,9], dtype=np.int32 ) x2 = np.copy(x1) @@ -141,10 +141,10 @@ def test_array_int32_1d_scalar_sub_stride(language): assert 
np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_mul(language): +def test_array_int32_1d_scalar_mul(language_with_cuda): f1 = arrays.array_int32_1d_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -155,10 +155,10 @@ def test_array_int32_1d_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_mul_stride(language): +def test_array_int32_1d_scalar_mul_stride(language_with_cuda): f1 = arrays.array_int32_1d_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3,4,5,6,7,8,9], dtype=np.int32 ) x2 = np.copy(x1) @@ -169,10 +169,10 @@ def test_array_int32_1d_scalar_mul_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_div(language): +def test_array_int32_1d_scalar_div(language_with_cuda): f1 = arrays.array_int32_1d_scalar_div - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -183,10 +183,10 @@ def test_array_int32_1d_scalar_div(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_idiv(language): +def test_array_int32_1d_scalar_idiv(language_with_cuda): f1 = arrays.array_int32_1d_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -197,10 +197,10 @@ def test_array_int32_1d_scalar_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_scalar_idiv_stride(language): +def test_array_int32_1d_scalar_idiv_stride(language_with_cuda): f1 = arrays.array_int32_1d_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3,4,5,6,7,8,9], dtype=np.int32 ) x2 = np.copy(x1) @@ -211,10 +211,10 @@ def test_array_int32_1d_scalar_idiv_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_add(language): +def test_array_int32_1d_add(language_with_cuda): f1 = arrays.array_int32_1d_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -225,10 +225,10 @@ def test_array_int32_1d_add(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_sub(language): +def test_array_int32_1d_sub(language_with_cuda): f1 = arrays.array_int32_1d_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -239,10 +239,10 @@ def test_array_int32_1d_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_mul(language): +def test_array_int32_1d_mul(language_with_cuda): f1 = arrays.array_int32_1d_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -253,10 +253,10 @@ def test_array_int32_1d_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_idiv(language): +def test_array_int32_1d_idiv(language_with_cuda): f1 = arrays.array_int32_1d_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -267,10 +267,10 @@ def test_array_int32_1d_idiv(language): assert np.array_equal( x1, x2 ) -def 
test_array_int32_1d_add_augassign(language): +def test_array_int32_1d_add_augassign(language_with_cuda): f1 = arrays.array_int32_1d_add_augassign - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -281,10 +281,10 @@ def test_array_int32_1d_add_augassign(language): assert np.array_equal( x1, x2 ) -def test_array_int32_1d_sub_augassign(language): +def test_array_int32_1d_sub_augassign(language_with_cuda): f1 = arrays.array_int32_1d_sub_augassign - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3], dtype=np.int32 ) x2 = np.copy(x1) @@ -295,10 +295,10 @@ def test_array_int32_1d_sub_augassign(language): assert np.array_equal( x1, x2 ) -def test_array_int_1d_initialization_1(language): +def test_array_int_1d_initialization_1(language_with_cuda): f1 = arrays.array_int_1d_initialization_1 - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) assert f1() == f2() diff --git a/tests/epyccel/test_arrays_multiple_assignments.py b/tests/epyccel/test_arrays_multiple_assignments.py index b7be052a85..004b7675e0 100644 --- a/tests/epyccel/test_arrays_multiple_assignments.py +++ b/tests/epyccel/test_arrays_multiple_assignments.py @@ -15,7 +15,7 @@ STACK_ARRAY_UNKNOWN_SHAPE) #============================================================================== -def test_no_reallocation(language): +def test_no_reallocation(language_with_cuda): @stack_array('y') def f(): @@ -30,13 +30,13 @@ def f(): return x.sum() + y.sum() # TODO: check that we don't get any Pyccel warnings - g = epyccel(f, language=language) + g = epyccel(f, language=language_with_cuda) # Check result of pyccelized function assert f() == g() #============================================================================== -def test_reallocation_heap(language): +def test_reallocation_heap(language_with_cuda): def f(): import numpy as np @@ -48,7 +48,7 @@ def f(): errors = Errors() # TODO: check if we get the correct Pyccel warning - g = epyccel(f, language=language) + g = epyccel(f, language=language_with_cuda) # Check result of pyccelized function assert f() == g() @@ -63,7 +63,7 @@ def f(): assert warning_info.message == ARRAY_REALLOCATION #============================================================================== -def test_reallocation_stack(language): +def test_reallocation_stack(language_with_cuda): @stack_array('x') def f(): @@ -77,7 +77,7 @@ def f(): # epyccel should raise an Exception with pytest.raises(PyccelSemanticError): - epyccel(f, language=language) + epyccel(f, language=language_with_cuda) # Check that we got exactly 1 Pyccel error assert errors.has_errors() @@ -89,7 +89,7 @@ def f(): assert error_info.message == INCOMPATIBLE_REDEFINITION_STACK_ARRAY #============================================================================== -def test_creation_in_loop_heap(language): +def test_creation_in_loop_heap(language_with_cuda): def f(): import numpy as np @@ -101,7 +101,7 @@ def f(): errors = Errors() # TODO: check if we get the correct Pyccel warning - g = epyccel(f, language=language) + g = epyccel(f, language=language_with_cuda) # Check result of pyccelized function assert f() == g() @@ -116,7 +116,7 @@ def f(): assert warning_info.message == ARRAY_DEFINITION_IN_LOOP #============================================================================== -def test_creation_in_loop_stack(language): +def 
test_creation_in_loop_stack(language_with_cuda): @stack_array('x') def f(): @@ -130,7 +130,7 @@ def f(): # epyccel should raise an Exception with pytest.raises(PyccelSemanticError): - epyccel(f, language=language) + epyccel(f, language=language_with_cuda) # Check that we got exactly 2 Pyccel errors assert errors.has_errors() @@ -149,7 +149,7 @@ def f(): assert error_info.message == STACK_ARRAY_DEFINITION_IN_LOOP #============================================================================== -def test_creation_in_if_heap(language): +def test_creation_in_if_heap(language_with_cuda): def f(c : 'float'): import numpy as np @@ -160,7 +160,7 @@ def f(c : 'float'): return x.sum() # TODO: check if we get the correct Pyccel warning - g = epyccel(f, language=language) + g = epyccel(f, language=language_with_cuda) # Check result of pyccelized function import numpy as np @@ -168,7 +168,7 @@ def f(c : 'float'): assert f(c) == g(c) #============================================================================== -def test_Reassign_to_Target(): +def test_Reassign_to_Target(language_with_cuda): def f(): import numpy as np @@ -182,7 +182,7 @@ def f(): # epyccel should raise an Exception with pytest.raises(PyccelSemanticError): - epyccel(f) + epyccel(f, language=language_with_cuda) # Check that we got exactly 1 Pyccel error assert errors.has_errors() == 1 @@ -195,7 +195,7 @@ def f(): #============================================================================== -def test_Assign_Between_Allocatables(): +def test_Assign_Between_Allocatables(language_with_cuda): def f(): import numpy as np @@ -210,7 +210,7 @@ def f(): # epyccel should raise an Exception with pytest.raises(PyccelSemanticError): - epyccel(f) + epyccel(f, language=language_with_cuda) # Check that we got exactly 1 Pyccel error assert errors.has_errors() == 1 @@ -223,7 +223,7 @@ def f(): #============================================================================== -def test_Assign_after_If(): +def test_Assign_after_If(language_with_cuda): def f(b : bool): import numpy as np @@ -240,7 +240,7 @@ def f(b : bool): errors = Errors() # epyccel should raise an Exception - f2 = epyccel(f) + f2 = epyccel(f, language=language_with_cuda) # Check that we got exactly 1 Pyccel warning assert errors.has_warnings() @@ -255,7 +255,7 @@ def f(b : bool): assert f(False) == f2(False) #============================================================================== -def test_stack_array_if(language): +def test_stack_array_if(language_with_cuda): @stack_array('x') def f(b : bool): @@ -267,18 +267,15 @@ def f(b : bool): return x[0] # Initialize singleton that stores Pyccel errors - f2 = epyccel(f, language=language) + f2 = epyccel(f, language=language_with_cuda) assert f(True) == f2(True) assert f(False) == f2(False) #============================================================================== -@pytest.mark.parametrize('lang',[ - pytest.param('fortran', marks = pytest.mark.fortran), - pytest.param('python', marks = pytest.mark.python), - pytest.param('c' , marks = pytest.mark.c)]) -def test_Assign_between_nested_If(lang): + +def test_Assign_between_nested_If(language_with_cuda): def f(b1 : bool, b2 : bool): import numpy as np @@ -297,7 +294,7 @@ def f(b1 : bool, b2 : bool): errors = Errors() # epyccel should raise an Exception - f2 = epyccel(f, language=lang) + f2 = epyccel(f, language=language_with_cuda) # Check that we don't get a Pyccel warning assert not errors.has_warnings() From ac5642da5db84c9a0e2a5727d4eb5fbd221790f7 Mon Sep 17 00:00:00 2001 From: El alj Mouad 
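In these test patches the array tests request a `language_with_cuda` fixture in place of `language`, so the same cases also run against the CUDA backend. The fixture itself is not defined in these hunks; a hypothetical sketch of what such a conftest.py fixture could look like (parameter names and markers are assumptions, not taken from the patch):

import pytest

@pytest.fixture(params=[
    pytest.param("fortran", marks = pytest.mark.fortran),
    pytest.param("c",       marks = pytest.mark.c),
    pytest.param("python",  marks = pytest.mark.python),
    pytest.param("cuda",    marks = pytest.mark.cuda),
])
def language_with_cuda(request):
    # same languages as the plain `language` fixture, plus a marked cuda entry
    return request.param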
Date: Mon, 4 Mar 2024 18:03:23 +0100 Subject: [PATCH 44/53] activated more arrays tests --- tests/epyccel/test_arrays.py | 376 +++++++++++++++++------------------ 1 file changed, 188 insertions(+), 188 deletions(-) diff --git a/tests/epyccel/test_arrays.py b/tests/epyccel/test_arrays.py index 4ea556f3fa..6092938524 100644 --- a/tests/epyccel/test_arrays.py +++ b/tests/epyccel/test_arrays.py @@ -318,10 +318,10 @@ def test_array_int_1d_initialization_2(language): assert f1() == f2() -def test_array_int_1d_initialization_3(language): +def test_array_int_1d_initialization_3(language_with_cuda): f1 = arrays.array_int_1d_initialization_3 - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) assert f1() == f2() @@ -329,10 +329,10 @@ def test_array_int_1d_initialization_3(language): # TEST: 2D ARRAYS OF INT-32 WITH C ORDERING #============================================================================== -def test_array_int32_2d_C_scalar_add(language): +def test_array_int32_2d_C_scalar_add(language_with_cuda): f1 = arrays.array_int32_2d_C_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -343,10 +343,10 @@ def test_array_int32_2d_C_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_scalar_add_stride(language): +def test_array_int32_2d_C_scalar_add_stride(language_with_cuda): f1 = arrays.array_int32_2d_C_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -357,10 +357,10 @@ def test_array_int32_2d_C_scalar_add_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_scalar_sub(language): +def test_array_int32_2d_C_scalar_sub(language_with_cuda): f1 = arrays.array_int32_2d_C_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -395,10 +395,10 @@ def test_array_int32_2d_C_scalar_sub_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_scalar_mul(language): +def test_array_int32_2d_C_scalar_mul(language_with_cuda): f1 = arrays.array_int32_2d_C_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -409,10 +409,10 @@ def test_array_int32_2d_C_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_scalar_mul_stride(language): +def test_array_int32_2d_C_scalar_mul_stride(language_with_cuda): f1 = arrays.array_int32_2d_C_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -423,10 +423,10 @@ def test_array_int32_2d_C_scalar_mul_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_scalar_idiv(language): +def test_array_int32_2d_C_scalar_idiv(language_with_cuda): f1 = arrays.array_int32_2d_C_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -461,10 +461,10 @@ def test_array_int32_2d_C_scalar_idiv_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_add(language): +def 
test_array_int32_2d_C_add(language_with_cuda): f1 = arrays.array_int32_2d_C_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -475,10 +475,10 @@ def test_array_int32_2d_C_add(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_sub(language): +def test_array_int32_2d_C_sub(language_with_cuda): f1 = arrays.array_int32_2d_C_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -489,10 +489,10 @@ def test_array_int32_2d_C_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_mul(language): +def test_array_int32_2d_C_mul(language_with_cuda): f1 = arrays.array_int32_2d_C_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -503,10 +503,10 @@ def test_array_int32_2d_C_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_C_idiv(language): +def test_array_int32_2d_C_idiv(language_with_cuda): f1 = arrays.array_int32_2d_C_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32 ) x2 = np.copy(x1) @@ -521,10 +521,10 @@ def test_array_int32_2d_C_idiv(language): # TEST: 2D ARRAYS OF INT-32 WITH F ORDERING #============================================================================== -def test_array_int32_2d_F_scalar_add(language): +def test_array_int32_2d_F_scalar_add(language_with_cuda): f1 = arrays.array_int32_2d_F_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -559,10 +559,10 @@ def test_array_int32_2d_F_scalar_add_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_F_scalar_sub(language): +def test_array_int32_2d_F_scalar_sub(language_with_cuda): f1 = arrays.array_int32_2d_F_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -597,10 +597,10 @@ def test_array_int32_2d_F_scalar_sub_stride(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_F_scalar_mul(language): +def test_array_int32_2d_F_scalar_mul(language_with_cuda): f1 = arrays.array_int32_2d_F_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -611,10 +611,10 @@ def test_array_int32_2d_F_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_F_scalar_idiv(language): +def test_array_int32_2d_F_scalar_idiv(language_with_cuda): f1 = arrays.array_int32_2d_F_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -625,10 +625,10 @@ def test_array_int32_2d_F_scalar_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_F_add(language): +def test_array_int32_2d_F_add(language_with_cuda): f1 = arrays.array_int32_2d_F_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], 
[4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -639,10 +639,10 @@ def test_array_int32_2d_F_add(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_F_sub(language): +def test_array_int32_2d_F_sub(language_with_cuda): f1 = arrays.array_int32_2d_F_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -653,10 +653,10 @@ def test_array_int32_2d_F_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_F_mul(language): +def test_array_int32_2d_F_mul(language_with_cuda): f1 = arrays.array_int32_2d_F_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -667,10 +667,10 @@ def test_array_int32_2d_F_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int32_2d_F_idiv(language): +def test_array_int32_2d_F_idiv(language_with_cuda): f1 = arrays.array_int32_2d_F_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], dtype=np.int32, order='F' ) x2 = np.copy(x1) @@ -686,10 +686,10 @@ def test_array_int32_2d_F_idiv(language): # TEST: 1D ARRAYS OF INT-64 #============================================================================== -def test_array_int_1d_scalar_add(language): +def test_array_int_1d_scalar_add(language_with_cuda): f1 = arrays.array_int_1d_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -700,10 +700,10 @@ def test_array_int_1d_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_int_1d_scalar_sub(language): +def test_array_int_1d_scalar_sub(language_with_cuda): f1 = arrays.array_int_1d_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -714,10 +714,10 @@ def test_array_int_1d_scalar_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int_1d_scalar_mul(language): +def test_array_int_1d_scalar_mul(language_with_cuda): f1 = arrays.array_int_1d_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -728,10 +728,10 @@ def test_array_int_1d_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int_1d_scalar_idiv(language): +def test_array_int_1d_scalar_idiv(language_with_cuda): f1 = arrays.array_int_1d_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -742,10 +742,10 @@ def test_array_int_1d_scalar_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_int_1d_add(language): +def test_array_int_1d_add(language_with_cuda): f1 = arrays.array_int_1d_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -756,10 +756,10 @@ def test_array_int_1d_add(language): assert np.array_equal( x1, x2 ) -def test_array_int_1d_sub(language): +def test_array_int_1d_sub(language_with_cuda): f1 = arrays.array_int_1d_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -770,10 +770,10 @@ def test_array_int_1d_sub(language): 
assert np.array_equal( x1, x2 ) -def test_array_int_1d_mul(language): +def test_array_int_1d_mul(language_with_cuda): f1 = arrays.array_int_1d_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -784,10 +784,10 @@ def test_array_int_1d_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int_1d_idiv(language): +def test_array_int_1d_idiv(language_with_cuda): f1 = arrays.array_int_1d_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1,2,3] ) x2 = np.copy(x1) @@ -802,10 +802,10 @@ def test_array_int_1d_idiv(language): # TEST: 2D ARRAYS OF INT-64 WITH C ORDERING #============================================================================== -def test_array_int_2d_C_scalar_add(language): +def test_array_int_2d_C_scalar_add(language_with_cuda): f1 = arrays.array_int_2d_C_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -816,10 +816,10 @@ def test_array_int_2d_C_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_scalar_sub(language): +def test_array_int_2d_C_scalar_sub(language_with_cuda): f1 = arrays.array_int_2d_C_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -830,10 +830,10 @@ def test_array_int_2d_C_scalar_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_scalar_mul(language): +def test_array_int_2d_C_scalar_mul(language_with_cuda): f1 = arrays.array_int_2d_C_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -844,10 +844,10 @@ def test_array_int_2d_C_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_scalar_idiv(language): +def test_array_int_2d_C_scalar_idiv(language_with_cuda): f1 = arrays.array_int_2d_C_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -858,10 +858,10 @@ def test_array_int_2d_C_scalar_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_add(language): +def test_array_int_2d_C_add(language_with_cuda): f1 = arrays.array_int_2d_C_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -872,10 +872,10 @@ def test_array_int_2d_C_add(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_sub(language): +def test_array_int_2d_C_sub(language_with_cuda): f1 = arrays.array_int_2d_C_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -886,10 +886,10 @@ def test_array_int_2d_C_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_mul(language): +def test_array_int_2d_C_mul(language_with_cuda): f1 = arrays.array_int_2d_C_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -900,10 +900,10 @@ def test_array_int_2d_C_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_idiv(language): +def test_array_int_2d_C_idiv(language_with_cuda): f1 = 
arrays.array_int_2d_C_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]] ) x2 = np.copy(x1) @@ -914,10 +914,10 @@ def test_array_int_2d_C_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_C_initialization(language): +def test_array_int_2d_C_initialization(language_with_cuda): f1 = arrays.array_int_2d_C_initialization - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x1 = np.zeros((2, 3), dtype=int) x2 = np.ones_like(x1) @@ -931,10 +931,10 @@ def test_array_int_2d_C_initialization(language): # TEST: 2D ARRAYS OF INT-64 WITH F ORDERING #============================================================================== -def test_array_int_2d_F_scalar_add(language): +def test_array_int_2d_F_scalar_add(language_with_cuda): f1 = arrays.array_int_2d_F_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -945,10 +945,10 @@ def test_array_int_2d_F_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_scalar_sub(language): +def test_array_int_2d_F_scalar_sub(language_with_cuda): f1 = arrays.array_int_2d_F_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -959,10 +959,10 @@ def test_array_int_2d_F_scalar_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_scalar_mul(language): +def test_array_int_2d_F_scalar_mul(language_with_cuda): f1 = arrays.array_int_2d_F_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -973,10 +973,10 @@ def test_array_int_2d_F_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_scalar_idiv(language): +def test_array_int_2d_F_scalar_idiv(language_with_cuda): f1 = arrays.array_int_2d_F_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -987,10 +987,10 @@ def test_array_int_2d_F_scalar_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_add(language): +def test_array_int_2d_F_add(language_with_cuda): f1 = arrays.array_int_2d_F_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -1001,10 +1001,10 @@ def test_array_int_2d_F_add(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_sub(language): +def test_array_int_2d_F_sub(language_with_cuda): f1 = arrays.array_int_2d_F_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -1015,10 +1015,10 @@ def test_array_int_2d_F_sub(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_mul(language): +def test_array_int_2d_F_mul(language_with_cuda): f1 = arrays.array_int_2d_F_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -1029,10 +1029,10 @@ def test_array_int_2d_F_mul(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_idiv(language): +def 
test_array_int_2d_F_idiv(language_with_cuda): f1 = arrays.array_int_2d_F_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1,2,3], [4,5,6]], order='F' ) x2 = np.copy(x1) @@ -1043,10 +1043,10 @@ def test_array_int_2d_F_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_int_2d_F_initialization(language): +def test_array_int_2d_F_initialization(language_with_cuda): f1 = arrays.array_int_2d_F_initialization - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x1 = np.zeros((2, 3), dtype=int, order='F') x2 = np.ones_like(x1) @@ -1060,10 +1060,10 @@ def test_array_int_2d_F_initialization(language): # TEST: 1D ARRAYS OF REAL #============================================================================== -def test_array_float_1d_scalar_add(language): +def test_array_float_1d_scalar_add(language_with_cuda): f1 = arrays.array_float_1d_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1074,10 +1074,10 @@ def test_array_float_1d_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_scalar_sub(language): +def test_array_float_1d_scalar_sub(language_with_cuda): f1 = arrays.array_float_1d_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1088,10 +1088,10 @@ def test_array_float_1d_scalar_sub(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_scalar_mul(language): +def test_array_float_1d_scalar_mul(language_with_cuda): f1 = arrays.array_float_1d_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1102,10 +1102,10 @@ def test_array_float_1d_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_scalar_div(language): +def test_array_float_1d_scalar_div(language_with_cuda): f1 = arrays.array_float_1d_scalar_div - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1116,9 +1116,9 @@ def test_array_float_1d_scalar_div(language): assert np.allclose(x1, x2, rtol=RTOL, atol=ATOL) -def test_array_float_1d_scalar_mod(language): +def test_array_float_1d_scalar_mod(language_with_cuda): f1 = arrays.array_float_1d_scalar_mod - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1129,10 +1129,10 @@ def test_array_float_1d_scalar_mod(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_scalar_idiv(language): +def test_array_float_1d_scalar_idiv(language_with_cuda): f1 = arrays.array_float_1d_scalar_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1143,10 +1143,10 @@ def test_array_float_1d_scalar_idiv(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_add(language): +def test_array_float_1d_add(language_with_cuda): f1 = arrays.array_float_1d_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] 
) x2 = np.copy(x1) @@ -1157,10 +1157,10 @@ def test_array_float_1d_add(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_sub(language): +def test_array_float_1d_sub(language_with_cuda): f1 = arrays.array_float_1d_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1171,10 +1171,10 @@ def test_array_float_1d_sub(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_mul(language): +def test_array_float_1d_mul(language_with_cuda): f1 = arrays.array_float_1d_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1185,10 +1185,10 @@ def test_array_float_1d_mul(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_div(language): +def test_array_float_1d_div(language_with_cuda): f1 = arrays.array_float_1d_div - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1199,10 +1199,10 @@ def test_array_float_1d_div(language): assert np.array_equal( x1, x2 ) -def test_array_float_1d_mod(language): +def test_array_float_1d_mod(language_with_cuda): f1 = arrays.array_float_1d_mod - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1213,10 +1213,10 @@ def test_array_float_1d_mod(language): assert np.array_equal( x1, x2) -def test_array_float_1d_idiv(language): +def test_array_float_1d_idiv(language_with_cuda): f1 = arrays.array_float_1d_idiv - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [1.,2.,3.] ) x2 = np.copy(x1) @@ -1231,10 +1231,10 @@ def test_array_float_1d_idiv(language): # TEST: 2D ARRAYS OF REAL WITH C ORDERING #============================================================================== -def test_array_float_2d_C_scalar_add(language): +def test_array_float_2d_C_scalar_add(language_with_cuda): f1 = arrays.array_float_2d_C_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1245,10 +1245,10 @@ def test_array_float_2d_C_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_scalar_sub(language): +def test_array_float_2d_C_scalar_sub(language_with_cuda): f1 = arrays.array_float_2d_C_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1259,10 +1259,10 @@ def test_array_float_2d_C_scalar_sub(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_scalar_mul(language): +def test_array_float_2d_C_scalar_mul(language_with_cuda): f1 = arrays.array_float_2d_C_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1273,10 +1273,10 @@ def test_array_float_2d_C_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_scalar_div(language): +def test_array_float_2d_C_scalar_div(language_with_cuda): f1 = arrays.array_float_2d_C_scalar_div - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1287,10 +1287,10 @@ def 
test_array_float_2d_C_scalar_div(language): assert np.allclose(x1, x2, rtol=RTOL, atol=ATOL) -def test_array_float_2d_C_scalar_mod(language): +def test_array_float_2d_C_scalar_mod(language_with_cuda): f1 = arrays.array_float_2d_C_scalar_mod - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1301,10 +1301,10 @@ def test_array_float_2d_C_scalar_mod(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_add(language): +def test_array_float_2d_C_add(language_with_cuda): f1 = arrays.array_float_2d_C_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1315,10 +1315,10 @@ def test_array_float_2d_C_add(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_sub(language): +def test_array_float_2d_C_sub(language_with_cuda): f1 = arrays.array_float_2d_C_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1329,10 +1329,10 @@ def test_array_float_2d_C_sub(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_mul(language): +def test_array_float_2d_C_mul(language_with_cuda): f1 = arrays.array_float_2d_C_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1343,10 +1343,10 @@ def test_array_float_2d_C_mul(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_div(language): +def test_array_float_2d_C_div(language_with_cuda): f1 = arrays.array_float_2d_C_div - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1357,10 +1357,10 @@ def test_array_float_2d_C_div(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_mod(language): +def test_array_float_2d_C_mod(language_with_cuda): f1 = arrays.array_float_2d_C_mod - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]] ) x2 = np.copy(x1) @@ -1371,10 +1371,10 @@ def test_array_float_2d_C_mod(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_C_array_initialization(language): +def test_array_float_2d_C_array_initialization(language_with_cuda): f1 = arrays.array_float_2d_C_array_initialization - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x1 = np.zeros((2, 3), dtype=float ) x2 = np.ones_like(x1) @@ -1384,10 +1384,10 @@ def test_array_float_2d_C_array_initialization(language): assert np.array_equal(x1, x2) -def test_array_float_3d_C_array_initialization_1(language): +def test_array_float_3d_C_array_initialization_1(language_with_cuda): f1 = arrays.array_float_3d_C_array_initialization_1 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x = np.random.random((3,2)) y = np.random.random((3,2)) @@ -1401,10 +1401,10 @@ def test_array_float_3d_C_array_initialization_1(language): assert np.array_equal(x1, x2) -def test_array_float_3d_C_array_initialization_2(language): +def test_array_float_3d_C_array_initialization_2(language_with_cuda): f1 = arrays.array_float_3d_C_array_initialization_2 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x1 = 
np.zeros((2,3,4)) x2 = np.zeros((2,3,4)) @@ -1414,10 +1414,10 @@ def test_array_float_3d_C_array_initialization_2(language): assert np.array_equal(x1, x2) -def test_array_float_4d_C_array_initialization(language): +def test_array_float_4d_C_array_initialization(language_with_cuda): f1 = arrays.array_float_4d_C_array_initialization - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x = np.random.random((3,2,4)) y = np.random.random((3,2,4)) @@ -1434,10 +1434,10 @@ def test_array_float_4d_C_array_initialization(language): # TEST: 2D ARRAYS OF REAL WITH F ORDERING #============================================================================== -def test_array_float_2d_F_scalar_add(language): +def test_array_float_2d_F_scalar_add(language_with_cuda): f1 = arrays.array_float_2d_F_scalar_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1448,10 +1448,10 @@ def test_array_float_2d_F_scalar_add(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_scalar_sub(language): +def test_array_float_2d_F_scalar_sub(language_with_cuda): f1 = arrays.array_float_2d_F_scalar_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1462,10 +1462,10 @@ def test_array_float_2d_F_scalar_sub(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_scalar_mul(language): +def test_array_float_2d_F_scalar_mul(language_with_cuda): f1 = arrays.array_float_2d_F_scalar_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1476,10 +1476,10 @@ def test_array_float_2d_F_scalar_mul(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_scalar_div(language): +def test_array_float_2d_F_scalar_div(language_with_cuda): f1 = arrays.array_float_2d_F_scalar_div - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1490,10 +1490,10 @@ def test_array_float_2d_F_scalar_div(language): assert np.allclose(x1, x2, rtol=RTOL, atol=ATOL) -def test_array_float_2d_F_scalar_mod(language): +def test_array_float_2d_F_scalar_mod(language_with_cuda): f1 = arrays.array_float_2d_F_scalar_mod - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1504,10 +1504,10 @@ def test_array_float_2d_F_scalar_mod(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_add(language): +def test_array_float_2d_F_add(language_with_cuda): f1 = arrays.array_float_2d_F_add - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1518,10 +1518,10 @@ def test_array_float_2d_F_add(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_sub(language): +def test_array_float_2d_F_sub(language_with_cuda): f1 = arrays.array_float_2d_F_sub - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1532,10 +1532,10 @@ def test_array_float_2d_F_sub(language): assert np.array_equal( x1, 
x2 ) -def test_array_float_2d_F_mul(language): +def test_array_float_2d_F_mul(language_with_cuda): f1 = arrays.array_float_2d_F_mul - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1546,10 +1546,10 @@ def test_array_float_2d_F_mul(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_div(language): +def test_array_float_2d_F_div(language_with_cuda): f1 = arrays.array_float_2d_F_div - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1560,10 +1560,10 @@ def test_array_float_2d_F_div(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_mod(language): +def test_array_float_2d_F_mod(language_with_cuda): f1 = arrays.array_float_2d_F_mod - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.array( [[1.,2.,3.], [4.,5.,6.]], order='F' ) x2 = np.copy(x1) @@ -1574,10 +1574,10 @@ def test_array_float_2d_F_mod(language): assert np.array_equal( x1, x2 ) -def test_array_float_2d_F_array_initialization(language): +def test_array_float_2d_F_array_initialization(language_with_cuda): f1 = arrays.array_float_2d_F_array_initialization - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x1 = np.zeros((2, 3), dtype=float, order='F') x2 = np.ones_like(x1) @@ -1587,10 +1587,10 @@ def test_array_float_2d_F_array_initialization(language): assert np.array_equal(x1, x2) -def test_array_float_3d_F_array_initialization_1(language): +def test_array_float_3d_F_array_initialization_1(language_with_cuda): f1 = arrays.array_float_3d_F_array_initialization_1 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x = np.random.random((3,2)).copy(order='F') y = np.random.random((3,2)).copy(order='F') @@ -1604,10 +1604,10 @@ def test_array_float_3d_F_array_initialization_1(language): assert np.array_equal(x1, x2) -def test_array_float_3d_F_array_initialization_2(language): +def test_array_float_3d_F_array_initialization_2(language_with_cuda): f1 = arrays.array_float_3d_F_array_initialization_2 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x1 = np.zeros((2,3,4), order='F') x2 = np.zeros((2,3,4), order='F') @@ -1617,10 +1617,10 @@ def test_array_float_3d_F_array_initialization_2(language): assert np.array_equal(x1, x2) -def test_array_float_4d_F_array_initialization(language): +def test_array_float_4d_F_array_initialization(language_with_cuda): f1 = arrays.array_float_4d_F_array_initialization - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) x = np.random.random((3,2,4)).copy(order='F') y = np.random.random((3,2,4)).copy(order='F') @@ -1794,64 +1794,64 @@ def test_array_float_2d_F_complex_3d_expr(language): # TEST: 1D Stack ARRAYS OF REAL #============================================================================== -def test_array_float_sum_stack_array(language): +def test_array_float_sum_stack_array(language_with_cuda): f1 = arrays.array_float_1d_sum_stack_array - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = f1() x2 = f2() assert np.equal( x1, x2 ) -def test_array_float_div_stack_array(language): +def test_array_float_div_stack_array(language_with_cuda): f1 = arrays.array_float_1d_div_stack_array - f2 = 
epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = f1() x2 = f2() assert np.equal( x1, x2 ) -def test_multiple_stack_array_1(language): +def test_multiple_stack_array_1(language_with_cuda): f1 = arrays.multiple_stack_array_1 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert np.allclose(f1(), f2(), rtol=RTOL, atol=ATOL) -def test_multiple_stack_array_2(language): +def test_multiple_stack_array_2(language_with_cuda): f1 = arrays.multiple_stack_array_2 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert np.allclose(f1(), f2(), rtol=RTOL, atol=ATOL) #============================================================================== # TEST: 2D Stack ARRAYS OF REAL #============================================================================== -def test_array_float_sum_2d_stack_array(language): +def test_array_float_sum_2d_stack_array(language_with_cuda): f1 = arrays.array_float_2d_sum_stack_array - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = f1() x2 = f2() assert np.equal( x1, x2 ) -def test_array_float_div_2d_stack_array(language): +def test_array_float_div_2d_stack_array(language_with_cuda): f1 = arrays.array_float_2d_div_stack_array - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = f1() x2 = f2() assert np.equal( x1, x2 ) -def test_multiple_2d_stack_array_1(language): +def test_multiple_2d_stack_array_1(language_with_cuda): f1 = arrays.multiple_2d_stack_array_1 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert np.allclose(f1(), f2(), rtol=RTOL, atol=ATOL) -def test_multiple_2d_stack_array_2(language): +def test_multiple_2d_stack_array_2(language_with_cuda): f1 = arrays.multiple_2d_stack_array_2 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert np.allclose(f1(), f2(), rtol=RTOL, atol=ATOL) #============================================================================== @@ -2031,9 +2031,9 @@ def test_array_float_2d_2d_matmul_operator(language): f2(A2, B2, C2) assert np.array_equal(C1, C2) -def test_array_float_loopdiff(language): +def test_array_float_loopdiff(language_with_cuda): f1 = arrays.array_float_loopdiff - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) x1 = np.ones(5) y1 = np.zeros(5) x2 = np.copy(x1) From 2b303ee2f2c27c6968e8114e7c4a38ec61e707eb Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 18:14:56 +0100 Subject: [PATCH 45/53] EOF line --- pyccel/codegen/printing/cucode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py index b57f6e239c..087a08b71c 100644 --- a/pyccel/codegen/printing/cucode.py +++ b/pyccel/codegen/printing/cucode.py @@ -183,4 +183,4 @@ def _print_Allocate(self, expr): else: raise NotImplementedError(f"Allocate not implemented for {variable}") else: - raise NotImplementedError(f"Allocate not implemented for {variable}") \ No newline at end of file + raise NotImplementedError(f"Allocate not implemented for {variable}") From 67674ceef9e9c9afbe068494e1ff1056b1bda6e4 Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 18:42:11 +0100 Subject: [PATCH 46/53] fixed a bug in the array_fill --- pyccel/codegen/printing/ccode.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/pyccel/codegen/printing/ccode.py b/pyccel/codegen/printing/ccode.py index 1a89fed6b0..8bbc95fe7d 100644 --- a/pyccel/codegen/printing/ccode.py +++ b/pyccel/codegen/printing/ccode.py @@ -486,8 +486,8 @@ def arrayFill(self, expr): declare_dtype = self.find_in_dtype_registry(rhs.dtype, rhs.precision) dtype = self.find_in_ndarray_type_registry(rhs.dtype, rhs.precision) dtype = dtype[3:] - fill_value = self._print(rhs.fill_value) if rhs.fill_value is not None: + fill_value = self._print(rhs.fill_value) if isinstance(rhs.fill_value, Literal): code_init += f'_array_fill_{dtype}(({declare_dtype}){fill_value}, {self._print(lhs)});\n' else: From a9eb68b2f4dd7daaf975889a989074d26b560e4b Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 18:42:37 +0100 Subject: [PATCH 47/53] activated more arrays tests --- tests/epyccel/test_arrays.py | 84 ++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/tests/epyccel/test_arrays.py b/tests/epyccel/test_arrays.py index 6092938524..e22744c4bd 100644 --- a/tests/epyccel/test_arrays.py +++ b/tests/epyccel/test_arrays.py @@ -2061,81 +2061,81 @@ def test_array_kwargs_ones(language): # TEST: Negative indexes #============================================================================== -def test_constant_negative_index(language): +def test_constant_negative_index(language_with_cuda): from numpy.random import randint n = randint(2, 10) f1 = arrays.constant_negative_index - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) assert f1(n) == f2(n) -def test_almost_negative_index(language): +def test_almost_negative_index(language_with_cuda): from numpy.random import randint n = randint(2, 10) f1 = arrays.constant_negative_index - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) assert f1(n) == f2(n) -def test_var_negative_index(language): +def test_var_negative_index(language_with_cuda): from numpy.random import randint n = randint(2, 10) idx = randint(-n,0) f1 = arrays.var_negative_index - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) assert f1(n,idx) == f2(n,idx) -def test_expr_negative_index(language): +def test_expr_negative_index(language_with_cuda): from numpy.random import randint n = randint(2, 10) idx1 = randint(-n,2*n) idx2 = randint(idx1,idx1+n+1) f1 = arrays.expr_negative_index - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) assert f1(n,idx1,idx2) == f2(n,idx1,idx2) -def test_multiple_negative_index(language): +def test_multiple_negative_index(language_with_cuda): f1 = arrays.test_multiple_negative_index - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(-2, -1) == f2(-2, -1) -def test_multiple_negative_index_2(language): +def test_multiple_negative_index_2(language_with_cuda): f1 = arrays.test_multiple_negative_index_2 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(-4, -2) == f2(-4, -2) -def test_multiple_negative_index_3(language): +def test_multiple_negative_index_3(language_with_cuda): f1 = arrays.test_multiple_negative_index_3 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(-1, -1, -3) == f2(-1, -1, -3) -def test_argument_negative_index_1(language): +def test_argument_negative_index_1(language_with_cuda): a = arrays.a_1d f1 = arrays.test_argument_negative_index_1 - f2 = 
epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_argument_negative_index_2(language): +def test_argument_negative_index_2(language_with_cuda): a = arrays.a_1d f1 = arrays.test_argument_negative_index_2 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a, a) == f2(a, a) -def test_c_order_argument_negative_index(language): +def test_c_order_argument_negative_index(language_with_cuda): a = np.random.randint(20, size=(3,4)) f1 = arrays.test_c_order_argument_negative_index - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a, a) == f2(a, a) -def test_f_order_argument_negative_index(language): +def test_f_order_argument_negative_index(language_with_cuda): a = np.array(np.random.randint(20, size=(3,4)), order='F') f1 = arrays.test_f_order_argument_negative_index - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a, a) == f2(a, a) #============================================================================== @@ -2155,9 +2155,9 @@ def test_array_random_size(language): s1, s2 = f2() assert s1 == s2 -def test_array_variable_size(language): +def test_array_variable_size(language_with_cuda): f1 = arrays.array_variable_size - f2 = epyccel( f1 , language = language) + f2 = epyccel( f1 , language = language_with_cuda) from numpy.random import randint n = randint(1, 10) m = randint(11,20) @@ -2168,75 +2168,75 @@ def test_array_variable_size(language): # TEST : 1d array slices #============================================================================== -def test_array_1d_slice_1(language): +def test_array_1d_slice_1(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_1 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_2(language): +def test_array_1d_slice_2(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_2 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_3(language): +def test_array_1d_slice_3(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_3 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_4(language): +def test_array_1d_slice_4(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_4 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_5(language): +def test_array_1d_slice_5(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_5 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_6(language): +def test_array_1d_slice_6(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_6 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_7(language): +def test_array_1d_slice_7(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_7 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_8(language): +def test_array_1d_slice_8(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_8 - f2 = epyccel(f1, language = language) + f2 
= epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) -def test_array_1d_slice_9(language): +def test_array_1d_slice_9(language_with_cuda): a = arrays.a_1d f1 = arrays.array_1d_slice_9 - f2 = epyccel(f1, language = language) + f2 = epyccel(f1, language = language_with_cuda) assert f1(a) == f2(a) From aace39cf4132d7f87f1fae227e798e15e26c2940 Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 18:46:19 +0100 Subject: [PATCH 48/53] added missing import --- pyccel/codegen/printing/cucode.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py index 087a08b71c..7809939377 100644 --- a/pyccel/codegen/printing/cucode.py +++ b/pyccel/codegen/printing/cucode.py @@ -17,6 +17,7 @@ from pyccel.ast.core import Deallocate from pyccel.ast.variable import DottedVariable +from pyccel.ast.c_concepts import ObjectAddress from pyccel.ast.core import Import, Module, Declare from pyccel.ast.operators import PyccelMul From 7904dc8993a60268f3b9722ca1ec51cee3df334e Mon Sep 17 00:00:00 2001 From: El alj Mouad Date: Mon, 4 Mar 2024 19:23:32 +0100 Subject: [PATCH 49/53] using f_string instead of format --- pyccel/codegen/printing/cucode.py | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py index 7809939377..90a77f73ad 100644 --- a/pyccel/codegen/printing/cucode.py +++ b/pyccel/codegen/printing/cucode.py @@ -74,14 +74,12 @@ def _print_Program(self, expr): # imports = ''.join(self._print(i) for i in imports) self.exit_scope() - return ('{imports}' - 'int main()\n{{\n' - '{decs}' - '{body}' - 'return 0;\n' - '}}').format(imports=imports, - decs=decs, - body=body) + return f'{imports}\n\ + int main()\n{{\n\ + {decs}\n\ + {body}\n\ + return 0;\n\ + }}\n' def _print_Module(self, expr): self.set_scope(expr.scope) @@ -140,17 +138,14 @@ def _init_stack_array(self, expr): declare_dtype = self.find_in_dtype_registry(NativeInteger(), 8) dummy_array_name = self.scope.get_new_name('array_dummy') - buffer_array = "{dtype} {name}[{size}];\n".format( - dtype = dtype, - name = dummy_array_name, - size = tot_shape) + buffer_array = f'{dtype} {dummy_array_name}[{tot_shape}];\n' tmp_shape = self.scope.get_new_name(f'tmp_shape_{var.name}') shape_init = f'{declare_dtype} {tmp_shape}[] = {{{shape}}};\n' tmp_strides = self.scope.get_new_name(f'tmp_strides_{var.name}') strides_init = f'{declare_dtype} {tmp_strides}[{var.rank}] = {{0}};\n' array_init = f' = (t_ndarray){{\n.{np_dtype}={dummy_array_name},\n .nd={var.rank},\n ' array_init += f'.shape={tmp_shape},\n .strides={tmp_strides},\n .type={np_dtype},\n .is_view=false\n}};\n' - array_init += 'stack_array_init(&{})'.format(self._print(var)) + array_init += f'stack_array_init(&{self._print(var)})' preface = buffer_array + shape_init + strides_init self.add_import(c_imports['ndarrays']) return preface, array_init @@ -163,7 +158,7 @@ def _print_Allocate(self, expr): if (expr.status == 'unknown'): shape_var = DottedVariable(NativeVoid(), 'shape', lhs = variable) free_code = f'if ({self._print(shape_var)} != NULL)\n' - free_code += "{{\n{}}}\n".format(self._print(Deallocate(variable))) + free_code += f'{{\n{self._print(Deallocate(variable))}}}\n' elif (expr.status == 'allocated'): free_code += self._print(Deallocate(variable)) self.add_import(c_imports['ndarrays']) @@ -175,7 +170,7 @@ def _print_Allocate(self, expr): is_view = 'false' if variable.on_heap else 'true' order = "order_f" if 
expr.order == "F" else "order_c" alloc_code = f"{self._print(variable)} = array_create({variable.rank}, {tmp_shape}, {dtype}, {is_view}, {order});\n" - return '{}{}{}'.format(free_code, shape_Assign,alloc_code) + return f'{free_code}{shape_Assign}{alloc_code}' elif variable.is_alias: var_code = self._print(ObjectAddress(variable)) if expr.like: From 051710d0a4272ee2281d5db776dbaa72dc142c8d Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Mon, 11 Mar 2024 11:41:27 +0100 Subject: [PATCH 50/53] Trigger tests on push to devel or main branch --- .github/workflows/anaconda_linux.yml | 4 ++-- .github/workflows/anaconda_windows.yml | 4 ++-- .github/workflows/intel.yml | 4 ++-- .github/workflows/linux.yml | 4 ++-- .github/workflows/macosx.yml | 4 ++-- .github/workflows/pickle.yml | 4 ++-- .github/workflows/pickle_wheel.yml | 4 ++-- .github/workflows/windows.yml | 4 ++-- 8 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/anaconda_linux.yml b/.github/workflows/anaconda_linux.yml index c7e04a37da..8173b4f7a4 100644 --- a/.github/workflows/anaconda_linux.yml +++ b/.github/workflows/anaconda_linux.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/anaconda_windows.yml b/.github/workflows/anaconda_windows.yml index 70bf150b1a..5cd222511d 100644 --- a/.github/workflows/anaconda_windows.yml +++ b/.github/workflows/anaconda_windows.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: windows-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index d45beb3d8b..96b614f0fb 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -29,7 +29,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 29239db9b6..3fbf76d70a 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -28,7 +28,7 @@ env: jobs: matrix_prep: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: matrix: ${{ steps.set-matrix.outputs.matrix }} steps: diff --git 
a/.github/workflows/macosx.yml b/.github/workflows/macosx.yml index 1d4d4fd562..c5396fbe7c 100644 --- a/.github/workflows/macosx.yml +++ b/.github/workflows/macosx.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: macos-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/pickle.yml b/.github/workflows/pickle.yml index b20fe1a93f..47663b6b2f 100644 --- a/.github/workflows/pickle.yml +++ b/.github/workflows/pickle.yml @@ -19,7 +19,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -31,7 +31,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-matrix.outputs.python_version }} matrix: ${{ steps.set-matrix.outputs.matrix }} diff --git a/.github/workflows/pickle_wheel.yml b/.github/workflows/pickle_wheel.yml index ce530a238f..450cb250be 100644 --- a/.github/workflows/pickle_wheel.yml +++ b/.github/workflows/pickle_wheel.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: ubuntu-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 03cecaab64..14ba9ddf31 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -16,7 +16,7 @@ on: required: false type: string push: - branches: devel + branches: [devel, main] env: COMMIT: ${{ inputs.ref || github.event.ref }} @@ -28,7 +28,7 @@ env: jobs: Python_version_picker: runs-on: windows-latest - if: github.event_name != 'push' || github.repository == 'pyccel/pyccel' + if: github.event_name != 'push' || github.repository == 'pyccel/pyccel-cuda' outputs: python_version: ${{ steps.set-python_version.outputs.python_version }} steps: From 7be2a712320142badc042c8d723b2edc2b13d06b Mon Sep 17 00:00:00 2001 From: EmilyBourne Date: Mon, 11 Mar 2024 11:46:33 +0100 Subject: [PATCH 51/53] Add cuda workflow to test cuda developments on CI --- .github/actions/coverage_install/action.yml | 2 +- .github/actions/linux_install/action.yml | 10 +-- .github/actions/pytest_run/action.yml | 4 +- .github/actions/pytest_run_cuda/action.yml | 17 +++++ .github/actions/python_install/action.yml | 17 +++++ .github/workflows/cuda.yml | 83 +++++++++++++++++++++ ci_tools/bot_messages/show_tests.txt | 1 + ci_tools/bot_tools/bot_funcs.py | 12 +-- ci_tools/devel_branch_tests.py | 1 + ci_tools/json_pytest_output.py | 2 +- 10 files changed, 135 insertions(+), 14 deletions(-) create mode 100644 .github/actions/pytest_run_cuda/action.yml create mode 100644 .github/actions/python_install/action.yml create mode 100644 .github/workflows/cuda.yml diff --git 
a/.github/actions/coverage_install/action.yml b/.github/actions/coverage_install/action.yml index ac5294e542..5732baee34 100644 --- a/.github/actions/coverage_install/action.yml +++ b/.github/actions/coverage_install/action.yml @@ -15,7 +15,7 @@ runs: - name: Directory Creation run: | INSTALL_DIR=$(cd tests; python -c "import pyccel; print(pyccel.__path__[0])") - SITE_DIR=$(python -c 'import sysconfig; print(sysconfig.get_paths()["purelib"])') + SITE_DIR=$(dirname ${INSTALL_DIR}) echo -e "import coverage; coverage.process_startup()" > ${SITE_DIR}/pyccel_cov.pth echo -e "[run]\nparallel = True\nsource = ${INSTALL_DIR}\ndata_file = $(pwd)/.coverage\n[report]\ninclude = ${INSTALL_DIR}/*\n[xml]\noutput = cobertura.xml" > .coveragerc echo "SITE_DIR=${SITE_DIR}" >> $GITHUB_ENV diff --git a/.github/actions/linux_install/action.yml b/.github/actions/linux_install/action.yml index 8fb5cd8505..0ef9a69b8e 100644 --- a/.github/actions/linux_install/action.yml +++ b/.github/actions/linux_install/action.yml @@ -9,22 +9,22 @@ runs: shell: bash - name: Install fortran run: - sudo apt-get install gfortran + sudo apt-get install -y gfortran shell: bash - name: Install LaPack run: - sudo apt-get install libblas-dev liblapack-dev + sudo apt-get install -y libblas-dev liblapack-dev shell: bash - name: Install MPI run: | - sudo apt-get install libopenmpi-dev openmpi-bin + sudo apt-get install -y libopenmpi-dev openmpi-bin echo "MPI_OPTS=--oversubscribe" >> $GITHUB_ENV shell: bash - name: Install OpenMP run: - sudo apt-get install libomp-dev libomp5 + sudo apt-get install -y libomp-dev libomp5 shell: bash - name: Install Valgrind run: - sudo apt-get install valgrind + sudo apt-get install -y valgrind shell: bash diff --git a/.github/actions/pytest_run/action.yml b/.github/actions/pytest_run/action.yml index 8d300bd87c..12f75166cf 100644 --- a/.github/actions/pytest_run/action.yml +++ b/.github/actions/pytest_run/action.yml @@ -51,13 +51,13 @@ runs: working-directory: ./tests id: pytest_3 - name: Test Fortran translations - run: python -m pytest -n auto -rX ${FLAGS} -m "not (parallel or xdist_incompatible) and not (c or python) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s4_outfile.out + run: python -m pytest -n auto -rX ${FLAGS} -m "not (parallel or xdist_incompatible) and not (c or python or ccuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s4_outfile.out shell: ${{ inputs.shell_cmd }} working-directory: ./tests id: pytest_4 - name: Test multi-file Fortran translations run: | - python -m pytest -rX ${FLAGS} -m "xdist_incompatible and not parallel and not (c or python) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s5_outfile.out + python -m pytest -rX ${FLAGS} -m "xdist_incompatible and not parallel and not (c or python or ccuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s5_outfile.out pyccel-clean shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/pytest_run_cuda/action.yml b/.github/actions/pytest_run_cuda/action.yml new file mode 100644 index 0000000000..52092a6e02 --- /dev/null +++ b/.github/actions/pytest_run_cuda/action.yml @@ -0,0 +1,17 @@ +name: 'Pyccel pytest commands generating Ccuda' +inputs: + shell_cmd: + description: 'Specifies the shell command (different for anaconda)' + required: false + default: "bash" + +runs: + using: "composite" + steps: + - name: Ccuda tests with pytest + run: | + # Catch exit 5 (no tests found) + sh -c 'python -m 
pytest -n auto -rx -m "not (parallel or xdist_incompatible) and ccuda" --ignore=symbolic --ignore=ndarrays; ret=$?; [ $ret = 5 ] && exit 0 || exit $ret' + pyccel-clean + shell: ${{ inputs.shell_cmd }} + working-directory: ./tests diff --git a/.github/actions/python_install/action.yml b/.github/actions/python_install/action.yml new file mode 100644 index 0000000000..f9b720e3e1 --- /dev/null +++ b/.github/actions/python_install/action.yml @@ -0,0 +1,17 @@ +name: 'Python installation commands' + +runs: + using: "composite" + steps: + - name: Install python + run: + sudo apt-get -y install python3-dev + shell: bash + - name: python as python3 + run: + sudo apt-get -y install python-is-python3 + shell: bash + - name: Install Pip + run: + sudo apt-get -y install python3-pip + shell: bash diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml new file mode 100644 index 0000000000..833ebf5d85 --- /dev/null +++ b/.github/workflows/cuda.yml @@ -0,0 +1,83 @@ +name: Cuda unit tests + +on: + workflow_dispatch: + inputs: + python_version: + required: false + type: string + ref: + required: false + type: string + check_run_id: + required: false + type: string + pr_repo: + required: false + type: string + push: + branches: [devel, main] + +env: + COMMIT: ${{ inputs.ref || github.event.ref }} + PEM: ${{ secrets.BOT_PEM }} + GITHUB_RUN_ID: ${{ github.run_id }} + GITHUB_CHECK_RUN_ID: ${{ inputs.check_run_id }} + PR_REPO: ${{ inputs.pr_repo || github.repository }} + +jobs: + Cuda: + + runs-on: ubuntu-20.04 + name: Unit tests + + container: nvidia/cuda:11.7.1-devel-ubuntu20.04 + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ env.COMMIT }} + repository: ${{ env.PR_REPO }} + - name: Prepare docker + run: | + apt update && apt install sudo + TZ=Europe/France + ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends tzdata + shell: bash + - name: Install python (setup-python action doesn't work with containers) + uses: ./.github/actions/python_install + - name: "Setup" + id: token + run: | + pip install jwt requests + python ci_tools/setup_check_run.py cuda + - name: CUDA Version + run: nvcc --version # cuda install check + - name: Install dependencies + uses: ./.github/actions/linux_install + - name: Install Pyccel with tests + run: | + PATH=${PATH}:$HOME/.local/bin + echo "PATH=${PATH}" >> $GITHUB_ENV + python -m pip install --upgrade pip + python -m pip install --user .[test] + shell: bash + - name: Coverage install + uses: ./.github/actions/coverage_install + - name: Ccuda tests with pytest + id: cuda_pytest + uses: ./.github/actions/pytest_run_cuda + - name: Collect coverage information + continue-on-error: True + uses: ./.github/actions/coverage_collection + - name: Save code coverage report + uses: actions/upload-artifact@v3 + with: + name: coverage-artifact + path: .coverage + retention-days: 1 + - name: "Post completed" + if: always() + run: + python ci_tools/complete_check_run.py ${{ steps.cuda_pytest.outcome }} + diff --git a/ci_tools/bot_messages/show_tests.txt b/ci_tools/bot_messages/show_tests.txt index adc07e8431..eb15492d2e 100644 --- a/ci_tools/bot_messages/show_tests.txt +++ b/ci_tools/bot_messages/show_tests.txt @@ -2,6 +2,7 @@ The following is a list of keywords which can be used to run tests. Tests in bol - **linux** : Runs the unit tests on a Linux system. - **windows** : Runs the unit tests on a Windows system. - **macosx** : Runs the unit tests on a MacOS X system. 
+- **cuda** : Runs the cuda unit tests on a Linux system. - **coverage** : Runs the unit tests on a Linux system and checks the coverage of the tests. - **docs** : Checks if the documentation follows the numpydoc format. - **pylint** : Runs pylint on files which are too big to be handled by codacy. diff --git a/ci_tools/bot_tools/bot_funcs.py b/ci_tools/bot_tools/bot_funcs.py index 7084a01bb9..1621d1d089 100644 --- a/ci_tools/bot_tools/bot_funcs.py +++ b/ci_tools/bot_tools/bot_funcs.py @@ -23,7 +23,8 @@ 'pyccel_lint': '3.8', 'pylint': '3.8', 'spelling': '3.8', - 'windows': '3.8' + 'windows': '3.8', + 'cuda': '-' } test_names = { @@ -40,15 +41,16 @@ 'pyccel_lint': "Pyccel best practices", 'pylint': "Python linting", 'spelling': "Spelling verification", - 'windows': "Unit tests on Windows" + 'windows': "Unit tests on Windows", + 'cuda': "Unit tests on Linux with cuda" } -test_dependencies = {'coverage':['linux']} +test_dependencies = {'coverage':['linux', 'cuda']} tests_with_base = ('coverage', 'docs', 'pyccel_lint', 'pylint') pr_test_keys = ('linux', 'windows', 'macosx', 'coverage', 'docs', 'pylint', - 'pyccel_lint', 'spelling') + 'pyccel_lint', 'spelling', 'cuda') review_stage_labels = ["needs_initial_review", "Ready_for_review", "Ready_to_merge"] @@ -420,7 +422,7 @@ def is_test_required(self, commit_log, name, key, state): True if the test should be run, False otherwise. """ print("Checking : ", name, key) - if key in ('linux', 'windows', 'macosx', 'anaconda_linux', 'anaconda_windows', 'intel'): + if key in ('linux', 'windows', 'macosx', 'anaconda_linux', 'anaconda_windows', 'intel', 'cuda'): has_relevant_change = lambda diff: any((f.startswith('pyccel/') or f.startswith('tests/')) #pylint: disable=unnecessary-lambda-assignment and f.endswith('.py') and f != 'pyccel/version.py' for f in diff) diff --git a/ci_tools/devel_branch_tests.py b/ci_tools/devel_branch_tests.py index 1102ef9e92..ec67b6c49a 100644 --- a/ci_tools/devel_branch_tests.py +++ b/ci_tools/devel_branch_tests.py @@ -15,3 +15,4 @@ bot.run_tests(['anaconda_linux'], '3.10', force_run = True) bot.run_tests(['anaconda_windows'], '3.10', force_run = True) bot.run_tests(['intel'], '3.9', force_run = True) + bot.run_tests(['cuda'], '-', force_run = True) diff --git a/ci_tools/json_pytest_output.py b/ci_tools/json_pytest_output.py index 409ae76d72..b84f4a4c09 100644 --- a/ci_tools/json_pytest_output.py +++ b/ci_tools/json_pytest_output.py @@ -61,7 +61,7 @@ def mini_md_summary(title, outcome, failed_tests): summary = "" failed_pattern = re.compile(r".*FAILED.*") - languages = ('c', 'fortran', 'python') + languages = ('c', 'fortran', 'python', 'cuda') pattern = {lang: re.compile(r".*\["+lang+r"\]\ \_.*") for lang in languages} for i in p_args.tests: From 4db5163d8ca0fc3e52c59939b54343b668da3a13 Mon Sep 17 00:00:00 2001 From: bauom <40796259+bauom@users.noreply.github.com> Date: Wed, 28 Feb 2024 18:11:50 +0100 Subject: [PATCH 52/53] [init] Adding CUDA language/compiler and CodePrinter (#32) This PR aims to make the C code compilable using nvcc. The cuda language was added as well as a CudaCodePrinter. 
Changes to stdlib: Wrapped expressions using complex types in an `ifndef __NVCC__` to avoid processing them with the nvcc compiler --------- Co-authored-by: Mouad Elalj, EmilyBourne --- .dict_custom.txt | 1 + .github/actions/pytest_parallel/action.yml | 4 +- .github/actions/pytest_run/action.yml | 4 +- .github/actions/pytest_run_cuda/action.yml | 11 +- CHANGELOG.md | 6 + pyccel/codegen/codegen.py | 43 +++++-- pyccel/codegen/compiling/compilers.py | 5 +- pyccel/codegen/pipeline.py | 5 +- pyccel/codegen/printing/cucode.py | 74 +++++++++++ pyccel/commands/console.py | 2 +- pyccel/compilers/default_compilers.py | 13 +- pyccel/naming/__init__.py | 4 +- pyccel/naming/cudanameclashchecker.py | 92 ++++++++++++++ pyccel/stdlib/numpy/numpy_c.c | 2 + pyccel/stdlib/numpy/numpy_c.h | 2 + pytest.ini | 1 + tests/conftest.py | 11 ++ tests/epyccel/test_base.py | 136 ++++++++++----------- 18 files changed, 323 insertions(+), 93 deletions(-) create mode 100644 pyccel/codegen/printing/cucode.py create mode 100644 pyccel/naming/cudanameclashchecker.py diff --git a/.dict_custom.txt b/.dict_custom.txt index b25b47f277..1ad66b6914 100644 --- a/.dict_custom.txt +++ b/.dict_custom.txt @@ -106,5 +106,6 @@ Valgrind variadic subclasses oneAPI +Cuda getter setter diff --git a/.github/actions/pytest_parallel/action.yml b/.github/actions/pytest_parallel/action.yml index c7c77d99c7..f91d84915b 100644 --- a/.github/actions/pytest_parallel/action.yml +++ b/.github/actions/pytest_parallel/action.yml @@ -10,8 +10,8 @@ runs: steps: - name: Test with pytest run: | - mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel/test_parallel_epyccel.py -v -m parallel -rXx - #mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel -v -m parallel -rXx + mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel/test_parallel_epyccel.py -v -m "parallel and not cuda" -rXx + #mpiexec -n 4 ${MPI_OPTS} python -m pytest epyccel -v -m "parallel and not cuda" -rXx shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/pytest_run/action.yml b/.github/actions/pytest_run/action.yml index 12f75166cf..c2849e011c 100644 --- a/.github/actions/pytest_run/action.yml +++ b/.github/actions/pytest_run/action.yml @@ -51,13 +51,13 @@ runs: working-directory: ./tests id: pytest_3 - name: Test Fortran translations - run: python -m pytest -n auto -rX ${FLAGS} -m "not (parallel or xdist_incompatible) and not (c or python or ccuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s4_outfile.out + run: python -m pytest -n auto -rX ${FLAGS} -m "not (parallel or xdist_incompatible) and not (c or python or cuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s4_outfile.out shell: ${{ inputs.shell_cmd }} working-directory: ./tests id: pytest_4 - name: Test multi-file Fortran translations run: | - python -m pytest -rX ${FLAGS} -m "xdist_incompatible and not parallel and not (c or python or ccuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s5_outfile.out + python -m pytest -rX ${FLAGS} -m "xdist_incompatible and not parallel and not (c or python or cuda) ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s5_outfile.out pyccel-clean shell: ${{ inputs.shell_cmd }} working-directory: ./tests diff --git a/.github/actions/pytest_run_cuda/action.yml b/.github/actions/pytest_run_cuda/action.yml index 52092a6e02..46f90552ed 100644 --- a/.github/actions/pytest_run_cuda/action.yml +++ b/.github/actions/pytest_run_cuda/action.yml @@ -1,4 +1,4 @@ -name: 'Pyccel 
pytest commands generating Ccuda' +name: 'Pyccel pytest commands generating Cuda' inputs: shell_cmd: description: 'Specifies the shell command (different for anaconda)' @@ -11,7 +11,14 @@ runs: - name: Ccuda tests with pytest run: | # Catch exit 5 (no tests found) - sh -c 'python -m pytest -n auto -rx -m "not (parallel or xdist_incompatible) and ccuda" --ignore=symbolic --ignore=ndarrays; ret=$?; [ $ret = 5 ] && exit 0 || exit $ret' + python -m pytest -rX ${FLAGS} -m "not (xdist_incompatible or parallel) and cuda ${{ inputs.pytest_mark }}" --ignore=symbolic --ignore=ndarrays 2>&1 | tee s1_outfile.out pyccel-clean shell: ${{ inputs.shell_cmd }} working-directory: ./tests + - name: Final step + if: always() + id: status + run: + python ci_tools/json_pytest_output.py -t "Cuda Test Summary" --tests "Cuda tests:${{ steps.pytest_1.outcome }}:tests/s1_outfile.out" + + shell: ${{ inputs.shell_cmd }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 8ceaee09f9..66fcfe8f5e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,12 @@ # Change Log All notable changes to this project will be documented in this file. +## \[Cuda - UNRELEASED\] + +### Added + +- #32 : add support for `nvcc` Compiler and `cuda` language as a possible option. + ## \[UNRELEASED\] ### Added diff --git a/pyccel/codegen/codegen.py b/pyccel/codegen/codegen.py index 01303cc393..7c8b210136 100644 --- a/pyccel/codegen/codegen.py +++ b/pyccel/codegen/codegen.py @@ -9,34 +9,41 @@ from pyccel.codegen.printing.fcode import FCodePrinter from pyccel.codegen.printing.ccode import CCodePrinter from pyccel.codegen.printing.pycode import PythonCodePrinter +from pyccel.codegen.printing.cucode import CudaCodePrinter from pyccel.ast.core import FunctionDef, Interface, ModuleHeader from pyccel.errors.errors import Errors from pyccel.utilities.stage import PyccelStage -_extension_registry = {'fortran': 'f90', 'c':'c', 'python':'py'} -_header_extension_registry = {'fortran': None, 'c':'h', 'python':None} +_extension_registry = {'fortran': 'f90', 'c':'c', 'python':'py', 'cuda':'cu'} +_header_extension_registry = {'fortran': None, 'c':'h', 'python':None, 'cuda':'h'} printer_registry = { 'fortran':FCodePrinter, 'c':CCodePrinter, - 'python':PythonCodePrinter + 'python':PythonCodePrinter, + 'cuda':CudaCodePrinter } pyccel_stage = PyccelStage() class Codegen(object): - """Abstract class for code generator.""" + """ + Class which handles the generation of code. - def __init__(self, parser, name): - """Constructor for Codegen. - - parser: pyccel parser + The class which handles the generation of code. This is done by creating an appropriate class + inheriting from `CodePrinter` and using it to create strings describing the code that should + be printed. This class then takes care of creating the necessary files. + Parameters + ---------- + parser : SemanticParser + The Pyccel Semantic parser node. + name : str + Name of the generated module or program. + """ - name: str - name of the generated module or program. - """ + def __init__(self, parser, name): pyccel_stage.set_stage('codegen') self._parser = parser self._ast = parser.ast @@ -135,12 +142,22 @@ def language(self): return self._language def set_printer(self, **settings): - """ Set the current codeprinter instance""" + """ + Set the current codeprinter instance. + + Getting the language that will be used (default language used is fortran), + Then instantiating the codePrinter with the corresponding language. 
+ + Parameters + ---------- + **settings : dict + Any additional arguments which are necessary for CCodePrinter. + """ # Get language used (default language used is fortran) language = settings.pop('language', 'fortran') # Set language - if not language in ['fortran', 'c', 'python']: + if not language in ['fortran', 'c', 'python', 'cuda']: raise ValueError('{} language is not available'.format(language)) self._language = language diff --git a/pyccel/codegen/compiling/compilers.py b/pyccel/codegen/compiling/compilers.py index feafa4bea8..48e225cb83 100644 --- a/pyccel/codegen/compiling/compilers.py +++ b/pyccel/codegen/compiling/compilers.py @@ -441,7 +441,10 @@ def compile_shared_library(self, compile_obj, output_folder, verbose = False, sh # Collect compile information exec_cmd, includes, libs_flags, libdirs_flags, m_code = \ self._get_compile_components(compile_obj, accelerators) - linker_libdirs_flags = ['-Wl,-rpath' if l == '-L' else l for l in libdirs_flags] + if self._info['exec'] == 'nvcc': + linker_libdirs_flags = ['-Xcompiler' if l == '-L' else f'"-Wl,-rpath,{l}"' for l in libdirs_flags] + else: + linker_libdirs_flags = ['-Wl,-rpath' if l == '-L' else l for l in libdirs_flags] flags.insert(0,"-shared") diff --git a/pyccel/codegen/pipeline.py b/pyccel/codegen/pipeline.py index 4d4770a491..6730b9bb21 100644 --- a/pyccel/codegen/pipeline.py +++ b/pyccel/codegen/pipeline.py @@ -179,9 +179,10 @@ def handle_error(stage): if language is None: language = 'fortran' - # Choose Fortran compiler + # Choose Default compiler if compiler is None: - compiler = os.environ.get('PYCCEL_DEFAULT_COMPILER', 'GNU') + default_compiler_family = 'nvidia' if language == 'cuda' else 'GNU' + compiler = os.environ.get('PYCCEL_DEFAULT_COMPILER', default_compiler_family) fflags = [] if fflags is None else fflags.split() wrapper_flags = [] if wrapper_flags is None else wrapper_flags.split() diff --git a/pyccel/codegen/printing/cucode.py b/pyccel/codegen/printing/cucode.py new file mode 100644 index 0000000000..e70b57bc20 --- /dev/null +++ b/pyccel/codegen/printing/cucode.py @@ -0,0 +1,74 @@ +# coding: utf-8 +#------------------------------------------------------------------------------------------# +# This file is part of Pyccel which is released under MIT License. See the LICENSE file or # +# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. # +#------------------------------------------------------------------------------------------# +""" +Provide tools for generating and handling CUDA code. +This module is designed to interface Pyccel's Abstract Syntax Tree (AST) with CUDA, +enabling the direct translation of high-level Pyccel expressions into CUDA code. +""" + +from pyccel.codegen.printing.ccode import CCodePrinter, c_library_headers + +from pyccel.ast.core import Import, Module + +from pyccel.errors.errors import Errors + + +errors = Errors() + +__all__ = ["CudaCodePrinter"] + +class CudaCodePrinter(CCodePrinter): + """ + Print code in CUDA format. + + This printer converts Pyccel's Abstract Syntax Tree (AST) into strings of CUDA code. + Navigation through this file utilizes _print_X functions, + as is common with all printers. + + Parameters + ---------- + filename : str + The name of the file being pyccelised. + prefix_module : str + A prefix to be added to the name of the module. 
+ """ + language = "cuda" + + def __init__(self, filename, prefix_module = None): + + errors.set_target(filename, 'file') + + super().__init__(filename) + + def _print_Module(self, expr): + self.set_scope(expr.scope) + self._current_module = expr.name + body = ''.join(self._print(i) for i in expr.body) + + global_variables = ''.join(self._print(d) for d in expr.declarations) + + # Print imports last to be sure that all additional_imports have been collected + imports = [Import(expr.name, Module(expr.name,(),())), *self._additional_imports.values()] + c_headers_imports = '' + local_imports = '' + + for imp in imports: + if imp.source in c_library_headers: + c_headers_imports += self._print(imp) + else: + local_imports += self._print(imp) + + imports = f'{c_headers_imports}\ + extern "C"{{\n\ + {local_imports}\ + }}' + + code = f'{imports}\n\ + {global_variables}\n\ + {body}\n' + + self.exit_scope() + return code diff --git a/pyccel/commands/console.py b/pyccel/commands/console.py index aa9c1aadc5..ea23dd6f8b 100644 --- a/pyccel/commands/console.py +++ b/pyccel/commands/console.py @@ -80,7 +80,7 @@ def pyccel(files=None, mpi=None, openmp=None, openacc=None, output_dir=None, com # ... backend compiler options group = parser.add_argument_group('Backend compiler options') - group.add_argument('--language', choices=('fortran', 'c', 'python'), help='Generated language') + group.add_argument('--language', choices=('fortran', 'c', 'python', 'cuda'), help='Generated language') group.add_argument('--compiler', help='Compiler family or json file containing a compiler description {GNU,intel,PGI}') diff --git a/pyccel/compilers/default_compilers.py b/pyccel/compilers/default_compilers.py index 166085d22e..d47856773c 100644 --- a/pyccel/compilers/default_compilers.py +++ b/pyccel/compilers/default_compilers.py @@ -185,6 +185,15 @@ }, 'family': 'nvidia', } +#------------------------------------------------------------ +nvcc_info = {'exec' : 'nvcc', + 'language' : 'cuda', + 'debug_flags' : ("-g",), + 'release_flags': ("-O3",), + 'general_flags': ('--compiler-options', '-fPIC',), + 'family' : 'nvidia' + } + #------------------------------------------------------------ def change_to_lib_flag(lib): @@ -288,6 +297,7 @@ def change_to_lib_flag(lib): pgfortran_info.update(python_info) nvc_info.update(python_info) nvfort_info.update(python_info) +nvcc_info.update(python_info) available_compilers = {('GNU', 'c') : gcc_info, ('GNU', 'fortran') : gfort_info, @@ -296,6 +306,7 @@ def change_to_lib_flag(lib): ('PGI', 'c') : pgcc_info, ('PGI', 'fortran') : pgfortran_info, ('nvidia', 'c') : nvc_info, - ('nvidia', 'fortran') : nvfort_info} + ('nvidia', 'fortran') : nvfort_info, + ('nvidia', 'cuda'): nvcc_info} vendors = ('GNU','intel','PGI','nvidia') diff --git a/pyccel/naming/__init__.py b/pyccel/naming/__init__.py index a71d841c8e..1b8514703b 100644 --- a/pyccel/naming/__init__.py +++ b/pyccel/naming/__init__.py @@ -10,7 +10,9 @@ from .fortrannameclashchecker import FortranNameClashChecker from .cnameclashchecker import CNameClashChecker from .pythonnameclashchecker import PythonNameClashChecker +from .cudanameclashchecker import CudaNameClashChecker name_clash_checkers = {'fortran':FortranNameClashChecker(), 'c':CNameClashChecker(), - 'python':PythonNameClashChecker()} + 'python':PythonNameClashChecker(), + 'cuda':CudaNameClashChecker()} diff --git a/pyccel/naming/cudanameclashchecker.py b/pyccel/naming/cudanameclashchecker.py new file mode 100644 index 0000000000..971204e912 --- /dev/null +++ 
b/pyccel/naming/cudanameclashchecker.py @@ -0,0 +1,92 @@ +# coding: utf-8 +#------------------------------------------------------------------------------------------# +# This file is part of Pyccel which is released under MIT License. See the LICENSE file or # +# go to https://github.com/pyccel/pyccel/blob/master/LICENSE for full license details. # +#------------------------------------------------------------------------------------------# +""" +Handles name clash problems in Cuda +""" +from .languagenameclashchecker import LanguageNameClashChecker + +class CudaNameClashChecker(LanguageNameClashChecker): + """ + Class containing functions to help avoid problematic names in Cuda. + + A class which provides functionalities to check or propose variable names and + verify that they do not cause name clashes. Name clashes may be due to + new variables, or due to the use of reserved keywords. + """ + # Keywords as mentioned on https://en.cppreference.com/w/c/keyword + keywords = set(['isign', 'fsign', 'csign', 'auto', 'break', 'case', 'char', 'const', + 'continue', 'default', 'do', 'double', 'else', 'enum', + 'extern', 'float', 'for', 'goto', 'if', 'inline', 'int', + 'long', 'register', 'restrict', 'return', 'short', 'signed', + 'sizeof', 'static', 'struct', 'switch', 'typedef', 'union', + 'unsigned', 'void', 'volatile', 'whie', '_Alignas', + '_Alignof', '_Atomic', '_Bool', '_Complex', 'Decimal128', + '_Decimal32', '_Decimal64', '_Generic', '_Imaginary', + '_Noreturn', '_Static_assert', '_Thread_local', 't_ndarray', + 'array_create', 'new_slice', 'array_slicing', 'alias_assign', + 'transpose_alias_assign', 'array_fill', 't_slice', + 'GET_INDEX_EXP1', 'GET_INDEX_EXP2', 'GET_INDEX_EXP2', + 'GET_INDEX_EXP3', 'GET_INDEX_EXP4', 'GET_INDEX_EXP5', + 'GET_INDEX_EXP6', 'GET_INDEX_EXP7', 'GET_INDEX_EXP8', + 'GET_INDEX_EXP9', 'GET_INDEX_EXP10', 'GET_INDEX_EXP11', + 'GET_INDEX_EXP12', 'GET_INDEX_EXP13', 'GET_INDEX_EXP14', + 'GET_INDEX_EXP15', 'NUM_ARGS_H1', 'NUM_ARGS', + 'GET_INDEX_FUNC_H2', 'GET_INDEX_FUNC', 'GET_INDEX', + 'INDEX', 'GET_ELEMENT', 'free_array', 'free_pointer', + 'get_index', 'numpy_to_ndarray_strides', + 'numpy_to_ndarray_shape', 'get_size', 'order_f', 'order_c', 'array_copy_data']) + + def has_clash(self, name, symbols): + """ + Indicate whether the proposed name causes any clashes. + + Checks if a suggested name conflicts with predefined + keywords or specified symbols,returning true for a clash. + This method is crucial for maintaining namespace integrity and + preventing naming conflicts in code generation processes. + + Parameters + ---------- + name : str + The suggested name. + symbols : set + Symbols which should be considered as collisions. + + Returns + ------- + bool + True if the name is a collision. + False if the name is collision free. + """ + return any(name == k for k in self.keywords) or \ + any(name == s for s in symbols) + + def get_collisionless_name(self, name, symbols): + """ + Get a valid name which doesn't collision with symbols or Cuda keywords. + + Find a new name based on the suggested name which will not cause + conflicts with Cuda keywords, does not appear in the provided symbols, + and is a valid name in Cuda code. + + Parameters + ---------- + name : str + The suggested name. + symbols : set + Symbols which should be considered as collisions. + + Returns + ------- + str + A new name which is collision free. 
+ """ + if len(name)>4 and all(name[i] == '_' for i in (0,1,-1,-2)): + # Ignore magic methods + return name + if name[0] == '_': + name = 'private'+name + return self._get_collisionless_name(name, symbols) diff --git a/pyccel/stdlib/numpy/numpy_c.c b/pyccel/stdlib/numpy/numpy_c.c index 36e4a205ec..1b5a1bf017 100644 --- a/pyccel/stdlib/numpy/numpy_c.c +++ b/pyccel/stdlib/numpy/numpy_c.c @@ -17,8 +17,10 @@ double fsign(double x) return SIGN(x); } +#ifndef __NVCC__ /* numpy.sign for complex */ double complex csign(double complex x) { return x ? ((!creal(x) && cimag(x) < 0) || (creal(x) < 0) ? -1 : 1) : 0; } +#endif diff --git a/pyccel/stdlib/numpy/numpy_c.h b/pyccel/stdlib/numpy/numpy_c.h index 4133e9dbe9..326ec3a549 100644 --- a/pyccel/stdlib/numpy/numpy_c.h +++ b/pyccel/stdlib/numpy/numpy_c.h @@ -15,6 +15,8 @@ long long int isign(long long int x); double fsign(double x); +#ifndef __NVCC__ double complex csign(double complex x); +#endif #endif diff --git a/pytest.ini b/pytest.ini index 42eb0d72ba..3792ab65f9 100644 --- a/pytest.ini +++ b/pytest.ini @@ -9,3 +9,4 @@ markers = python: test to generate python code xdist_incompatible: test which compiles a file also compiled by another test external: test using an external dll (problematic with conda on Windows) + cuda: test to generate cuda code diff --git a/tests/conftest.py b/tests/conftest.py index 79144b6978..a5082ef6e8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,6 +21,17 @@ def language(request): return request.param +@pytest.fixture( params=[ + pytest.param("fortran", marks = pytest.mark.fortran), + pytest.param("c", marks = pytest.mark.c), + pytest.param("python", marks = pytest.mark.python), + pytest.param("cuda", marks = pytest.mark.cuda) + ], + scope = "session" +) +def language_with_cuda(request): + return request.param + def move_coverage(path_dir): for root, _, files in os.walk(path_dir): for name in files: diff --git a/tests/epyccel/test_base.py b/tests/epyccel/test_base.py index c22064d321..413f79eef1 100644 --- a/tests/epyccel/test_base.py +++ b/tests/epyccel/test_base.py @@ -7,128 +7,128 @@ from utilities import epyccel_test -def test_is_false(language): - test = epyccel_test(base.is_false, lang=language) +def test_is_false(language_with_cuda): + test = epyccel_test(base.is_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_is_true(language): - test = epyccel_test(base.is_true, lang=language) +def test_is_true(language_with_cuda): + test = epyccel_test(base.is_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_compare_is(language): - test = epyccel_test(base.compare_is, lang=language) +def test_compare_is(language_with_cuda): + test = epyccel_test(base.compare_is, lang=language_with_cuda) test.compare_epyccel( True, True ) test.compare_epyccel( True, False ) test.compare_epyccel( False, True ) test.compare_epyccel( False, False ) -def test_compare_is_not(language): - test = epyccel_test(base.compare_is_not, lang=language) +def test_compare_is_not(language_with_cuda): + test = epyccel_test(base.compare_is_not, lang=language_with_cuda) test.compare_epyccel( True, True ) test.compare_epyccel( True, False ) test.compare_epyccel( False, True ) test.compare_epyccel( False, False ) -def test_compare_is_int(language): - test = epyccel_test(base.compare_is_int, lang=language) +def test_compare_is_int(language_with_cuda): + test = epyccel_test(base.compare_is_int, lang=language_with_cuda) test.compare_epyccel( True, 1 ) 
test.compare_epyccel( True, 0 ) test.compare_epyccel( False, 1 ) test.compare_epyccel( False, 0 ) -def test_compare_is_not_int(language): - test = epyccel_test(base.compare_is_not_int, lang=language) +def test_compare_is_not_int(language_with_cuda): + test = epyccel_test(base.compare_is_not_int, lang=language_with_cuda) test.compare_epyccel( True, 1 ) test.compare_epyccel( True, 0 ) test.compare_epyccel( False, 1 ) test.compare_epyccel( False, 0 ) -def test_not_false(language): - test = epyccel_test(base.not_false, lang=language) +def test_not_false(language_with_cuda): + test = epyccel_test(base.not_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_not_true(language): - test = epyccel_test(base.not_true, lang=language) +def test_not_true(language_with_cuda): + test = epyccel_test(base.not_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_eq_false(language): - test = epyccel_test(base.eq_false, lang=language) +def test_eq_false(language_with_cuda): + test = epyccel_test(base.eq_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_eq_true(language): - test = epyccel_test(base.eq_true, lang=language) +def test_eq_true(language_with_cuda): + test = epyccel_test(base.eq_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_neq_false(language): - test = epyccel_test(base.eq_false, lang=language) +def test_neq_false(language_with_cuda): + test = epyccel_test(base.eq_false, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_neq_true(language): - test = epyccel_test(base.eq_true, lang=language) +def test_neq_true(language_with_cuda): + test = epyccel_test(base.eq_true, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_not(language): - test = epyccel_test(base.not_val, lang=language) +def test_not(language_with_cuda): + test = epyccel_test(base.not_val, lang=language_with_cuda) test.compare_epyccel( True ) test.compare_epyccel( False ) -def test_not_int(language): - test = epyccel_test(base.not_int, lang=language) +def test_not_int(language_with_cuda): + test = epyccel_test(base.not_int, lang=language_with_cuda) test.compare_epyccel( 0 ) test.compare_epyccel( 4 ) -def test_compare_is_nil(language): - test = epyccel_test(base.is_nil, lang=language) +def test_compare_is_nil(language_with_cuda): + test = epyccel_test(base.is_nil, lang=language_with_cuda) test.compare_epyccel( None ) -def test_compare_is_not_nil(language): - test = epyccel_test(base.is_not_nil, lang=language) +def test_compare_is_not_nil(language_with_cuda): + test = epyccel_test(base.is_not_nil, lang=language_with_cuda) test.compare_epyccel( None ) -def test_cast_int(language): - test = epyccel_test(base.cast_int, lang=language) +def test_cast_int(language_with_cuda): + test = epyccel_test(base.cast_int, lang=language_with_cuda) test.compare_epyccel( 4 ) - test = epyccel_test(base.cast_float_to_int, lang=language) + test = epyccel_test(base.cast_float_to_int, lang=language_with_cuda) test.compare_epyccel( 4.5 ) -def test_cast_bool(language): - test = epyccel_test(base.cast_bool, lang=language) +def test_cast_bool(language_with_cuda): + test = epyccel_test(base.cast_bool, lang=language_with_cuda) test.compare_epyccel( True ) -def test_cast_float(language): - test = epyccel_test(base.cast_float, lang=language) +def test_cast_float(language_with_cuda): + test = 
epyccel_test(base.cast_float, lang=language_with_cuda) test.compare_epyccel( 4.5 ) - test = epyccel_test(base.cast_int_to_float, lang=language) + test = epyccel_test(base.cast_int_to_float, lang=language_with_cuda) test.compare_epyccel( 4 ) -def test_if_0_int(language): - test = epyccel_test(base.if_0_int, lang=language) +def test_if_0_int(language_with_cuda): + test = epyccel_test(base.if_0_int, lang=language_with_cuda) test.compare_epyccel( 22 ) test.compare_epyccel( 0 ) -def test_if_0_real(language): - test = epyccel_test(base.if_0_real, lang=language) +def test_if_0_real(language_with_cuda): + test = epyccel_test(base.if_0_real, lang=language_with_cuda) test.compare_epyccel( 22.3 ) test.compare_epyccel( 0.0 ) -def test_same_int(language): - test = epyccel_test(base.is_same_int, lang=language) +def test_same_int(language_with_cuda): + test = epyccel_test(base.is_same_int, lang=language_with_cuda) test.compare_epyccel( 22 ) - test = epyccel_test(base.isnot_same_int, lang=language) + test = epyccel_test(base.isnot_same_int, lang=language_with_cuda) test.compare_epyccel( 22 ) -def test_same_float(language): - test = epyccel_test(base.is_same_float, lang=language) +def test_same_float(language_with_cuda): + test = epyccel_test(base.is_same_float, lang=language_with_cuda) test.compare_epyccel( 22.2 ) - test = epyccel_test(base.isnot_same_float, lang=language) + test = epyccel_test(base.isnot_same_float, lang=language_with_cuda) test.compare_epyccel( 22.2 ) @pytest.mark.parametrize( 'language', [ @@ -150,28 +150,28 @@ def test_same_complex(language): test = epyccel_test(base.isnot_same_complex, lang=language) test.compare_epyccel( complex(2,3) ) -def test_is_types(language): - test = epyccel_test(base.is_types, lang=language) +def test_is_types(language_with_cuda): + test = epyccel_test(base.is_types, lang=language_with_cuda) test.compare_epyccel( 1, 1.0 ) -def test_isnot_types(language): - test = epyccel_test(base.isnot_types, lang=language) +def test_isnot_types(language_with_cuda): + test = epyccel_test(base.isnot_types, lang=language_with_cuda) test.compare_epyccel( 1, 1.0 ) -def test_none_is_none(language): - test = epyccel_test(base.none_is_none, lang=language) +def test_none_is_none(language_with_cuda): + test = epyccel_test(base.none_is_none, lang=language_with_cuda) test.compare_epyccel() -def test_none_isnot_none(language): - test = epyccel_test(base.none_isnot_none, lang=language) +def test_none_isnot_none(language_with_cuda): + test = epyccel_test(base.none_isnot_none, lang=language_with_cuda) test.compare_epyccel() -def test_pass_if(language): - test = epyccel_test(base.pass_if, lang=language) +def test_pass_if(language_with_cuda): + test = epyccel_test(base.pass_if, lang=language_with_cuda) test.compare_epyccel(2) -def test_pass2_if(language): - test = epyccel_test(base.pass2_if, lang=language) +def test_pass2_if(language_with_cuda): + test = epyccel_test(base.pass2_if, lang=language_with_cuda) test.compare_epyccel(0.2) test.compare_epyccel(0.0) @@ -192,15 +192,15 @@ def test_use_optional(language): test.compare_epyccel() test.compare_epyccel(6) -def test_none_equality(language): - test = epyccel_test(base.none_equality, lang=language) +def test_none_equality(language_with_cuda): + test = epyccel_test(base.none_equality, lang=language_with_cuda) test.compare_epyccel() test.compare_epyccel(6) -def test_none_none_equality(language): - test = epyccel_test(base.none_none_equality, lang=language) +def test_none_none_equality(language_with_cuda): + test = 
epyccel_test(base.none_none_equality, lang=language_with_cuda) test.compare_epyccel() -def test_none_literal_equality(language): - test = epyccel_test(base.none_literal_equality, lang=language) +def test_none_literal_equality(language_with_cuda): + test = epyccel_test(base.none_literal_equality, lang=language_with_cuda) test.compare_epyccel() From 1b1bd67af1a9593281c9e031d29cf6926ca0eedc Mon Sep 17 00:00:00 2001 From: Emily Bourne Date: Tue, 12 Mar 2024 09:42:33 +0100 Subject: [PATCH 53/53] Ensure that triggering coverage also triggers dependencies --- ci_tools/bot_comment_react.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ci_tools/bot_comment_react.py b/ci_tools/bot_comment_react.py index 1f26c05e49..2d1b058d42 100644 --- a/ci_tools/bot_comment_react.py +++ b/ci_tools/bot_comment_react.py @@ -29,6 +29,7 @@ def get_unique_test_list(keys): tests.discard('pr_tests') if 'coverage' in tests: tests.add('linux') + tests.add('cuda') # Ensure coverage is last in case dependencies are ready tests.discard('coverage') result = list(tests)
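Illustrative note (not part of the patch series above): the `language_with_cuda` fixture added in `tests/conftest.py` is used exactly like the existing `language` fixture, so a CUDA-enabled unit test follows the same pattern as the tests updated in these patches. A minimal sketch, assuming the `epyccel` import used elsewhere in the test suite; the `add_one` function and `test_add_one` name are invented for illustration only:

    import numpy as np
    from pyccel.epyccel import epyccel

    def add_one(x : 'float[:]'):
        # Increment every element of the array in place.
        for i in range(x.shape[0]):
            x[i] = x[i] + 1.0

    def test_add_one(language_with_cuda):
        # The fixture parametrises the test over fortran, c, python and cuda,
        # marking the cuda case with the new `cuda` pytest marker.
        f_compiled = epyccel(add_one, language = language_with_cuda)
        x1 = np.ones(5)
        x2 = np.copy(x1)
        add_one(x1)
        f_compiled(x2)
        assert np.array_equal(x1, x2)

From the command line, the new language introduced in these patches is selected with `pyccel file.py --language cuda`; when no compiler is specified, the `nvidia` family (`nvcc`) is chosen by default for this language.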