Draft
Changes from all commits
38 commits
f3f6733
add kernel
jowezarek Sep 26, 2023
e6a8f5d
fix dimension error
jowezarek Sep 26, 2023
59df5a6
Merge branch 'devel' into add_eval_kernel
Mar 6, 2024
6f956d9
update evaluation method
Mar 6, 2024
33eb0bf
use new method in geometry
Mar 6, 2024
7e4e2c8
Merge branch 'devel' into add_eval_kernel
Mar 6, 2024
56c45d8
convert index to tuple to avoid bug
Mar 6, 2024
bca8e40
fix weights
Mar 6, 2024
b7ccd51
other weight bug...
Mar 6, 2024
bcd6c4d
check type before kernels to avoid wrong type
Mar 6, 2024
b74690d
Merge branch 'devel' into add_eval_kernel
yguclu Mar 8, 2024
6871dcd
Improve kernel `eval_field_3d_once`
yguclu Mar 8, 2024
c8c2766
Merge branch 'devel' into add_eval_kernel
yguclu Mar 12, 2024
26c6909
fix access to starts, shifts and pads from the global vector space
Mar 26, 2024
7272623
Merge branch 'add_eval_kernel' of https://github.com/pyccel/psydac in…
Mar 26, 2024
acc86a3
fix allocation of ldim
Mar 26, 2024
244a5a9
Merge branch 'devel' into add_eval_kernel
yguclu Mar 27, 2024
ce99c53
Merge branch 'devel' into add_eval_kernel
yguclu Oct 4, 2024
d6d4617
Fix wrong merge in tensor.py
yguclu Oct 4, 2024
9963fd6
Some cleanup in tensor.py
yguclu Oct 4, 2024
4df5a23
Merge branch 'devel' into add_eval_kernel
yguclu Aug 27, 2025
3d7c3a6
Update fem/tensor.py: vector_space -> coeff_space
yguclu Aug 27, 2025
383e16a
Remove unused 'template' decorator from field_evaluation_kernels.py
yguclu Aug 28, 2025
2830e78
Update psydac.feec.multipatch module list
yguclu Aug 28, 2025
abc3908
Import Psydac in conf.py to avoid subsequent errors
yguclu Aug 28, 2025
3a1079b
Revert "Import Psydac in conf.py to avoid subsequent errors"
yguclu Aug 28, 2025
049c9a6
Install Psydac before building docs
yguclu Aug 28, 2025
9b7d72c
Revert useless change to test_spline_interpolation.py
yguclu Aug 28, 2025
e8c5e71
New function eval_field_nd_once:
yguclu Aug 29, 2025
c571d5e
Implement fast field evaluation kernels in 1/2/3D:
yguclu Aug 29, 2025
03ae122
Add __all__ to psydac.core.field_evaluation_kernels
yguclu Sep 8, 2025
224970c
Verify Jacobian matrix in test_export_nurbs_to_hdf5
yguclu Sep 9, 2025
d691ba5
Reorganize TensorFemSpace class, improve evaluation methods
yguclu Sep 9, 2025
d9498f4
Use new evaluation methods in NurbsMapping
yguclu Sep 9, 2025
267232a
Update unit tests in test_field_evaluation_kernel.py
yguclu Sep 10, 2025
3b0a095
Merge branch 'devel' into add_eval_kernel
yguclu Sep 10, 2025
18c8941
Remove debug code
yguclu Sep 10, 2025
2595d9c
Improve docstrings in FemSpace and FemField
yguclu Sep 10, 2025
5 changes: 3 additions & 2 deletions .github/workflows/documentation.yml
@@ -36,9 +36,10 @@ jobs:
run: |
sudo apt update
sudo apt install graphviz
- name: Install Python dependencies
- name: Install Python dependencies (including PSYDAC)
run: |
python -m pip install -r docs/requirements.txt
pip install .
pip install -r docs/requirements.txt
- name: Make the sphinx doc
run: |
rm -rf docs/source/modules/STUBDIR
1 change: 0 additions & 1 deletion docs/source/modules/feec.multipatch.rst
@@ -12,6 +12,5 @@ feec.multipatch
multipatch.multipatch_domain_utilities
multipatch.non_matching_operators
multipatch.operators
multipatch.plotting_utilities
multipatch.utilities
multipatch.utils_conga_2d
5 changes: 5 additions & 0 deletions psydac/cad/tests/test_geometry.py
@@ -222,6 +222,11 @@ def test_export_nurbs_to_hdf5(ncells, degree):

assert np.allclose(pcoords1[..., :domain.dim], pcoords2, 1e-15, 1e-15)

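# Also compare Jacobians: new_pipe.gradient evaluates on the whole tensor grid
# (eta1 x eta2) at once, while mapping.jacobian is called point by point; as for
# pcoords1 above, the [..., :domain.dim, :] slice keeps only the first domain.dim
# physical components.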
jacobian1 = new_pipe.gradient(u=eta1, v=eta2)[..., :domain.dim, :]
jacobian2 = np.array([[mapping.jacobian(e1, e2) for e2 in eta2] for e1 in eta1])

assert np.allclose(jacobian1, jacobian2, 1e-13, 1e-13)

#==============================================================================
@pytest.mark.parametrize( 'ncells', [[8,8], [12,12], [14,14]] )
@pytest.mark.parametrize( 'degree', [[2,2], [3,2], [2,3], [3,3], [4,4]] )
229 changes: 229 additions & 0 deletions psydac/core/field_evaluation_kernels.py
@@ -1,10 +1,239 @@
import numpy as np
from typing import TypeVar

__all__ = (
'eval_field_1d_once',
'eval_field_2d_once',
'eval_field_3d_once',
'eval_fields_1d_irregular_no_weights',
'eval_fields_1d_irregular_weighted',
'eval_fields_1d_no_weights',
'eval_fields_1d_weighted',
'eval_fields_2d_irregular_no_weights',
'eval_fields_2d_irregular_weighted',
'eval_fields_2d_no_weights',
'eval_fields_2d_weighted',
'eval_fields_3d_irregular_no_weights',
'eval_fields_3d_irregular_weighted',
'eval_fields_3d_no_weights',
'eval_fields_3d_weighted',
'eval_jac_det_2d',
'eval_jac_det_2d_weights',
'eval_jac_det_3d',
'eval_jac_det_3d_weights',
'eval_jac_det_irregular_2d',
'eval_jac_det_irregular_2d_weights',
'eval_jac_det_irregular_3d',
'eval_jac_det_irregular_3d_weights',
'eval_jacobians_2d',
'eval_jacobians_2d_weights',
'eval_jacobians_3d',
'eval_jacobians_3d_weights',
'eval_jacobians_inv_2d',
'eval_jacobians_inv_2d_weights',
'eval_jacobians_inv_3d',
'eval_jacobians_inv_3d_weights',
'eval_jacobians_inv_irregular_2d',
'eval_jacobians_inv_irregular_2d_weights',
'eval_jacobians_inv_irregular_3d',
'eval_jacobians_inv_irregular_3d_weights',
'eval_jacobians_irregular_2d',
'eval_jacobians_irregular_2d_weights',
'eval_jacobians_irregular_3d',
'eval_jacobians_irregular_3d_weights',
'pushforward_2d_hcurl',
'pushforward_2d_hdiv',
'pushforward_2d_l2',
'pushforward_3d_hcurl',
'pushforward_3d_hdiv',
'pushforward_3d_l2',
)
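# Illustration only (not part of this module): the kernel names in __all__ follow a
# regular pattern, so a caller could look one up by dimension and options. A
# hypothetical dispatch helper, assuming only the names listed above:
#
#     import psydac.core.field_evaluation_kernels as kernels
#
#     def get_field_kernel(ldim, regular=True, weighted=False):
#         name = f"eval_fields_{ldim}d"
#         name += "" if regular else "_irregular"
#         name += "_weighted" if weighted else "_no_weights"
#         return getattr(kernels, name)
#
#     kernel = get_field_kernel(2, regular=False, weighted=True)
#     # -> kernels.eval_fields_2d_irregular_weighted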

T = TypeVar('T', float, complex)
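# The template parameter T above lets each kernel accept either real (float) or
# complex coefficient arrays.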

# =============================================================================
# Field evaluation functions
# =============================================================================

# -----------------------------------------------------------------------------
# 0: Evaluation of single 3d field at single point
# -----------------------------------------------------------------------------
def eval_field_3d_once(local_coeffs : 'T[:,:,:]',
local_bases_1: 'float[:]',
local_bases_2: 'float[:]',
local_bases_3: 'float[:]',
work_1d: 'T[:]',
work_2d: 'T[:,:]') -> T:
"""
Evaluate a scalar field in 3D, given the basis values along each direction.

Compute the point value of a field, given the values of the non-zero 1D
basis functions at the point location. We assume that the 3D basis has a
tensor-product structure, hence each 3D basis function can be written as
the product of three 1D basis functions:

phi(x1, x2, x3) = phi_1(x1) * phi_2(x2) * phi_3(x3).

The algorithm uses sum factorization to compute the reduction. This needs
work arrays in 1D and 2D, which are provided to this function rather than
allocated at each function call.

Parameters
----------
local_coeffs: ndarray[float|complex]
Active (local) coefficients of the field in all directions.

local_bases_1: ndarray[float]
Values of the active (local) 1D basis functions at the evaluation point x1.

local_bases_2: ndarray[float]
Values of the active (local) 1D basis functions at the evaluation point x2.

local_bases_3: ndarray[float]
Values of the active (local) 1D basis functions at the evaluation point x3.

work_1d: ndarray[float|complex]
x1-dependent work array for the partial reduction in (x2, x3).

work_2d: ndarray[float|complex]
(x1, x2)-dependent work array for the partial reduction in x3.

Returns
-------
res : float | complex
The scalar value of the field at the location of interest.

"""
n1, n2, n3 = local_coeffs.shape

# Verify that local bases have the correct size
assert local_bases_1.size == n1
assert local_bases_2.size == n2
assert local_bases_3.size == n3

# Verify that provided work arrays are large enough
assert work_1d.shape[0] >= n1
assert work_2d.shape[0] >= n1
assert work_2d.shape[1] >= n2

# Obtain zero of correct type (float or complex) from input array
c0 = local_coeffs[0, 0, 0]
zero = c0 - c0

# Compute reduction using sum factorization in 3D
res = zero
for i1 in range(n1):
work_1d[i1] = zero
for i2 in range(n2):
work_2d[i1, i2] = zero
for i3 in range(n3):
work_2d[i1, i2] += local_coeffs[i1, i2, i3] * local_bases_3[i3]
work_1d[i1] += work_2d[i1, i2] * local_bases_2[i2]
res += work_1d[i1] * local_bases_1[i1]

return res

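# Example usage (illustration only, not part of this module): evaluate a field
# from random local data and check the result against a direct contraction.
# The sizes (3, 4, 5) are arbitrary; they stand for the number of non-zero basis
# functions per direction (p + 1 for B-splines of degree p).
#
#     import numpy as np
#     coeffs = np.random.random((3, 4, 5))
#     b1, b2, b3 = np.random.random(3), np.random.random(4), np.random.random(5)
#     work_1d = np.zeros(3)
#     work_2d = np.zeros((3, 4))
#     val = eval_field_3d_once(coeffs, b1, b2, b3, work_1d, work_2d)
#     ref = np.einsum('ijk,i,j,k', coeffs, b1, b2, b3)
#     assert np.isclose(val, ref)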

def eval_field_2d_once(local_coeffs : 'T[:,:]',
local_bases_1: 'float[:]',
local_bases_2: 'float[:]',
work_1d: 'T[:]') -> T:
"""
Evaluate a scalar field in 2D, given the basis values along each direction.

Compute the point value of a field, given the values of the non-zero 1D
basis functions at the point location. We assume that the 2D basis has a
tensor-product structure, hence each 2D basis function can be written as
the product of two 1D basis functions:

phi(x1, x2) = phi_1(x1) * phi_2(x2).

The algorithm uses sum factorization to compute the reduction. This needs
a 1D work array, which is provided to this function rather than allocated
at each function call.

Parameters
----------
local_coeffs: ndarray[float|complex]
Active (local) coefficients of the field in all directions.

local_bases_1: ndarray[float]
Values of the active (local) 1D basis functions at the evaluation point x1.

local_bases_2: ndarray[float]
Values of the active (local) 1D basis functions at the evaluation point x2.

work_1d: ndarray[float|complex]
x1-dependent work array for the partial reduction in x2.

Returns
-------
res : float | complex
The scalar value of the field at the location of interest.

"""
n1, n2 = local_coeffs.shape

# Verify that local bases have the correct size
assert local_bases_1.size == n1
assert local_bases_2.size == n2

# Verify that provided work arrays are large enough
assert work_1d.shape[0] >= n1

# Obtain zero of correct type (float or complex) from input array
c0 = local_coeffs[0, 0]
zero = c0 - c0

# Compute reduction using sum factorization in 2D
res = zero
for i1 in range(n1):
work_1d[i1] = zero
for i2 in range(n2):
work_1d[i1] += local_coeffs[i1, i2] * local_bases_2[i2]
res += work_1d[i1] * local_bases_1[i1]

return res


def eval_field_1d_once(local_coeffs : 'T[:]',
local_bases_1: 'float[:]') -> T:
"""
Evaluate a scalar field in 1D, given the basis values.

Compute the point value of a field, given the values of the non-zero 1D
basis functions at the point location.

Parameters
----------
local_coeffs: ndarray[float|complex]
Active (local) coefficients of the field.

local_bases_1: ndarray[float]
Values of the active (local) 1D basis functions at the evaluation point x1.

Returns
-------
res : float | complex
The scalar value of the field at the location of interest.

"""
n1, = local_coeffs.shape

# Verify that local bases have the correct size
assert local_bases_1.size == n1

# Obtain zero of correct type (float or complex) from input array
c0 = local_coeffs[0]
zero = c0 - c0

# Compute reduction as a simple scalar product
res = zero
for i1 in range(n1):
res += local_coeffs[i1] * local_bases_1[i1]

return res

# -----------------------------------------------------------------------------
# 1: Regular tensor grid without weight
# -----------------------------------------------------------------------------
26 changes: 12 additions & 14 deletions psydac/core/tests/test_field_evaluation_kernel.py
@@ -411,17 +411,15 @@ def test_regular_evaluations(knots, ldim, degree, npts_per_cell):
f_direct = np.array([space_h.eval_fields([e1], field) for e1 in regular_grid[0]])

# Weighted
f_direct_w = np.array([np.array(space_h.eval_fields([e1], field, weights=weight))
/ np.array(space_h.eval_fields([e1], weight))
for e1 in regular_grid[0]])
f_direct_w = np.array([space_h.eval_fields([e1], field, weights=weight)
for e1 in regular_grid[0]])

if ldim == 2:
# No weights
f_direct = np.array([[space_h.eval_fields([e1, e2], field) for e2 in regular_grid[1]] for e1 in regular_grid[0]])

# Weighted
f_direct_w = np.array([[np.array(space_h.eval_fields([e1, e2], field, weights=weight))
/ np.array(space_h.eval_fields([e1, e2], weight))
f_direct_w = np.array([[space_h.eval_fields([e1, e2], field, weights=weight)
for e2 in regular_grid[1]]
for e1 in regular_grid[0]])

@@ -433,8 +431,7 @@ def test_regular_evaluations(knots, ldim, degree, npts_per_cell):
for e1 in regular_grid[0]])

# Weighted
f_direct_w = np.array([[[np.array(space_h.eval_fields([e1, e2, e3], field, weights=weight))
/ np.array(space_h.eval_fields([e1, e2, e3], weight))
f_direct_w = np.array([[[space_h.eval_fields([e1, e2, e3], field, weights=weight)
for e3 in regular_grid[2]]
for e2 in regular_grid[1]]
for e1 in regular_grid[0]])
@@ -571,17 +568,15 @@ def test_irregular_evaluations(knots, ldim, degree, npts):
f_direct = np.array([space_h.eval_fields([e1], field) for e1 in irregular_grid[0]])

# Weighted
f_direct_w = np.array([np.array(space_h.eval_fields([e1], field, weights=weight))
/ np.array(space_h.eval_fields([e1], weight))
for e1 in irregular_grid[0]])
f_direct_w = np.array([space_h.eval_fields([e1], field, weights=weight)
for e1 in irregular_grid[0]])

if ldim == 2:
# No weights
f_direct = np.array([[space_h.eval_fields([e1, e2], field) for e2 in irregular_grid[1]] for e1 in irregular_grid[0]])

# Weighted
f_direct_w = np.array([[np.array(space_h.eval_fields([e1, e2], field, weights=weight))
/ np.array(space_h.eval_fields([e1, e2], weight))
f_direct_w = np.array([[space_h.eval_fields([e1, e2], field, weights=weight)
for e2 in irregular_grid[1]]
for e1 in irregular_grid[0]])

Expand All @@ -593,8 +588,7 @@ def test_irregular_evaluations(knots, ldim, degree, npts):
for e1 in irregular_grid[0]])

# Weighted
f_direct_w = np.array([[[np.array(space_h.eval_fields([e1, e2, e3], field, weights=weight))
/ np.array(space_h.eval_fields([e1, e2, e3], weight))
f_direct_w = np.array([[[space_h.eval_fields([e1, e2, e3], field, weights=weight)
for e3 in irregular_grid[2]]
for e2 in irregular_grid[1]]
for e1 in irregular_grid[0]])
@@ -692,3 +686,7 @@ def test_pushforwards_hcurl(ldim):
pushforward_3d_hcurl(field_to_push, inv_jacobians, out)

assert np.allclose(expected, out, atol=ATOL, rtol=RTOL)
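# Note (illustration, not part of the test file): pushforward_3d_hcurl takes the
# inverse Jacobians as input, consistent with the standard covariant (H(curl))
# transform u = J^{-T} u_hat applied point by point. A minimal sketch of that
# formula at a single point, assuming random data:
#
#     import numpy as np
#     J = np.eye(3) + 0.1 * np.random.random((3, 3))   # a non-singular Jacobian
#     u_hat = np.random.random(3)                       # reference-space vector value
#     u = np.linalg.inv(J).T @ u_hat                    # physical-space vector value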

if __name__ == '__main__':
test_regular_jacobians('bent_pipe.h5', 2)
test_regular_evaluations([np.sort(np.concatenate((np.zeros(3), np.random.random(9), np.ones(3)))) for i in range(2)], 2, [2] * 2, 2)
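# Note on the arguments above (illustration only): the list comprehension builds, for
# each of the 2 directions, an open (clamped) knot vector for degree p = 2, with the
# end knots 0 and 1 repeated p + 1 = 3 times and 9 random interior knots. A standalone
# sketch of the same construction:
#
#     import numpy as np
#     p = 2
#     knots = np.sort(np.concatenate((np.zeros(p + 1), np.random.random(9), np.ones(p + 1))))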