From 53cdb088f058ae85e336ee285c7e5f3923ce64b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Wed, 5 Oct 2022 16:02:36 +0200 Subject: [PATCH 01/28] Update pullback operators: use BasicCallableMapping from sympde --- psydac/feec/pull_push.py | 346 ++++++++++++++++++--------------------- 1 file changed, 159 insertions(+), 187 deletions(-) diff --git a/psydac/feec/pull_push.py b/psydac/feec/pull_push.py index 04e108d90..d2fe9998f 100644 --- a/psydac/feec/pull_push.py +++ b/psydac/feec/pull_push.py @@ -1,5 +1,7 @@ # coding: utf-8 +from sympde.topology.callable_mapping import BasicCallableMapping + __all__ = ( # # Pull-back operators @@ -33,143 +35,135 @@ #============================================================================== # 1D PULL-BACKS #============================================================================== -def pull_1d_h1(func_ini, mapping): - - mapping = mapping.get_callable_mapping() - f1, = mapping._func_eval +def pull_1d_h1(f, F): - def fun(xi1): - x = f1(xi1) + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 1 - value = func_ini(x) - return value + def f_logical(eta1): + x, = F(eta1) + return f(x) - return fun + return f_logical #============================================================================== -def pull_1d_l2(func_ini, mapping): +def pull_1d_l2(f, F): - mapping = mapping.get_callable_mapping() - f1, = mapping._func_eval - metric_det = mapping._metric_det + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 1 - def fun(xi1): - x = f1(xi1) + def f_logical(eta1): + x, = F(eta1) - det_value = metric_det(xi1)**0.5 - value = func_ini(x) + det_value = F.metric_det(eta1)**0.5 + value = f(x) return det_value*value - return fun + return f_logical #============================================================================== # 2D PULL-BACKS #============================================================================== -def pull_2d_h1(func_ini, mapping): +def pull_2d_h1(f, F): - mapping = mapping.get_callable_mapping() - f1,f2 = mapping._func_eval + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 2 - def fun(xi1, xi2): - x = f1(xi1, xi2) - y = f2(xi1, xi2) + def f_logical(eta1, eta2): + x, y = F(eta1, eta2) + return f(x, y) - value = func_ini(x, y) - return value - - return fun + return f_logical #============================================================================== -def pull_2d_hcurl(funcs_ini, mapping): +def pull_2d_hcurl(f, F): - mapping = mapping.get_callable_mapping() - f1,f2 = mapping._func_eval - jacobian = mapping._jacobian + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 2 - def fun1(xi1, xi2): - x = f1(xi1, xi2) - y = f2(xi1, xi2) + # Assume that f is a list/tuple of callable functions + f1, f2 = f - a1_phys = funcs_ini[0](x, y) - a2_phys = funcs_ini[1](x, y) + def f1_logical(eta1, eta2): + x, y = F(eta1, eta2) - J_T_value = jacobian(xi1, xi2).T - value_1 = J_T_value[0,0]*a1_phys + J_T_value[0,1]*a2_phys - return value_1 + a1_phys = f1(x, y) + a2_phys = f2(x, y) - def fun2(xi1, xi2): - x = f1(xi1, xi2) - y = f2(xi1, xi2) + J_T_value = F.jacobian(eta1, eta2).T + value_1 = J_T_value[0, 0] * a1_phys + J_T_value[0, 1] * a2_phys + return value_1 - a1_phys = funcs_ini[0](x, y) - a2_phys = funcs_ini[1](x, y) + def f2_logical(eta1, eta2): + x, y = F(eta1, eta2) - J_T_value = jacobian(xi1, xi2).T + a1_phys = f1(x, y) + a2_phys = f2(x, y) - value_2 = J_T_value[1,0]*a1_phys + J_T_value[1,1]*a2_phys + J_T_value = F.jacobian(eta1, eta2).T + value_2 = J_T_value[1, 0] * a1_phys 
+ J_T_value[1, 1] * a2_phys return value_2 - return fun1, fun2 + return f1_logical, f2_logical #============================================================================== -def pull_2d_hdiv(funcs_ini, mapping): - - mapping = mapping.get_callable_mapping() - f1,f2 = mapping._func_eval - J_inv = mapping._jacobian_inv - metric_det = mapping._metric_det +def pull_2d_hdiv(f, F): - def fun1(xi1, xi2): - x = f1(xi1, xi2) - y = f2(xi1, xi2) + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 2 - a1_phys = funcs_ini[0](x, y) - a2_phys = funcs_ini[1](x, y) + # Assume that f is a list/tuple of callable functions + f1, f2 = f - J_inv_value = J_inv(xi1, xi2) - det_value = metric_det(xi1, xi2)**0.5 + def f1_logical(eta1, eta2): + x, y = F(eta1, eta2) - value_1 = J_inv_value[0,0]*a1_phys + J_inv_value[0,1]*a2_phys + a1_phys = f1(x, y) + a2_phys = f2(x, y) - return det_value*value_1 + J_inv_value = F.jacobian_inv(eta1, eta2) + det_value = F.metric_det(eta1, eta2)**0.5 + value_1 = J_inv_value[0, 0] * a1_phys + J_inv_value[0, 1] * a2_phys - def fun2(xi1, xi2): - x = f1(xi1, xi2) - y = f2(xi1, xi2) + return det_value * value_1 - a1_phys = funcs_ini[0](x, y) - a2_phys = funcs_ini[1](x, y) + def f2_logical(eta1, eta2): + x, y = F(eta1, eta2) - J_inv_value = J_inv(xi1, xi2) - det_value = metric_det(xi1, xi2)**0.5 + a1_phys = f1(x, y) + a2_phys = f2(x, y) - value_2 = J_inv_value[1,0]*a1_phys + J_inv_value[1,1]*a2_phys + J_inv_value = F.jacobian_inv(eta1, eta2) + det_value = F.metric_det(eta1, eta2)**0.5 + value_2 = J_inv_value[1, 0] * a1_phys + J_inv_value[1, 1] * a2_phys - return det_value*value_2 + return det_value * value_2 - return fun1, fun2 + return f1_logical, f2_logical #============================================================================== -def pull_2d_l2(func_ini, mapping): +def pull_2d_l2(f, F): - mapping = mapping.get_callable_mapping() - f1,f2 = mapping._func_eval - metric_det = mapping._metric_det + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 2 - def fun(xi1, xi2): - x = f1(xi1, xi2) - y = f2(xi1, xi2) + def f_logical(eta1, eta2): + x, y = F(eta1, eta2) - det_value = metric_det(xi1, xi2)**0.5 - value = func_ini(x, y) - return det_value*value + det_value = F.metric_det(eta1, eta2)**0.5 + value = f(x, y) + return det_value * value - return fun + return f_logical #============================================================================== # 3D PULL-BACKS #============================================================================== + +# TODO [YG 05.10.2022]: +# Remove? But it makes sense to return a vector-valued function... 
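For orientation, a minimal usage sketch of the new 2D pull-back call convention (illustrative only; the PolarMapping parameters follow the visual tests later in this series, and F is assumed to be a BasicCallableMapping as enforced by the asserts above):

import numpy as np
from sympde.topology.analytical_mapping import PolarMapping
from psydac.feec.pull_push import pull_2d_h1, pull_2d_hcurl

# Callable mapping F: (eta1, eta2) -> (x, y), with jacobian/metric_det methods
F = PolarMapping('F', c1=0.0, c2=0.0, rmin=0.1, rmax=1.0).get_callable_mapping()

# Scalar and vector fields given on the physical domain
phi = lambda x, y: x**2 + y**2
E   = (lambda x, y: -y, lambda x, y: x)

# Pull-backs now take (f, F) and return callables on the logical domain
phi_log        = pull_2d_h1(phi, F)
E1_log, E2_log = pull_2d_hcurl(E, F)

print(phi_log(0.5, np.pi / 4), E1_log(0.5, np.pi / 4), E2_log(0.5, np.pi / 4))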
+ def pull_3d_v(funcs_ini, mapping): mapping = mapping.get_callable_mapping() @@ -194,147 +188,125 @@ def fun(xi1, xi2, xi3): return fun #============================================================================== -def pull_3d_h1(func_ini, mapping): +def pull_3d_h1(f, F): - mapping = mapping.get_callable_mapping() - f1,f2,f3 = mapping._func_eval + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 3 - def fun(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) + def f_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) + return f(x, y, z) - value = func_ini(x, y, z) - return value - - return fun + return f_logical #============================================================================== -def pull_3d_hcurl(funcs_ini, mapping): +def pull_3d_hcurl(f, F): - mapping = mapping.get_callable_mapping() - f1,f2,f3 = mapping._func_eval - jacobian = mapping._jacobian + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 3 - def fun1(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) + # Assume that f is a list/tuple of callable functions + f1, f2, f3 = f - a1_phys = funcs_ini[0](x, y, z) - a2_phys = funcs_ini[1](x, y, z) - a3_phys = funcs_ini[2](x, y, z) + def f1_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) + + a1_phys = f1(x, y, z) + a2_phys = f2(x, y, z) + a3_phys = f3(x, y, z) - J_T_value = jacobian(xi1, xi2, xi3).T - value_1 = J_T_value[0,0]*a1_phys + J_T_value[0,1]*a2_phys + J_T_value[0,2]*a3_phys + J_T_value = F.jacobian(eta1, eta2, eta3).T + value_1 = J_T_value[0, 0] * a1_phys + J_T_value[0, 1] * a2_phys + J_T_value[0, 2] * a3_phys return value_1 - def fun2(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) - - a1_phys = funcs_ini[0](x, y, z) - a2_phys = funcs_ini[1](x, y, z) - a3_phys = funcs_ini[2](x, y, z) - - J_T_value = jacobian(xi1, xi2, xi3).T - - value_2 = J_T_value[1,0]*a1_phys + J_T_value[1,1]*a2_phys + J_T_value[1,2]*a3_phys + def f2_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) + + a1_phys = f1(x, y, z) + a2_phys = f2(x, y, z) + a3_phys = f3(x, y, z) + J_T_value = F.jacobian(eta1, eta2, eta3).T + value_2 = J_T_value[1, 0] * a1_phys + J_T_value[1, 1] * a2_phys + J_T_value[1, 2] * a3_phys return value_2 - def fun3(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) - - a1_phys = funcs_ini[0](x, y, z) - a2_phys = funcs_ini[1](x, y, z) - a3_phys = funcs_ini[2](x, y, z) + def f3_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) + + a1_phys = f1(x, y, z) + a2_phys = f2(x, y, z) + a3_phys = f3(x, y, z) - J_T_value = jacobian(xi1, xi2, xi3).T - - value_3 = J_T_value[2,0]*a1_phys + J_T_value[2,1]*a2_phys + J_T_value[2,2]*a3_phys + J_T_value = F.jacobian(eta1, eta2, eta3).T + value_3 = J_T_value[2, 0] * a1_phys + J_T_value[2, 1] * a2_phys + J_T_value[2, 2] * a3_phys return value_3 - return fun1, fun2, fun3 + return f1_logical, f2_logical, f3_logical #============================================================================== -def pull_3d_hdiv(funcs_ini, mapping): - - mapping = mapping.get_callable_mapping() - f1,f2,f3 = mapping._func_eval - J_inv = mapping._jacobian_inv - metric_det = mapping._metric_det - - def fun1(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) - - a1_phys = funcs_ini[0](x, y, z) - a2_phys = funcs_ini[1](x, y, z) - a3_phys = funcs_ini[2](x, y, z) - - J_inv_value = J_inv(xi1, xi2, xi3) - det_value = metric_det(xi1, 
xi2, xi3)**0.5 - - value_1 = J_inv_value[0,0]*a1_phys + J_inv_value[0,1]*a2_phys + J_inv_value[0,2]*a3_phys +def pull_3d_hdiv(f, F): - return det_value*value_1 + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 3 - def fun2(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) + # Assume that f is a list/tuple of callable functions + f1, f2, f3 = f - a1_phys = funcs_ini[0](x, y, z) - a2_phys = funcs_ini[1](x, y, z) - a3_phys = funcs_ini[2](x, y, z) + def f1_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) + + a1_phys = f1(x, y, z) + a2_phys = f2(x, y, z) + a3_phys = f3(x, y, z) - J_inv_value = J_inv(xi1, xi2, xi3) - det_value = metric_det(xi1, xi2, xi3)**0.5 + J_inv_value = F.jacobian_inv(eta1, eta2, eta3) + det_value = F.metric_det(eta1, eta2, eta3)**0.5 + value_1 = J_inv_value[0, 0] * a1_phys + J_inv_value[0, 1] * a2_phys + J_inv_value[0, 2] * a3_phys - value_2 = J_inv_value[1,0]*a1_phys + J_inv_value[1,1]*a2_phys + J_inv_value[1,2]*a3_phys + return det_value * value_1 - return det_value*value_2 + def f2_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) + + a1_phys = f1(x, y, z) + a2_phys = f2(x, y, z) + a3_phys = f3(x, y, z) - def fun3(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) + J_inv_value = F.jacobian_inv(eta1, eta2, eta3) + det_value = F.metric_det(eta1, eta2, eta3)**0.5 + value_2 = J_inv_value[1, 0] * a1_phys + J_inv_value[1, 1] * a2_phys + J_inv_value[1, 2] * a3_phys - a1_phys = funcs_ini[0](x, y, z) - a2_phys = funcs_ini[1](x, y, z) - a3_phys = funcs_ini[2](x, y, z) + return det_value * value_2 - J_inv_value = J_inv(xi1, xi2, xi3) - det_value = metric_det(xi1, xi2, xi3)**0.5 + def f3_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) + + a1_phys = f1(x, y, z) + a2_phys = f2(x, y, z) + a3_phys = f3(x, y, z) - value_3 = J_inv_value[2,0]*a1_phys + J_inv_value[2,1]*a2_phys + J_inv_value[2,2]*a3_phys + J_inv_value = F.jacobian_inv(eta1, eta2, eta3) + det_value = F.metric_det(eta1, eta2, eta3)**0.5 + value_3 = J_inv_value[2, 0] * a1_phys + J_inv_value[2, 1] * a2_phys + J_inv_value[2, 2] * a3_phys - return det_value*value_3 + return det_value * value_3 - return fun1, fun2, fun3 + return f1_logical, f2_logical, f3_logical #============================================================================== -def pull_3d_l2(func_ini, mapping): +def pull_3d_l2(f, F): - mapping = mapping.get_callable_mapping() - f1,f2,f3 = mapping._func_eval - metric_det = mapping._metric_det + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 3 - def fun(xi1, xi2, xi3): - x = f1(xi1, xi2, xi3) - y = f2(xi1, xi2, xi3) - z = f3(xi1, xi2, xi3) + def f_logical(eta1, eta2, eta3): + x, y, z = F(eta1, eta2, eta3) - det_value = metric_det(xi1, xi2, xi3)**0.5 - value = func_ini(x, y, z) + det_value = F.metric_det(eta1, eta2, eta3)**0.5 + value = f(x, y, z) return det_value*value - return fun + return f_logical #============================================================================== # PUSH-FORWARD operators: From f160dfbb27b8e3c997610b2be4d724a2ed232f03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Wed, 5 Oct 2022 16:12:43 +0200 Subject: [PATCH 02/28] Use SymPDE branch "fix-psydac-issue-189" --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 4bcf93575..81234b381 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ 'pyevtk', # Our packages from PyPi - 'sympde==0.15.2', + 'sympde @ 
git+https://github.com/pyccel/sympde@fix-psydac-issue-189', 'pyccel>=1.5.1', 'gelato==0.11', From 75c084477340cc607d40a38df27f9a9de36bfe3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Wed, 5 Oct 2022 18:56:16 +0200 Subject: [PATCH 03/28] Update push-forward operators: use BasicCallableMapping from sympde --- psydac/feec/pull_push.py | 175 +++++++++++++++++++++++---------------- 1 file changed, 104 insertions(+), 71 deletions(-) diff --git a/psydac/feec/pull_push.py b/psydac/feec/pull_push.py index d2fe9998f..9e900ce8e 100644 --- a/psydac/feec/pull_push.py +++ b/psydac/feec/pull_push.py @@ -318,126 +318,159 @@ def f_logical(eta1, eta2, eta3): #============================================================================== # 1D PUSH-FORWARD #============================================================================== -def push_1d_h1(func, xi1): - return func(xi1) +def push_1d_h1(f, eta): + return f(eta) -def push_1d_l2(func, xi1, mapping): +def push_1d_l2(f, eta, F): - mapping = mapping.get_callable_mapping() - metric_det = mapping._metric_det + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 1 + + det_value = F.metric_det(eta)**0.5 + value = f(eta) - det_value = metric_det(xi1)**0.5 - value = func(xi1) - return value/det_value + return f(eta) / det_value #============================================================================== # 2D PUSH-FORWARDS #============================================================================== -def push_2d_h1(func, xi1, xi2): - return func(xi1, xi2) +#def push_2d_h1(f, eta): +def push_2d_h1(f, eta1, eta2): + eta = eta1, eta2 + return f(*eta) + +#def push_2d_hcurl(f, eta, F): +def push_2d_hcurl(f1, f2, eta1, eta2, F): + + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 2 -def push_2d_hcurl(a1, a2, xi1, xi2, mapping): +# f1, f2 = f + eta = eta1, eta2 - F = mapping.get_callable_mapping() - J_inv_value = F.jacobian_inv(xi1, xi2) + J_inv_value = F.jacobian_inv(*eta) - a1_value = a1(xi1, xi2) - a2_value = a2(xi1, xi2) + f1_value = f1(*eta) + f2_value = f2(*eta) - value1 = J_inv_value[0, 0] * a1_value + J_inv_value[1, 0] * a2_value - value2 = J_inv_value[0, 1] * a1_value + J_inv_value[1, 1] * a2_value + value1 = J_inv_value[0, 0] * f1_value + J_inv_value[1, 0] * f2_value + value2 = J_inv_value[0, 1] * f1_value + J_inv_value[1, 1] * f2_value return value1, value2 #============================================================================== -def push_2d_hdiv(a1, a2, xi1, xi2, mapping): +#def push_2d_hdiv(f, eta, F): +def push_2d_hdiv(f1, f2, eta1, eta2, F): + + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 2 - mapping = mapping.get_callable_mapping() - J = mapping._jacobian - metric_det = mapping._metric_det +# f1, f2 = f + eta = eta1, eta2 - J_value = J(xi1, xi2) - det_value = metric_det(xi1, xi2)**0.5 + J_value = F.jacobian(*eta) + det_value = F.metric_det(*eta)**0.5 - value1 = ( J_value[0,0]*a1(xi1, xi2) + - J_value[0,1]*a2(xi1, xi2)) / det_value + value1 = (J_value[0, 0] * f1(*eta) + + J_value[0, 1] * f2(*eta)) / det_value - value2 = ( J_value[1,0]*a1(xi1, xi2) + - J_value[1,1]*a2(xi1, xi2)) / det_value + value2 = (J_value[1, 0] * f1(*eta) + + J_value[1, 1] * f2(*eta)) / det_value return value1, value2 #============================================================================== -def push_2d_l2(func, xi1, xi2, mapping): +#def push_2d_l2(f, eta, F): +def push_2d_l2(f, eta1, eta2, F): + + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 2 - F = 
mapping.get_callable_mapping() + eta = eta1, eta2 - # det_value = F.metric_det(xi1, xi2)**0.5 + # det_value = F.metric_det(eta1, eta2)**0.5 # MCP correction: use the determinant of the mapping Jacobian - J = F._jacobian - J_value = J(xi1, xi2) - det_value = J_value[0,0]*J_value[1,1]-J_value[1,0]*J_value[0,1] - value = func(xi1, xi2) + J_value = F.jacobian(*eta) + det_value = J_value[0, 0] * J_value[1, 1] - J_value[1, 0] * J_value[0, 1] - return value / det_value + return f(*eta) / det_value #============================================================================== # 3D PUSH-FORWARDS #============================================================================== -def push_3d_h1(func, xi1, xi2, xi3): - return func(xi1, xi2, xi3) +#def push_3d_h1(f, eta): +def push_3d_h1(f, eta1, eta2, eta3): + eta = eta1, eta2, eta3 + return f(*eta) + +#def push_3d_hcurl(f, eta, F): +def push_3d_hcurl(f1, f2, f3, eta1, eta2, eta3, F): + + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 3 -def push_3d_hcurl(a1, a2, a3, xi1, xi2, xi3, mapping): +# f1, f2, f3 = f + eta = eta1, eta2, eta3 - mapping = mapping.get_callable_mapping() - J_inv = mapping._jacobian_inv + f1_value = f1(*eta) + f2_value = f2(*eta) + f3_value = f3(*eta) - J_inv_value = J_inv(xi1, xi2, xi3) + J_inv_value = F.jacobian_inv(*eta) - value1 = (J_inv_value[0,0]*a1(xi1, xi2, xi3) + - J_inv_value[1,0]*a2(xi1, xi2, xi3) + - J_inv_value[2,0]*a3(xi1, xi2, xi3) ) + value1 = (J_inv_value[0, 0] * f1_value + + J_inv_value[1, 0] * f2_value + + J_inv_value[2, 0] * f3_value ) - value2 = (J_inv_value[0,1]*a1(xi1, xi2, xi3) + - J_inv_value[1,1]*a2(xi1, xi2, xi3) + - J_inv_value[2,1]*a3(xi1, xi2, xi3) ) + value2 = (J_inv_value[0, 1] * f1_value + + J_inv_value[1, 1] * f2_value + + J_inv_value[2, 1] * f3_value ) - value3 = (J_inv_value[0,2]*a1(xi1, xi2, xi3) + - J_inv_value[1,2]*a2(xi1, xi2, xi3) + - J_inv_value[2,2]*a3(xi1, xi2, xi3) ) + value3 = (J_inv_value[0, 2] * f1_value + + J_inv_value[1, 2] * f2_value + + J_inv_value[2, 2] * f3_value ) return value1, value2, value3 #============================================================================== -def push_3d_hdiv(a1, a2, a3, xi1, xi2, xi3, mapping): +#def push_3d_hdiv(f, eta, F): +def push_3d_hdiv(f1, f2, f3, eta1, eta2, eta3, F): + + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 3 - mapping = mapping.get_callable_mapping() - J = mapping._jacobian - metric_det = mapping._metric_det +# f1, f2, f3 = f + eta = eta1, eta2, eta3 - J_value = J(xi1, xi2, xi3) - det_value = metric_det(xi1, xi2, xi3)**0.5 + f1_value = f1(*eta) + f2_value = f2(*eta) + f3_value = f3(*eta) + J_value = F.jacobian(*eta) + det_value = F.metric_det(*eta)**0.5 - value1 = ( J_value[0,0]*a1(xi1, xi2, xi3) + - J_value[0,1]*a2(xi1, xi2, xi3) + - J_value[0,2]*a3(xi1, xi2, xi3) ) / det_value + value1 = ( J_value[0, 0] * f1_value + + J_value[0, 1] * f2_value + + J_value[0, 2] * f3_value ) / det_value - value2 = ( J_value[1,0]*a1(xi1, xi2, xi3) + - J_value[1,1]*a2(xi1, xi2, xi3) + - J_value[1,2]*a3(xi1, xi2, xi3) ) / det_value + value2 = ( J_value[1, 0] * f1_value + + J_value[1, 1] * f2_value + + J_value[1, 2] * f3_value ) / det_value - value3 = ( J_value[2,0]*a1(xi1, xi2, xi3) + - J_value[2,1]*a2(xi1, xi2, xi3) + - J_value[2,2]*a3(xi1, xi2, xi3) ) / det_value + value3 = ( J_value[2, 0] * f1_value + + J_value[2, 1] * f2_value + + J_value[2, 2] * f3_value ) / det_value return value1, value2, value3 #============================================================================== -def push_3d_l2(func, xi1, xi2, xi3, 
mapping): +#def push_3d_l2(f, eta, F): +def push_3d_l2(f, eta1, eta2, eta3, F): + + assert isinstance(F, BasicCallableMapping) + assert F.ldim == 3 + + eta = eta1, eta2, eta3 - mapping = mapping.get_callable_mapping() - metric_det = mapping._metric_det + det_value = F.metric_det(*eta)**0.5 - det_value = metric_det(xi1, xi2, xi3)**0.5 - value = func(xi1, xi2, xi3) - return value/det_value + return f(*eta) / det_value From f83398dde6d389b27ba54d773b475e3bbf5b21ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Thu, 6 Oct 2022 16:12:11 +0200 Subject: [PATCH 04/28] Update SplineMapping: subclass BasicCallableMapping from sympde --- psydac/mapping/discrete.py | 237 ++++++++++++++++++------------------- 1 file changed, 118 insertions(+), 119 deletions(-) diff --git a/psydac/mapping/discrete.py b/psydac/mapping/discrete.py index f5f1c7a79..be2531d92 100644 --- a/psydac/mapping/discrete.py +++ b/psydac/mapping/discrete.py @@ -10,34 +10,36 @@ import h5py import yaml -from sympde.topology.mapping import Mapping -from sympde.topology.datatype import H1SpaceType, L2SpaceType, HdivSpaceType, HcurlSpaceType, UndefinedSpaceType +from sympde.topology.callable_mapping import BasicCallableMapping +from sympde.topology.datatype import (H1SpaceType, L2SpaceType, + HdivSpaceType, HcurlSpaceType, + UndefinedSpaceType) + +from psydac.fem.basic import FemField +from psydac.fem.tensor import TensorFemSpace +from psydac.fem.vector import ProductFemSpace, VectorFemSpace from psydac.core.kernels import (pushforward_2d_l2, pushforward_3d_l2, pushforward_2d_hdiv, pushforward_3d_hdiv, pushforward_2d_hcurl, pushforward_3d_hcurl) -from psydac.fem.tensor import TensorFemSpace -from psydac.fem.basic import FemField -from psydac.fem.vector import ProductFemSpace, VectorFemSpace - -__all__ = ['SplineMapping', 'NurbsMapping'] +__all__ = ('SplineMapping', 'NurbsMapping') #============================================================================== -def random_string( n ): +def random_string(n): chars = string.ascii_uppercase + string.ascii_lowercase + string.digits selector = random.SystemRandom() - return ''.join( selector.choice( chars ) for _ in range( n ) ) + return ''.join(selector.choice(chars) for _ in range(n)) #============================================================================== -class SplineMapping: +class SplineMapping(BasicCallableMapping): - def __init__( self, *components, name=None ): + def __init__(self, *components, name=None): # Sanity checks - assert len( components ) >= 1 - assert all( isinstance( c, FemField ) for c in components ) - assert all( isinstance( c.space, TensorFemSpace ) for c in components ) - assert all( c.space is components[0].space for c in components ) + assert len(components) >= 1 + assert all(isinstance(c, FemField) for c in components) + assert all(isinstance(c.space, TensorFemSpace) for c in components) + assert all(c.space is components[0].space for c in components) # Store spline space and one field for each coordinate X_i self._space = components[0].space @@ -45,13 +47,13 @@ def __init__( self, *components, name=None ): # Store number of logical and physical dimensions self._ldim = components[0].space.ldim - self._pdim = len( components ) + self._pdim = len(components) # Create helper object for accessing control points with slicing syntax # as if they were stored in a single multi-dimensional array C with # indices [i1, ..., i_n, d] where (i1, ..., i_n) are indices of logical # coordinates, and d is index of physical component of interest. 
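As a reading aid, here is a sketch of the callable-mapping protocol that this patch series relies on when it passes F objects around (pull_push.py, feec.py, and this class). It is an illustrative assumption; the actual abstract base class is sympde.topology.callable_mapping.BasicCallableMapping and may differ in detail.

class CallableMappingProtocol:
    # Illustrative only: methods used on BasicCallableMapping instances
    def __call__(self, *eta):     ...   # physical coordinates F(eta)
    def jacobian(self, *eta):     ...   # Jacobian matrix dF/d(eta)
    def jacobian_inv(self, *eta): ...   # inverse of the Jacobian matrix
    def metric(self, *eta):       ...   # metric tensor J.T @ J
    def metric_det(self, *eta):   ...   # determinant of the metric tensor

    @property
    def ldim(self):               ...   # number of logical dimensions

    @property
    def pdim(self):               ...   # number of physical dimensions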
- self._control_points = SplineMapping.ControlPoints( self ) + self._control_points = SplineMapping.ControlPoints(self) self._name = name @property @@ -65,75 +67,98 @@ def set_name(self, name): # Option [1]: initialize from TensorFemSpace and pre-existing mapping #-------------------------------------------------------------------------- @classmethod - def from_mapping( cls, tensor_space, mapping ): + def from_mapping(cls, tensor_space, mapping): - assert isinstance( tensor_space, TensorFemSpace ) - assert isinstance( mapping, Mapping ) + assert isinstance(tensor_space, TensorFemSpace) + assert isinstance(mapping, BasicCallableMapping) assert tensor_space.ldim == mapping.ldim # Create one separate scalar field for each physical dimension # TODO: use one unique field belonging to VectorFemSpace - fields = [FemField( tensor_space ) for d in range( mapping.pdim )] + fields = [FemField(tensor_space) for d in range(mapping.pdim)] V = tensor_space.vector_space - values = [V.zeros() for d in range( mapping.pdim )] - ranges = [range(s,e+1) for s,e in zip( V.starts, V.ends )] + values = [V.zeros() for d in range(mapping.pdim)] + ranges = [range(s, e+1) for s, e in zip(V.starts, V.ends)] grids = [space.greville for space in tensor_space.spaces] # Evaluate analytical mapping at Greville points (tensor-product grid) # and store vector values in one separate scalar field for each # physical dimension # TODO: use one unique field belonging to VectorFemSpace - callable_mapping = mapping.get_callable_mapping() - for index in product( *ranges ): - x = [grid[i] for grid,i in zip( grids, index )] - u = callable_mapping( *x ) - for d,ud in enumerate( u ): + for index in product(*ranges): + x = [grid[i] for grid, i in zip(grids, index)] + u = mapping(*x) + for d, ud in enumerate(u): values[d][index] = ud # Compute spline coefficients for each coordinate X_i - for pvals, field in zip( values, fields ): - tensor_space.compute_interpolant( pvals, field ) + for pvals, field in zip(values, fields): + tensor_space.compute_interpolant(pvals, field) # Create SplineMapping object - return cls( *fields ) + return cls(*fields) #-------------------------------------------------------------------------- # Option [2]: initialize from TensorFemSpace and spline control points #-------------------------------------------------------------------------- @classmethod - def from_control_points( cls, tensor_space, control_points ): + def from_control_points(cls, tensor_space, control_points): - assert isinstance( tensor_space, TensorFemSpace ) - assert isinstance( control_points, (np.ndarray, h5py.Dataset) ) + assert isinstance(tensor_space, TensorFemSpace) + assert isinstance(control_points, (np.ndarray, h5py.Dataset)) assert control_points.ndim == tensor_space.ldim + 1 - assert control_points.shape[:-1] == tuple( V.nbasis for V in tensor_space.spaces ) + assert control_points.shape[:-1] == tuple(V.nbasis for V in tensor_space.spaces) assert control_points.shape[ -1] >= tensor_space.ldim # Create one separate scalar field for each physical dimension # TODO: use one unique field belonging to VectorFemSpace - fields = [FemField( tensor_space ) for d in range( control_points.shape[-1] )] + fields = [FemField(tensor_space) for d in range(control_points.shape[-1])] # Get spline coefficients for each coordinate X_i starts = tensor_space.vector_space.starts ends = tensor_space.vector_space.ends - idx_to = tuple( slice( s, e+1 ) for s,e in zip( starts, ends ) ) - for i,field in enumerate( fields ): - idx_from = tuple(list(idx_to)+[i]) + 
idx_to = tuple(slice(s, e+1) for s, e in zip(starts, ends)) + for i,field in enumerate(fields): + idx_from = (*idx_to, i) field.coeffs[idx_to] = control_points[idx_from] field.coeffs.update_ghost_regions() # Create SplineMapping object - return cls( *fields ) + return cls(*fields) #-------------------------------------------------------------------------- # Abstract interface #-------------------------------------------------------------------------- - def __call__( self, *eta): - return [map_Xd( *eta) for map_Xd in self._fields] + def __call__(self, *eta): + return [map_Xd(*eta) for map_Xd in self._fields] + + # ... + def jacobian(self, *eta): + return np.array([map_Xd.gradient(*eta) for map_Xd in self._fields]) + + # ... + def metric(self, *eta): + J = self.jacobian(*eta) + return np.dot(J.T, J) + + # ... + def metric_det(self, *eta): + return np.linalg.det(self.metric(*eta)) + @property + def ldim(self): + return self._ldim + + @property + def pdim(self): + return self._pdim + + #-------------------------------------------------------------------------- + # Fast evaluation on a grid + #-------------------------------------------------------------------------- def build_mesh(self, grid, npts_per_cell=None, overlap=0): """Evaluation of the mapping on the given grid. @@ -163,23 +188,6 @@ def build_mesh(self, grid, npts_per_cell=None, overlap=0): mesh = self.space.eval_fields(grid, *self._fields, npts_per_cell=npts_per_cell, overlap=overlap) return mesh - # ... - def jac_mat( self, *eta): - return np.array( [map_Xd.gradient( *eta ) for map_Xd in self._fields] ) - - # ... - def jac_det(self, *eta): - return np.linalg.det(self.jac_mat(*eta)) - - # ... - def metric( self, *eta): - J = self.jac_mat( *eta ) - return np.dot( J.T, J ) - - # ... - def metric_det( self, *eta): - return np.linalg.det( self.metric( *eta ) ) - # ... def jac_mat_grid(self, grid, npts_per_cell=None, overlap=0): """Evaluates the Jacobian matrix of the mapping at the given location(s) grid. @@ -691,32 +699,26 @@ def jac_det_irregular_tensor_grid(self, grid, overlap=0): return jac_dets - @property - def ldim( self ): - return self._ldim - - @property - def pdim( self ): - return self._pdim - #-------------------------------------------------------------------------- # Other properties/methods #-------------------------------------------------------------------------- + def jacobian_det(self, *eta): + return np.linalg.det(self.jac_mat(*eta)) @property - def space( self ): + def space(self): return self._space @property - def fields( self ): + def fields(self): return self._fields @property - def control_points( self ): + def control_points(self): return self._control_points # TODO: move to 'Geometry' class in 'psydac.cad.geometry' module - def export( self, filename ): + def export(self, filename): """ Export tensor-product spline space and mapping to geometry file in HDF5 format (single-patch only). @@ -781,79 +783,79 @@ class ControlPoints: """ # TODO: should not allow access to ghost regions - def __init__( self, mapping ): - assert isinstance( mapping, SplineMapping ) + def __init__(self, mapping): + assert isinstance(mapping, SplineMapping) self._mapping = mapping # ... @property - def mapping( self ): + def mapping(self): return self._mapping # ... 
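A hedged construction sketch, following the same pattern as the updated visual tests and discrete_gallery later in this series (it assumes the pinned SymPDE branch, so that get_callable_mapping() returns a BasicCallableMapping accepted by from_mapping):

import numpy as np
from mpi4py import MPI
from sympde.topology.analytical_mapping import PolarMapping
from psydac.ddm.cart import DomainDecomposition
from psydac.fem.splines import SplineSpace
from psydac.fem.tensor import TensorFemSpace
from psydac.mapping.discrete import SplineMapping

# Analytical mapping -> callable mapping (SymPDE)
F = PolarMapping('F', c1=0.0, c2=0.0, rmin=0.1, rmax=1.0).get_callable_mapping()

# Tensor-product spline space on the logical domain (0, 1) x (0, 2*pi)
nc, deg, per = [8, 16], [2, 2], [False, True]
V1 = SplineSpace(grid=np.linspace(0, 1, nc[0] + 1), degree=deg[0], periodic=per[0])
V2 = SplineSpace(grid=np.linspace(0, 2 * np.pi, nc[1] + 1), degree=deg[1], periodic=per[1])
space = TensorFemSpace(DomainDecomposition(nc, per, comm=MPI.COMM_WORLD), V1, V2)

# Spline mapping interpolating F; it exposes the same callable interface
S = SplineMapping.from_mapping(space, F)
x, y = S(0.5, 0.5 * np.pi)            # physical coordinates
J    = S.jacobian(0.5, 0.5 * np.pi)   # Jacobian matrix (renamed from jac_mat)
g    = S.metric_det(0.5, 0.5 * np.pi)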
- def __getitem__( self, key ): + def __getitem__(self, key): m = self._mapping if key is Ellipsis: - key = tuple( slice( None ) for i in range( m.ldim+1 ) ) - elif isinstance( key, tuple ): - assert len( key ) == m.ldim+1 + key = tuple(slice(None) for i in range(m.ldim + 1)) + elif isinstance(key, tuple): + assert len(key) == m.ldim + 1 else: - raise ValueError( key ) + raise ValueError(key) pnt_idx = key[:-1] dim_idx = key[-1] - if isinstance( dim_idx, slice ): - dim_idx = range( *dim_idx.indices( m.pdim ) ) - coeffs = np.array( [m.fields[d].coeffs[pnt_idx] for d in dim_idx] ) - coords = np.moveaxis( coeffs, 0, -1 ) + if isinstance(dim_idx, slice): + dim_idx = range(*dim_idx.indices(m.pdim)) + coeffs = np.array([m.fields[d].coeffs[pnt_idx] for d in dim_idx]) + coords = np.moveaxis(coeffs, 0, -1) else: - coords = np.array( m.fields[dim_idx].coeffs[pnt_idx] ) + coords = np.array(m.fields[dim_idx].coeffs[pnt_idx]) return coords #============================================================================== -class NurbsMapping( SplineMapping ): +class NurbsMapping(SplineMapping): - def __init__( self, *components, name=None ): + def __init__(self, *components, name=None): weights = components[-1] components = components[:-1] - SplineMapping.__init__( self, *components, name=name ) + SplineMapping.__init__(self, *components, name=name) - self._weights = NurbsMapping.Weights( self ) + self._weights = NurbsMapping.Weights(self) self._weights_field = weights #-------------------------------------------------------------------------- # Option [2]: initialize from TensorFemSpace and spline control points #-------------------------------------------------------------------------- @classmethod - def from_control_points_weights( cls, tensor_space, control_points, weights ): + def from_control_points_weights(cls, tensor_space, control_points, weights): - assert isinstance( tensor_space, TensorFemSpace ) - assert isinstance( control_points, (np.ndarray, h5py.Dataset) ) - assert isinstance( weights, (np.ndarray, h5py.Dataset) ) + assert isinstance(tensor_space, TensorFemSpace) + assert isinstance(control_points, (np.ndarray, h5py.Dataset)) + assert isinstance(weights, (np.ndarray, h5py.Dataset)) assert control_points.ndim == tensor_space.ldim + 1 - assert control_points.shape[:-1] == tuple( V.nbasis for V in tensor_space.spaces ) + assert control_points.shape[:-1] == tuple(V.nbasis for V in tensor_space.spaces) assert control_points.shape[ -1] >= tensor_space.ldim - assert weights.shape == tuple( V.nbasis for V in tensor_space.spaces ) + assert weights.shape == tuple(V.nbasis for V in tensor_space.spaces) # Create one separate scalar field for each physical dimension # TODO: use one unique field belonging to VectorFemSpace - fields = [FemField( tensor_space ) for d in range( control_points.shape[-1] )] - fields += [FemField( tensor_space )] + fields = [FemField(tensor_space) for d in range(control_points.shape[-1])] + fields += [FemField(tensor_space)] # Get spline coefficients for each coordinate X_i # we store w*x where w is the weight and x is the control point starts = tensor_space.vector_space.starts ends = tensor_space.vector_space.ends - idx_to = tuple( slice( s, e+1 ) for s,e in zip( starts, ends ) ) - for i,field in enumerate( fields[:-1] ): - idx_from = tuple(list(idx_to)+[i]) + idx_to = tuple(slice(s, e+1) for s,e in zip(starts, ends)) + for i, field in enumerate(fields[:-1]): + idx_from = (*idx_to, i) # idw_from = tuple(idx_to) field.coeffs[idx_to] = control_points[idx_from] #* 
weights[idw_from] @@ -862,17 +864,29 @@ def from_control_points_weights( cls, tensor_space, control_points, weights ): fields[-1].coeffs[idx_to] = weights[idx_from] # Create SplineMapping object - return cls( *fields ) + return cls(*fields) #-------------------------------------------------------------------------- # Abstract interface #-------------------------------------------------------------------------- - def __call__( self, *eta): + def __call__(self, *eta): map_W = self._weights_field - w = map_W( *eta ) - Xd = [map_Xd( *eta , weights=map_W.coeffs) for map_Xd in self._fields] - return np.asarray( Xd ) / w + w = map_W(*eta) + Xd = [map_Xd(*eta , weights=map_W.coeffs) for map_Xd in self._fields] + return np.asarray(Xd) / w + + # ... + def jacobian(self, *eta): + map_W = self._weights_field + w = map_W(*eta) + grad_w = np.array(map_W.gradient(*eta)) + v = np.array([map_Xd(*eta, weights=map_W.coeffs) for map_Xd in self._fields]) + grad_v = np.array([map_Xd.gradient(*eta, weights=map_W.coeffs) for map_Xd in self._fields]) + return grad_v / w - v[:, None] @ grad_w[None, :] / w**2 + #-------------------------------------------------------------------------- + # Fast evaluation on a grid + #-------------------------------------------------------------------------- def build_mesh(self, grid, npts_per_cell=None, overlap=0): """Evaluation of the mapping on the given grid. @@ -898,15 +912,6 @@ def build_mesh(self, grid, npts_per_cell=None, overlap=0): mesh = self.space.eval_fields(grid, *self._fields, npts_per_cell=npts_per_cell, weights=self._weights_field, overlap=overlap) return mesh - # ... - def jac_mat(self, *eta): - map_W = self._weights_field - w = map_W(*eta) - grad_w = np.array(map_W.gradient(*eta)) - v = np.array([map_Xd(*eta, weights=map_W.coeffs) for map_Xd in self._fields]) - grad_v = np.array([map_Xd.gradient(*eta, weights=map_W.coeffs) for map_Xd in self._fields]) - return grad_v / w - v[:, None] @ grad_w[None, :] / w**2 - # ... def jac_mat_regular_tensor_grid(self, grid, overlap=0): """Evaluates the Jacobian matrix on a regular tensor product grid. 
@@ -1243,15 +1248,9 @@ def jac_det_irregular_tensor_grid(self, grid, overlap=0): return jac_dets - #-------------------------------------------------------------------------- # Other properties/methods #-------------------------------------------------------------------------- - - @property - def control_points( self ): - return self._control_points - @property def weights_field( self ): return self._weights_field From 5776b6f43fa7ee6cd49ac1678f7f747567d51ee2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 7 Oct 2022 10:25:49 +0200 Subject: [PATCH 05/28] Update visual tests for spline mappings --- .../tests/visual_test_discrete_mapping_2d.py | 124 +++++++++++------- ...visual_test_discrete_mapping_3d_surface.py | 93 ++++++------- 2 files changed, 124 insertions(+), 93 deletions(-) diff --git a/psydac/mapping/tests/visual_test_discrete_mapping_2d.py b/psydac/mapping/tests/visual_test_discrete_mapping_2d.py index fd8880009..b2a5f0f70 100644 --- a/psydac/mapping/tests/visual_test_discrete_mapping_2d.py +++ b/psydac/mapping/tests/visual_test_discrete_mapping_2d.py @@ -2,80 +2,106 @@ # # Copyright 2018 Yaman Güçlü -from mpi4py import MPI - #============================================================================== # Driver #============================================================================== -def main( mapping='Target', degree=(2,2), ncells=(2,5), **kwargs ): +def main(*, mapping, degree, ncells, name='F'): + from mpi4py import MPI import numpy as np import matplotlib.pyplot as plt - from psydac.fem.basic import FemField - from psydac.fem.splines import SplineSpace - from psydac.fem.tensor import TensorFemSpace - from psydac.mapping.discrete import SplineMapping - from psydac.mapping.analytical import IdentityMapping - from psydac.mapping.analytical_gallery import Annulus, Target, Czarny, Collela - from psydac.utilities.utils import refine_array_1d + from sympde.topology.analytical_mapping import (IdentityMapping, + TargetMapping, CzarnyMapping, PolarMapping, CollelaMapping2D) + + from psydac.mapping.discrete import SplineMapping + from psydac.fem.basic import FemField + from psydac.fem.splines import SplineSpace + from psydac.fem.tensor import TensorFemSpace + from psydac.ddm.cart import DomainDecomposition + from psydac.utilities.utils import refine_array_1d # Input parameters if mapping == 'Identity': - map_analytic = IdentityMapping( ndim=2 ) + map_symbolic = IdentityMapping(name, dim=2) lims1 = (0, 1) lims2 = (0, 1) - period1 = False - period2 = False + periodic1 = False + periodic2 = False elif mapping == 'Collela': - map_analytic = Collela( **kwargs ) + map_symbolic = CollelaMapping2D(name, eps=0.05, k1=1, k2=1) lims1 = (0, 1) lims2 = (0, 1) - period1 = False - period2 = False + periodic1 = False + periodic2 = False - else: - map_analytic = locals()[mapping]( **kwargs ) + elif mapping == 'Polar': + map_symbolic = PolarMapping(name, c1=0.1, c2=0.1, rmin=0.1, rmax=1) + lims1 = (0, 1) + lims2 = (0, 2*np.pi) + periodic1 = False + periodic2 = True + + elif mapping == 'Target': + map_symbolic = TargetMapping(name, c1=0.0, c2=0.0, k=0.3, D=0.1) + lims1 = (0, 1) + lims2 = (0, 2*np.pi) + periodic1 = False + periodic2 = True + + elif mapping == 'Czarny': + map_symbolic = CzarnyMapping(name, eps=0.3, c2=0, b=2) lims1 = (0, 1) lims2 = (0, 2*np.pi) - period1 = False - period2 = True + periodic1 = False + periodic2 = True + + else: + raise ValueError(f'Test case does not support mapping "{mapping}"') + + map_analytic = 
map_symbolic.get_callable_mapping() p1 , p2 = degree nc1, nc2 = ncells - # Create tensor spline space, distributed - V1 = SplineSpace( grid=np.linspace( *lims1, num=nc1+1 ), degree=p1, periodic=period1 ) - V2 = SplineSpace( grid=np.linspace( *lims2, num=nc2+1 ), degree=p2, periodic=period2 ) - tensor_space = TensorFemSpace( V1, V2, comm=MPI.COMM_WORLD ) + # Create 1D spline spaces along x1 and x2 + V1 = SplineSpace( grid=np.linspace(*lims1, num=nc1+1), degree=p1, periodic=periodic1 ) + V2 = SplineSpace( grid=np.linspace(*lims2, num=nc2+1), degree=p2, periodic=periodic2 ) + + # Create tensor-product 2D spline space, distributed + domain_decomposition = DomainDecomposition([nc1, nc2], [periodic1, periodic2], comm=MPI.COMM_WORLD) + tensor_space = TensorFemSpace(domain_decomposition, V1, V2) # Create spline mapping by interpolating analytical one - map_discrete = SplineMapping.from_mapping( tensor_space, map_analytic ) + map_discrete = SplineMapping.from_mapping(tensor_space, map_analytic) # Display analytical and spline mapping on refined grid, then plot error N = 20 - r = refine_array_1d( V1.breaks, N ) - t = refine_array_1d( V2.breaks, N ) + r = refine_array_1d(V1.breaks, N) + t = refine_array_1d(V2.breaks, N) + +# shape = len(r), len(t) +# xa, ya = [np.array(v).reshape(shape) for v in zip(*[map_analytic(ri, tj) for ri in r for tj in t])] +# xd, yd = [np.array(v).reshape(shape) for v in zip(*[map_discrete(ri, tj) for ri in r for tj in t])] - shape = len(r), len(t) - xa, ya = [np.array( v ).reshape( shape ) for v in zip( *[map_analytic( [ri,tj] ) for ri in r for tj in t] )] - xd, yd = [np.array( v ).reshape( shape ) for v in zip( *[map_discrete( [ri,tj] ) for ri in r for tj in t] )] + xa, ya = map_analytic(*np.meshgrid(r, t, indexing='ij', sparse=True)) + xd, yd = map_discrete.build_mesh([r, t]) - figtitle = 'Mapping: {:s}, Degree: [{:d},{:d}], Ncells: [{:d},{:d}]'.format( - map_analytic.__class__.__name__, p1, p2, nc1, nc2 ) + figtitle = 'Mapping: {:s}, Degree: [{:d},{:d}], Ncells: [{:d},{:d}]'.format( + map_symbolic.__class__.__name__, p1, p2, nc1, nc2) - fig, axes = plt.subplots( 1, 2, figsize=[12,5], num=figtitle ) + fig, axes = plt.subplots(1, 2, figsize=[12,5], num=figtitle) for ax in axes: ax.set_aspect('equal') axes[0].set_title( 'Analytical (black) vs. spline (red)' ) - axes[0].plot( xa[::N,:].T, ya[::N,:].T, 'k' ); axes[0].plot( xa[:,::N] , ya[:,::N] , 'k' ) - axes[0].plot( xd[::N,:].T, yd[::N,:].T, 'r' ); axes[0].plot( xd[:,::N] , yd[:,::N] , 'r' ) + axes[0].plot(xa[::N,:].T, ya[::N,:].T, 'k'); axes[0].plot(xa[:,::N], ya[:,::N], 'k') + axes[0].plot(xd[::N,:].T, yd[::N,:].T, 'r'); axes[0].plot(xd[:,::N], yd[:,::N], 'r') - axes[1].set_title( 'Error (distance)' ) - im = axes[1].contourf( xa, ya, np.sqrt( (xa-xd)**2+(ya-yd)**2) ) - fig.colorbar( im ) + axes[1].set_title('Error (distance)') + im = axes[1].contourf(xa, ya, np.sqrt((xa-xd)**2+(ya-yd)**2)) + fig.colorbar(im) fig.tight_layout() fig.show() @@ -95,28 +121,28 @@ def parse_input_arguments(): ' spline and plot results.' 
) - parser.add_argument( '-m', + parser.add_argument('-m', type = str, - choices =('Identity', 'Annulus', 'Target', 'Czarny', 'Collela'), - default = 'Annulus', + choices =('Identity', 'Polar', 'Target', 'Czarny', 'Collela'), + default = 'Polar', dest = 'mapping', help = 'Analytical mapping' ) - parser.add_argument( '-d', + parser.add_argument('-d', type = int, nargs = 2, - default = [2,2], - metavar = ('P1','P2'), + default = [2, 2], + metavar = ('P1', 'P2'), dest = 'degree', help = 'Spline degree along each dimension' ) - parser.add_argument( '-n', + parser.add_argument('-n', type = int, nargs = 2, - default = [2,5], - metavar = ('N1','N2'), + default = [2, 5], + metavar = ('N1', 'N2'), dest = 'ncells', help = 'Number of grid cells (elements) along each dimension' ) @@ -131,10 +157,10 @@ def parse_input_arguments(): if __name__ == '__main__': args = parse_input_arguments() - namespace = main( **vars( args ) ) + namespace = main(**vars(args)) import __main__ - if hasattr( __main__, '__file__' ): + if hasattr(__main__, '__file__'): try: __IPYTHON__ except NameError: diff --git a/psydac/mapping/tests/visual_test_discrete_mapping_3d_surface.py b/psydac/mapping/tests/visual_test_discrete_mapping_3d_surface.py index 7ed4fdba5..814f076fa 100644 --- a/psydac/mapping/tests/visual_test_discrete_mapping_3d_surface.py +++ b/psydac/mapping/tests/visual_test_discrete_mapping_3d_surface.py @@ -2,82 +2,87 @@ # # Copyright 2018 Yaman Güçlü -from mpi4py import MPI - #============================================================================== # Driver #============================================================================== -def main( mapping='TwistedTarget', degree=(2,2), ncells=(4,5), **kwargs ): +def main(*, mapping, degree, ncells, name='F'): + from mpi4py import MPI import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D - from psydac.fem.basic import FemField - from psydac.fem.splines import SplineSpace - from psydac.fem.tensor import TensorFemSpace - from psydac.mapping.discrete import SplineMapping - from psydac.mapping.analytical_gallery import TwistedTarget, Torus - from psydac.utilities.utils import refine_array_1d + from sympde.topology.analytical_mapping import (TwistedTargetSurfaceMapping, + TorusSurfaceMapping) - # Input parameters - map_analytic = locals()[mapping]( **kwargs ) - p1 , p2 = degree - nc1, nc2 = ncells + from psydac.mapping.discrete import SplineMapping + from psydac.fem.basic import FemField + from psydac.fem.splines import SplineSpace + from psydac.fem.tensor import TensorFemSpace + from psydac.ddm.cart import DomainDecomposition + from psydac.utilities.utils import refine_array_1d - # Limits + # Input parameters # TODO: read from mapping object if mapping == 'TwistedTarget': + map_symbolic = TwistedTargetSurfaceMapping(name, c1=0, c2=0, c3=0, k=0.3, D=0.1) lims1 = (0, 1) lims2 = (0, 2*np.pi) - elif mapping == 'Torus': - lims1 = (0, 2*np.pi) - lims2 = (0, 2*np.pi) - else: - raise ValueError( 'Unknown limits for given mapping' ) - - # Periodicity - # TODO: read from mapping object - if mapping == 'TwistedTarget': periodic1 = False periodic2 = True + elif mapping == 'Torus': + map_symbolic = TorusSurfaceMapping(name, R0=3, a=1) + lims1 = (0, 2*np.pi) + lims2 = (0, 2*np.pi) periodic1 = True periodic2 = True + else: - raise ValueError( 'Unknown periodicity for given mapping' ) + raise ValueError(f'Test case does not support mapping "{mapping}"') - # Create tensor spline space, distributed - V1 = SplineSpace( grid=np.linspace( *lims1, 
num=nc1+1 ), degree=p1, periodic=periodic1 ) - V2 = SplineSpace( grid=np.linspace( *lims2, num=nc2+1 ), degree=p2, periodic=periodic2 ) - tensor_space = TensorFemSpace( V1, V2, comm=MPI.COMM_WORLD ) + map_analytic = map_symbolic.get_callable_mapping() + + p1 , p2 = degree + nc1, nc2 = ncells + + # Create 1D spline spaces along x1 and x2 + V1 = SplineSpace( grid=np.linspace(*lims1, num=nc1+1), degree=p1, periodic=periodic1 ) + V2 = SplineSpace( grid=np.linspace(*lims2, num=nc2+1), degree=p2, periodic=periodic2 ) + + # Create tensor-product 2D spline space, distributed + domain_decomposition = DomainDecomposition([nc1, nc2], [periodic1, periodic2], comm=MPI.COMM_WORLD) + tensor_space = TensorFemSpace(domain_decomposition, V1, V2) # Create spline mapping by interpolating analytical one - map_discrete = SplineMapping.from_mapping( tensor_space, map_analytic ) + map_discrete = SplineMapping.from_mapping(tensor_space, map_analytic) # Display analytical and spline mapping on refined grid, then plot error N = 20 - r = refine_array_1d( V1.breaks, N ) - t = refine_array_1d( V2.breaks, N ) + r = refine_array_1d(V1.breaks, N) + t = refine_array_1d(V2.breaks, N) + +# shape = len(r), len(t) +# xa, ya, za = [np.array(v).reshape(shape) for v in zip(*[map_analytic([ri,tj]) for ri in r for tj in t])] +# xd, yd, zd = [np.array(v).reshape(shape) for v in zip(*[map_discrete([ri,tj]) for ri in r for tj in t])] - shape = len(r), len(t) - xa, ya, za = [np.array( v ).reshape( shape ) for v in zip( *[map_analytic( [ri,tj] ) for ri in r for tj in t] )] - xd, yd, zd = [np.array( v ).reshape( shape ) for v in zip( *[map_discrete( [ri,tj] ) for ri in r for tj in t] )] + xa, ya, za = map_analytic(*np.meshgrid(r, t, indexing='ij', sparse=True)) + xd, yd, zd = map_discrete.build_mesh([r, t]) - figtitle = 'Mapping: {:s}, Degree: [{:d},{:d}], Ncells: [{:d},{:d}]'.format( - map_analytic.__class__.__name__, p1, p2, nc1, nc2 ) + figtitle = 'Mapping: {:s}, Degree: [{:d},{:d}], Ncells: [{:d},{:d}]'.format( + map_analytic.__class__.__name__, p1, p2, nc1, nc2) - fig, ax = plt.subplots( 1, 1, figsize=[5,5], num=figtitle, subplot_kw={'projection':'3d'} ) + fig, ax = plt.subplots(1, 1, figsize=[5,5], num=figtitle, subplot_kw={'projection':'3d'}) - ax.set_title( 'Analytical (black) vs. spline (red)' ) - ax.plot_surface ( xa, ya, za, color='k', alpha=0.2 ) - ax.plot_wireframe( xa, ya, za, color='k', rstride=N, cstride=N ) - ax.plot_wireframe( xd, yd, zd, color='r', rstride=N, cstride=N ) + ax.set_title('Analytical (black) vs. 
spline (red)') + ax.plot_surface (xa, ya, za, color='k', alpha=0.2) + ax.plot_wireframe(xa, ya, za, color='k', rstride=N, cstride=N) + ax.plot_wireframe(xd, yd, zd, color='r', rstride=N, cstride=N) - scaling = np.array( [getattr( ax, 'get_{}lim'.format(dim) )() for dim in 'xyz'] ) - ax.auto_scale_xyz( *[[np.min(scaling), np.max(scaling)]]*3 ) + scaling = np.array([getattr(ax, 'get_{}lim'.format(dim))() for dim in 'xyz']) + ax.auto_scale_xyz(*[[np.min(scaling), np.max(scaling)]]*3) - ax.set_aspect( 'equal' ) +# ax.set_aspect('equal') fig.tight_layout() fig.show() From 9d25d217297ec4841abc46e407fad60a0f43e89f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 7 Oct 2022 12:47:17 +0200 Subject: [PATCH 06/28] Update function discrete_mapping in module discrete_gallery --- psydac/mapping/discrete_gallery.py | 147 +++++++++++------------------ 1 file changed, 56 insertions(+), 91 deletions(-) diff --git a/psydac/mapping/discrete_gallery.py b/psydac/mapping/discrete_gallery.py index 6a65f3f9f..67a194532 100644 --- a/psydac/mapping/discrete_gallery.py +++ b/psydac/mapping/discrete_gallery.py @@ -1,16 +1,17 @@ # coding: utf-8 -#!/usr/bin/env python import numpy as np from mpi4py import MPI -from sympde.topology.analytical_mapping import IdentityMapping -from sympde.topology.analytical_mapping import PolarMapping, TargetMapping, CzarnyMapping, CollelaMapping2D -from sympde.topology.mapping import Mapping -from psydac.fem.splines import SplineSpace -from psydac.fem.tensor import TensorFemSpace -from psydac.mapping.discrete import SplineMapping -from psydac.ddm.cart import DomainDecomposition +from sympde.topology.mapping import Mapping +from sympde.topology.analytical_mapping import (IdentityMapping, PolarMapping, + TargetMapping, CzarnyMapping, + CollelaMapping2D) + +from psydac.fem.splines import SplineSpace +from psydac.fem.tensor import TensorFemSpace +from psydac.mapping.discrete import SplineMapping +from psydac.ddm.cart import DomainDecomposition class Collela3D( Mapping ): @@ -28,124 +29,88 @@ def discrete_mapping(mapping, ncells, degree, **kwargs): mapping = mapping.lower() dim = len(ncells) - if not( dim in [2,3] ): + if dim not in [2, 3]: raise NotImplementedError('Only 2D and 3D mappings are available') # ... 
if dim == 2: # Input parameters if mapping == 'identity': - map_analytic = IdentityMapping( 'M', dim=dim ) - lims1 = (0, 1) - lims2 = (0, 1) - period1 = False - period2 = False + map_symbolic = IdentityMapping('M', dim=dim) + limits = ((0, 1), (0, 1)) + periodic = (False, False) elif mapping == 'collela': - default_params = dict( k1=1.0, k2=1.0, eps=0.1 ) - map_analytic = CollelaMapping2D( 'M', dim=dim, **default_params ) - lims1 = (0, 1) - lims2 = (0, 1) - period1 = False - period2 = False + default_params = dict(k1=1.0, k2=1.0, eps=0.1) + map_symbolic = CollelaMapping2D('M', dim=dim, **default_params) + limits = ((0, 1), (0, 1)) + periodic = (False, False) elif mapping == 'circle': - default_params = dict( rmin=0.0, rmax=1.0, c1=0.0, c2=0.0) - map_analytic = PolarMapping( 'M', dim=dim, **default_params ) - lims1 = (0, 1) - lims2 = (0, 2*np.pi) - period1 = False - period2 = True + default_params = dict(rmin=0.0, rmax=1.0, c1=0.0, c2=0.0) + map_symbolic = PolarMapping('M', dim=dim, **default_params) + limits = ((0, 1), (0, 2*np.pi)) + periodic = (False, True) elif mapping == 'annulus': - default_params = dict( rmin=0.0, rmax=1.0, c1=0.0, c2=0.0) - map_analytic = PolarMapping( 'M', dim=dim, **default_params ) - lims1 = (1, 4) - lims2 = (0, 2*np.pi) - period1 = False - period2 = True + default_params = dict(rmin=0.0, rmax=1.0, c1=0.0, c2=0.0) + map_symbolic = PolarMapping('M', dim=dim, **default_params) + limits = ((1, 4), (0, 2*np.pi)) + periodic = (False, True) elif mapping == 'quarter_annulus': - default_params = dict( rmin=0.0, rmax=1.0, c1=0.0, c2=0.0) - map_analytic = PolarMapping( 'M', dim=dim, **default_params ) - lims1 = (1, 4) - lims2 = (0, np.pi/2) - period1 = False - period2 = False + default_params = dict(rmin=0.0, rmax=1.0, c1=0.0, c2=0.0) + map_symbolic = PolarMapping('M', dim=dim, **default_params) + limits = ((1, 4), (0, np.pi/2)) + periodic = (False, False) elif mapping == 'target': - default_params = dict( c1=0, c2=0, k=0.3, D=0.2 ) - map_analytic = TargetMapping( 'M', dim=dim, **default_params ) - lims1 = (0, 1) - lims2 = (0, 2*np.pi) - period1 = False - period2 = True + default_params = dict(c1=0, c2=0, k=0.3, D=0.2) + map_symbolic = TargetMapping('M', dim=dim, **default_params) + limits = ((0, 1), (0, 2*np.pi)) + periodic = (False, True) elif mapping == 'czarny': - default_params = dict( c2=0, b=1.4, eps=0.3 ) - map_analytic = CzarnyMapping( 'M', dim=dim, **default_params ) - lims1 = (0, 1) - lims2 = (0, 2*np.pi) - period1 = False - period2 = True + default_params = dict(c2=0, b=1.4, eps=0.3) + map_symbolic = CzarnyMapping('M', dim=dim, **default_params) + limits = ((0, 1), (0, 2*np.pi)) + periodic = (False, True) + else: raise ValueError("Required 2D mapping not available") - p1 , p2 = degree - nc1, nc2 = ncells - - # Create the domain decomposition - domain_decomposition = DomainDecomposition(ncells=[nc1,nc2], periods=[period1,period2], comm=comm) - - # Create tensor spline space, distributed - V1 = SplineSpace( grid=np.linspace( *lims1, num=nc1+1 ), degree=p1, periodic=period1 ) - V2 = SplineSpace( grid=np.linspace( *lims2, num=nc2+1 ), degree=p2, periodic=period2 ) - space = TensorFemSpace( domain_decomposition, V1, V2 ) - - # Create spline mapping by interpolating analytical one - map_discrete = SplineMapping.from_mapping( space, map_analytic ) - elif dim == 3: # Input parameters if mapping == 'identity': - map_analytic = IdentityMapping( 'M', dim=dim ) - lims1 = (0, 1) - lims2 = (0, 1) - lims3 = (0, 1) - period1 = False - period2 = False - period3 = False + 
map_symbolic = IdentityMapping('M', dim=dim) + limits = ((0, 1), (0, 1), (0, 1)) + periodic = ( False, False, False) elif mapping == 'collela': - map_analytic = Collela3D( 'M', dim=dim ) - lims1 = (0, 1) - lims2 = (0, 1) - lims3 = (0, 1) - period1 = False - period2 = False - period3 = False + map_symbolic = Collela3D('M', dim=dim) + limits = ((0, 1), (0, 1), (0, 1)) + periodic = ( False, False, False) else: raise ValueError("Required 3D mapping not available") - p1 , p2 , p3 = degree - nc1, nc2, nc3 = ncells + # ... - # Create the domain decomposition - domain_decomposition = DomainDecomposition(ncells=[nc1,nc2,nc3], periods=[period1,period2,period3], comm=comm) + # Create the domain decomposition + domain_decomposition = DomainDecomposition(ncells=ncells, periods=periodic, comm=comm) - # Create tensor spline space, distributed - V1 = SplineSpace( grid=np.linspace( *lims1, num=nc1+1 ), degree=p1, periodic=period1 ) - V2 = SplineSpace( grid=np.linspace( *lims2, num=nc2+1 ), degree=p2, periodic=period2 ) - V3 = SplineSpace( grid=np.linspace( *lims3, num=nc3+1 ), degree=p3, periodic=period3 ) - space = TensorFemSpace( domain_decomposition, V1, V2, V3 ) + # Create 1D spline spaces, not distributed + spaces_1d = [SplineSpace(grid=np.linspace(*lims, num=nc+1), degree=p, periodic=per) + for lims, nc, p, per in zip(limits, ncells, degree, periodic)] - # Create spline mapping by interpolating analytical one - map_discrete = SplineMapping.from_mapping( space, map_analytic ) - # ... + # Create tensor spline space, distributed + space = TensorFemSpace(domain_decomposition, *spaces_1d) + + # Create spline mapping by interpolating analytical one + map_analytic = map_symbolic.get_callable_mapping() + map_discrete = SplineMapping.from_mapping(space, map_analytic) if return_space: return map_discrete, space - else: return map_discrete From a2c2f21c3fbc11e7b5f0fb3949068bc8f9b8b02e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 7 Oct 2022 12:48:17 +0200 Subject: [PATCH 07/28] Update class DiscreteDerham --- psydac/api/feec.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/psydac/api/feec.py b/psydac/api/feec.py index daa0f1485..0897414aa 100644 --- a/psydac/api/feec.py +++ b/psydac/api/feec.py @@ -20,6 +20,7 @@ def __init__(self, mapping, *spaces): self._dim = dim self._spaces = spaces self._mapping = mapping + self._callable_mapping = mapping.get_callable_mapping() if mapping else None if dim == 1: D0 = Derivative_1D(spaces[0], spaces[1]) @@ -86,6 +87,10 @@ def spaces(self): def mapping(self): return self._mapping + @property + def callable_mapping(self): + return self._callable_mapping + @property def derivatives_as_matrices(self): return tuple(V.diff.matrix for V in self.spaces[:-1]) @@ -104,8 +109,8 @@ def projectors(self, *, kind='global', nquads=None): P0 = Projector_H1(self.V0) P1 = Projector_L2(self.V1, nquads) if self.mapping: - P0_m = lambda f: P0(pull_1d_h1(f, self.mapping)) - P1_m = lambda f: P1(pull_1d_l2(f, self.mapping)) + P0_m = lambda f: P0(pull_1d_h1(f, self.callable_mapping)) + P1_m = lambda f: P1(pull_1d_l2(f, self.callable_mapping)) return P0_m, P1_m return P0, P1 @@ -122,12 +127,12 @@ def projectors(self, *, kind='global', nquads=None): raise TypeError('projector of space type {} is not available'.format(kind)) if self.mapping: - P0_m = lambda f: P0(pull_2d_h1(f, self.mapping)) - P2_m = lambda f: P2(pull_2d_l2(f, self.mapping)) + P0_m = lambda f: P0(pull_2d_h1(f, self.callable_mapping)) + P2_m = lambda f: 
P2(pull_2d_l2(f, self.callable_mapping)) if kind == 'hcurl': - P1_m = lambda f: P1(pull_2d_hcurl(f, self.mapping)) + P1_m = lambda f: P1(pull_2d_hcurl(f, self.callable_mapping)) elif kind == 'hdiv': - P1_m = lambda f: P1(pull_2d_hdiv(f, self.mapping)) + P1_m = lambda f: P1(pull_2d_hdiv(f, self.callable_mapping)) return P0_m, P1_m, P2_m return P0, P1, P2 @@ -137,9 +142,9 @@ def projectors(self, *, kind='global', nquads=None): P2 = Projector_Hdiv (self.V2, nquads) P3 = Projector_L2 (self.V3, nquads) if self.mapping: - P0_m = lambda f: P0(pull_3d_h1 (f, self.mapping)) - P1_m = lambda f: P1(pull_3d_hcurl(f, self.mapping)) - P2_m = lambda f: P2(pull_3d_hdiv (f, self.mapping)) - P3_m = lambda f: P3(pull_3d_l2 (f, self.mapping)) + P0_m = lambda f: P0(pull_3d_h1 (f, self.callable_mapping)) + P1_m = lambda f: P1(pull_3d_hcurl(f, self.callable_mapping)) + P2_m = lambda f: P2(pull_3d_hdiv (f, self.callable_mapping)) + P3_m = lambda f: P3(pull_3d_l2 (f, self.callable_mapping)) return P0_m, P1_m, P2_m, P3_m return P0, P1, P2, P3 From 8b8679b6d9618bd3e4aa3cb414c00375d291d510 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 7 Oct 2022 12:48:49 +0200 Subject: [PATCH 08/28] Fix broken tests --- psydac/api/tests/test_api_feec_1d.py | 8 +- psydac/api/tests/test_api_feec_2d.py | 18 ++-- psydac/api/tests/test_api_feec_3d.py | 26 ++++- psydac/core/tests/test_kernels.py | 8 +- psydac/mapping/tests/test_discrete_mapping.py | 2 +- psydac/polar/tests/test_c1_projections.py | 101 +++++++++--------- 6 files changed, 90 insertions(+), 73 deletions(-) diff --git a/psydac/api/tests/test_api_feec_1d.py b/psydac/api/tests/test_api_feec_1d.py index 770908209..44aa009dc 100644 --- a/psydac/api/tests/test_api_feec_1d.py +++ b/psydac/api/tests/test_api_feec_1d.py @@ -275,7 +275,7 @@ def discrete_energies(e, b): # ... 
# Prepare animations E_values = [E(xi) for xi in x1] - B_values = push_1d_l2(lambda x1: np.array([B(xi) for xi in x1]), x1, mapping) + B_values = push_1d_l2(lambda x1: np.array([B(xi) for xi in x1]), x1, F) fig2, ax2 = plt.subplots(1, 2, figsize=(12, 6)) make_plot(ax2[0], t, E_ex(0, x), E_values, x, [xmin, xmax], label='E') @@ -350,7 +350,7 @@ def discrete_energies(e, b): if plot_interval and (i % plot_interval == 0 or i == nsteps-1): E_values = [E(xi) for xi in x1] - B_values = push_1d_l2(lambda x1: np.array([B(xi) for xi in x1]), x1, mapping) + B_values = push_1d_l2(lambda x1: np.array([B(xi) for xi in x1]), x1, F) # Update plot update_plot(ax2[0], t, E_ex(t, x), E_values) @@ -386,7 +386,7 @@ def discrete_energies(e, b): # (does not work in parallel right now) # Error at final time E_values = np.array([E(xi) for xi in x1]) - B_values = push_1d_l2(lambda x1: np.array([B(xi) for xi in x1]), x1, mapping) + B_values = push_1d_l2(lambda x1: np.array([B(xi) for xi in x1]), x1, F) # for now: no allreduce needed here, since the spline evaluation already does that for us error_E = max(abs(E_ex(t, x) - E_values)) @@ -398,7 +398,7 @@ def discrete_energies(e, b): # compute L2 error as well F = mapping.get_callable_mapping() errE = lambda x1: (E(x1) - E_ex(t, *F(x1)))**2 * np.sqrt(F.metric_det(x1)) - errB = lambda x1: (push_1d_l2(B, x1, mapping) - B_ex(t, *F(x1)))**2 * np.sqrt(F.metric_det(x1)) + errB = lambda x1: (push_1d_l2(B, x1, F) - B_ex(t, *F(x1)))**2 * np.sqrt(F.metric_det(x1)) error_l2_E = np.sqrt(derham_h.V1.integral(errE)) error_l2_B = np.sqrt(derham_h.V0.integral(errB)) print('L2 norm of error on E(t,x) at final time: {:.2e}'.format(error_l2_E)) diff --git a/psydac/api/tests/test_api_feec_2d.py b/psydac/api/tests/test_api_feec_2d.py index 3c158cfe2..8165ab086 100644 --- a/psydac/api/tests/test_api_feec_2d.py +++ b/psydac/api/tests/test_api_feec_2d.py @@ -404,9 +404,9 @@ def discrete_energies(e, b): for j, x2j in enumerate(x2[0, :]): Ex_values[i, j], Ey_values[i, j] = \ - push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, mapping) + push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) - Bz_values[i, j] = push_2d_l2(B, x1i, x2j, mapping) + Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) # Electric field, x component fig2 = plot_field_and_error(r'E^x', x, y, Ex_values, Ex_ex(0, x, y), *gridlines) @@ -484,9 +484,9 @@ def discrete_energies(e, b): for j, x2j in enumerate(x2[0, :]): Ex_values[i, j], Ey_values[i, j] = \ - push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, mapping) + push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) - Bz_values[i, j] = push_2d_l2(B, x1i, x2j, mapping) + Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) # ... # Update plot @@ -530,9 +530,9 @@ def discrete_energies(e, b): for j, x2j in enumerate(x2[0, :]): Ex_values[i, j], Ey_values[i, j] = \ - push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, mapping) + push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) - Bz_values[i, j] = push_2d_l2(B, x1i, x2j, mapping) + Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) # ... 
# Error at final time @@ -546,9 +546,9 @@ def discrete_energies(e, b): # compute L2 error as well F = mapping.get_callable_mapping() - errx = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, mapping)[0] - Ex_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) - erry = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, mapping)[1] - Ey_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) - errz = lambda x1, x2: (push_2d_l2(B, x1, x2, mapping) - Bz_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) + errx = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, F)[0] - Ex_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) + erry = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, F)[1] - Ey_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) + errz = lambda x1, x2: (push_2d_l2(B, x1, x2, F) - Bz_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) error_l2_Ex = np.sqrt(derham_h.V1.spaces[0].integral(errx)) error_l2_Ey = np.sqrt(derham_h.V1.spaces[1].integral(erry)) error_l2_Bz = np.sqrt(derham_h.V0.integral(errz)) diff --git a/psydac/api/tests/test_api_feec_3d.py b/psydac/api/tests/test_api_feec_3d.py index 441fe9d5d..6c9297f76 100644 --- a/psydac/api/tests/test_api_feec_3d.py +++ b/psydac/api/tests/test_api_feec_3d.py @@ -83,6 +83,10 @@ def evaluation_all_times(fields, x, y, z): #================================================================================== def run_maxwell_3d_scipy(logical_domain, mapping, e_ex, b_ex, ncells, degree, periodic, dt, niter): + #------------------------------------------------------------------------------ + # Symbolic objects: SymPDE + #------------------------------------------------------------------------------ + domain = mapping(logical_domain) derham = Derham(domain) @@ -96,8 +100,12 @@ def run_maxwell_3d_scipy(logical_domain, mapping, e_ex, b_ex, ncells, degree, pe a2 = BilinearForm((u2, v2), integral(domain, dot(u2, v2))) a3 = BilinearForm((u3, v3), integral(domain, u3*v3)) - #============================================================================== + # Callable mapping + F = mapping.get_callable_mapping() + + #------------------------------------------------------------------------------ # Discrete objects: Psydac + #------------------------------------------------------------------------------ domain_h = discretize(domain, ncells=ncells, periodic=periodic, comm=MPI.COMM_WORLD) derham_h = discretize(derham, domain_h, degree=degree) @@ -155,7 +163,7 @@ def run_maxwell_3d_scipy(logical_domain, mapping, e_ex, b_ex, ncells, degree, pe b_values_0 = [] for zi in z: - b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping) + b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, F) b_values_0.append(b_value_phys[0]) b_values_0 = np.array(b_values_0) @@ -170,6 +178,10 @@ def run_maxwell_3d_scipy(logical_domain, mapping, e_ex, b_ex, ncells, degree, pe #================================================================================== def run_maxwell_3d_stencil(logical_domain, mapping, e_ex, b_ex, ncells, degree, periodic, dt, niter): + #------------------------------------------------------------------------------ + # Symbolic objects: SymPDE + #------------------------------------------------------------------------------ + domain = mapping(logical_domain) derham = Derham(domain) @@ -183,8 +195,12 @@ def run_maxwell_3d_stencil(logical_domain, mapping, e_ex, b_ex, ncells, degree, a2 = BilinearForm((u2, v2), integral(domain, dot(u2, v2))) 
a3 = BilinearForm((u3, v3), integral(domain, u3*v3)) - #============================================================================== + # Callable mapping + F = mapping.get_callable_mapping() + + #------------------------------------------------------------------------------ # Discrete objects: Psydac + #------------------------------------------------------------------------------ domain_h = discretize(domain, ncells=ncells, periodic=periodic, comm=MPI.COMM_WORLD) derham_h = discretize(derham, domain_h, degree=degree) @@ -239,7 +255,7 @@ def run_maxwell_3d_stencil(logical_domain, mapping, e_ex, b_ex, ncells, degree, b_values_0 = [] for zi in z: - b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, mapping) + b_value_phys = push_3d_hdiv(bx_value_fun, by_value_fun, bz_value_fun, x, y, zi, F) b_values_0.append(b_value_phys[0]) b_values_0 = np.array(b_values_0) @@ -296,6 +312,7 @@ class CollelaMapping3D(Mapping): error = run_maxwell_3d_scipy(logical_domain, M, e_ex, b_ex, ncells, degree, periodic, dt, niter) assert abs(error - 0.04294761712765949) < 1e-9 +#------------------------------------------------------------------------------ def test_maxwell_3d_2(): class CollelaMapping3D(Mapping): @@ -334,6 +351,7 @@ class CollelaMapping3D(Mapping): error = run_maxwell_3d_stencil(logical_domain, M, e_ex, b_ex, ncells, degree, periodic, dt, niter) assert abs(error - 0.24586986658559362) < 1e-9 + #============================================================================== # CLEAN UP SYMPY NAMESPACE #============================================================================== diff --git a/psydac/core/tests/test_kernels.py b/psydac/core/tests/test_kernels.py index ba8ed3c4f..41859afa5 100644 --- a/psydac/core/tests/test_kernels.py +++ b/psydac/core/tests/test_kernels.py @@ -73,10 +73,10 @@ def test_regular_jacobians(geometry, npts_per_cell): # Direct API if ldim == 2: - jacobian_matrix_direct = np.array([[mapping.jac_mat(e1, e2) for e2 in regular_grid[1]] for e1 in regular_grid[0]]) + jacobian_matrix_direct = np.array([[mapping.jacobian(e1, e2) for e2 in regular_grid[1]] for e1 in regular_grid[0]]) if ldim == 3: - jacobian_matrix_direct = np.array([[[mapping.jac_mat(e1, e2, e3) + jacobian_matrix_direct = np.array([[[mapping.jacobian(e1, e2, e3) for e3 in regular_grid[2]] for e2 in regular_grid[1]] for e1 in regular_grid[0]]) @@ -211,10 +211,10 @@ def test_irregular_jacobians(geometry, npts): # Direct API if ldim == 2: - jacobian_matrix_direct = np.array([[mapping.jac_mat(e1, e2) for e2 in irregular_grid[1]] for e1 in irregular_grid[0]]) + jacobian_matrix_direct = np.array([[mapping.jacobian(e1, e2) for e2 in irregular_grid[1]] for e1 in irregular_grid[0]]) if ldim == 3: - jacobian_matrix_direct = np.array([[[mapping.jac_mat(e1, e2, e3) + jacobian_matrix_direct = np.array([[[mapping.jacobian(e1, e2, e3) for e3 in irregular_grid[2]] for e2 in irregular_grid[1]] for e1 in irregular_grid[0]]) diff --git a/psydac/mapping/tests/test_discrete_mapping.py b/psydac/mapping/tests/test_discrete_mapping.py index c448d9d77..77c2be51c 100644 --- a/psydac/mapping/tests/test_discrete_mapping.py +++ b/psydac/mapping/tests/test_discrete_mapping.py @@ -302,7 +302,7 @@ def test_nurbs_circle(): assert np.allclose((x_p, y_p), (x_i, y_i), atol=ATOL, rtol=RTOL) - J_p = mapping.jac_mat(x1, x2) + J_p = mapping.jacobian(x1, x2) J_i = disk.gradient(u=x1, v=x2) assert np.allclose(J_i[:2], J_p, atol=ATOL, rtol=RTOL) diff --git a/psydac/polar/tests/test_c1_projections.py 
b/psydac/polar/tests/test_c1_projections.py index 814b968f7..9f7f291fb 100644 --- a/psydac/polar/tests/test_c1_projections.py +++ b/psydac/polar/tests/test_c1_projections.py @@ -1,27 +1,26 @@ # coding: utf-8 -#!/usr/bin/env python import numpy as np import pytest - from mpi4py import MPI + from sympde.topology.analytical_mapping import PolarMapping -from psydac.polar.c1_projections import C1Projector -from psydac.mapping.discrete import SplineMapping -from psydac.linalg.stencil import StencilVector, StencilMatrix -from psydac.fem.splines import SplineSpace -from psydac.fem.tensor import TensorFemSpace -from psydac.ddm.cart import DomainDecomposition +from psydac.polar.c1_projections import C1Projector +from psydac.mapping.discrete import SplineMapping +from psydac.linalg.stencil import StencilVector, StencilMatrix +from psydac.fem.splines import SplineSpace +from psydac.fem.tensor import TensorFemSpace +from psydac.ddm.cart import DomainDecomposition #============================================================================== -@pytest.mark.parametrize( 'degrees', [(2,2),(2,3),(3,2),(3,3)] ) -@pytest.mark.parametrize( 'ncells' , [(9,11),(10,12),(12,14)] ) +@pytest.mark.parametrize('degrees', [(2, 2), (2, 3), (3,2), (3, 3)]) +@pytest.mark.parametrize('ncells' , [(9, 11), (10, 12), (12, 14)]) -def test_c1_projections( degrees, ncells, verbose=False ): +def test_c1_projections(degrees, ncells, verbose=False): if verbose: - np.set_printoptions( precision=2, linewidth=200 ) + np.set_printoptions(precision=2, linewidth=200) #-------------------------------------------- # Setup @@ -32,7 +31,7 @@ def test_c1_projections( degrees, ncells, verbose=False ): lims_2 = (0, 2*np.pi) period_1 = False period_2 = True - map_analytic = PolarMapping( 'M', dim=2, rmin=0, rmax=1, c1=0.0, c2=0.0) + map_analytic = PolarMapping('M', dim=2, rmin=0, rmax=1, c1=0.0, c2=0.0) # Discretization: number of elements and polynomial degree ne1, ne2 = ncells @@ -43,31 +42,31 @@ def test_c1_projections( degrees, ncells, verbose=False ): #-------------------------------------------- # Uniform grid in logical space - grid_1 = np.linspace( *lims_1, num=ne1+1 ) - grid_2 = np.linspace( *lims_2, num=ne2+1 ) + grid_1 = np.linspace(*lims_1, num=ne1+1) + grid_2 = np.linspace(*lims_2, num=ne2+1) # 1D finite element spaces - V1 = SplineSpace( p1, grid=grid_1, periodic=period_1 ) - V2 = SplineSpace( p2, grid=grid_2, periodic=period_2 ) + V1 = SplineSpace(p1, grid=grid_1, periodic=period_1) + V2 = SplineSpace(p2, grid=grid_2, periodic=period_2) domain_decomposition = DomainDecomposition(ncells, periods=[period_1, period_2], comm=MPI.COMM_WORLD) # 2D tensor-product space - V = TensorFemSpace( domain_decomposition, V1, V2 ) + V = TensorFemSpace(domain_decomposition, V1, V2) # Spline mapping - map_discrete = SplineMapping.from_mapping( V, map_analytic ) + map_discrete = SplineMapping.from_mapping(V, map_analytic.get_callable_mapping()) # C1 projector - proj = C1Projector( map_discrete ) + proj = C1Projector(map_discrete) #-------------------------------------------- # Linear algebra objects #-------------------------------------------- # Matrix and vector in tensor-product basis - A = StencilMatrix( V.vector_space, V.vector_space ) - b = StencilVector( V.vector_space ) + A = StencilMatrix(V.vector_space, V.vector_space) + b = StencilVector(V.vector_space) # Set values of matrix A[:, :, 0, 0] = 4 @@ -80,33 +79,33 @@ def test_c1_projections( degrees, ncells, verbose=False ): s1, s2 = V.vector_space.starts e1, e2 = V.vector_space.ends n1, n2 = 
A.domain.npts - perturbation = 0.1 * np.random.random( (e1-s1+1, e2-s2+1, p1, p2) ) - for i1 in range(s1,e1+1): - for i2 in range(s2,e2+1): - for k1 in range(1,p1): - for k2 in range(1,p2): - j1 = (i1+k1)%n1 - j2 = (i2+k2)%n2 - eps = perturbation[i1-s1,i2-s2,k1,k2] + perturbation = 0.1 * np.random.random((e1-s1+1, e2-s2+1, p1, p2)) + for i1 in range(s1, e1+1): + for i2 in range(s2, e2+1): + for k1 in range(1, p1): + for k2 in range(1, p2): + j1 = (i1+k1) % n1 + j2 = (i2+k2) % n2 + eps = perturbation[i1-s1, i2-s2, k1, k2] A[i1,i2, k1, k2] += eps A[j1,j2,-k1,-k2] += eps A.remove_spurious_entries() if verbose: - print( "A:" ) - print( A.toarray() ) + print("A:") + print(A.toarray()) print() # Set values of vector s1, s2 = b.starts e1, e2 = b.ends - b[s1:e1+1, s2:e2+1] = np.random.random( (e1-s1+1, e2-s2+1) ) + b[s1:e1+1, s2:e2+1] = np.random.random((e1-s1+1, e2-s2+1)) b.update_ghost_regions() if verbose: - print( "b:" ) - print( b.toarray().reshape( b.space.npts ) ) + print("b:") + print(b.toarray().reshape(b.space.npts)) print() #-------------------------------------------- @@ -115,37 +114,37 @@ def test_c1_projections( degrees, ncells, verbose=False ): # Compute A' = E^T A E # Compute b' = E b - Ap = proj.change_matrix_basis( A ) - bp = proj.change_rhs_basis ( b ) + Ap = proj.change_matrix_basis(A) + bp = proj.change_rhs_basis (b) # Compute (E^T A E) b' = A' b' - Ap_bp = Ap.dot( bp ) + Ap_bp = Ap.dot(bp) # Compute E^T (A (E b')) = A' b' - E_bp = proj.convert_to_tensor_basis( bp ) - A_E_bp = A.dot( E_bp ) - Et_A_E_bp = proj.change_rhs_basis( A_E_bp ) + E_bp = proj.convert_to_tensor_basis(bp) + A_E_bp = A.dot(E_bp) + Et_A_E_bp = proj.change_rhs_basis(A_E_bp) if verbose: - print( "(E^T A E) b' :" ) - print( Ap_bp[0].toarray() ) - print( Ap_bp[1].toarray().reshape( Ap_bp[1].space.npts ) ) + print("(E^T A E) b' :") + print(Ap_bp[0].toarray()) + print(Ap_bp[1].toarray().reshape(Ap_bp[1].space.npts)) print() - print( "E^T (A (E b')) :" ) - print( Et_A_E_bp[0].toarray() ) - print( Et_A_E_bp[1].toarray().reshape( Et_A_E_bp[1].space.npts ) ) + print("E^T (A (E b')) :" ) + print(Et_A_E_bp[0].toarray() ) + print(Et_A_E_bp[1].toarray().reshape(Et_A_E_bp[1].space.npts)) print() # Verity that two results are identical kwargs = {'rtol': 1e-10, 'atol': 1e-10} - assert np.allclose( Ap_bp[0].toarray(), Et_A_E_bp[0].toarray(), **kwargs ) - assert np.allclose( Ap_bp[1].toarray(), Et_A_E_bp[1].toarray(), **kwargs ) + assert np.allclose(Ap_bp[0].toarray(), Et_A_E_bp[0].toarray(), **kwargs) + assert np.allclose(Ap_bp[1].toarray(), Et_A_E_bp[1].toarray(), **kwargs) if verbose: - print( "PASSED" ) + print("PASSED") return locals() #============================================================================== if __name__ == "__main__": - namespace = test_c1_projections( degrees=(2,2), ncells=(4,5), verbose=True ) + namespace = test_c1_projections(degrees=(2, 2), ncells=(4, 5), verbose=True) From 1cb0eeabb75e744fd381db1af9a1ebd5498763b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 14 Oct 2022 16:12:21 +0200 Subject: [PATCH 09/28] Add some comments and checks --- psydac/api/feec.py | 4 ++++ psydac/feec/pull_push.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/psydac/api/feec.py b/psydac/api/feec.py index 0897414aa..94f224109 100644 --- a/psydac/api/feec.py +++ b/psydac/api/feec.py @@ -1,3 +1,5 @@ +from sympde.topology.mapping import Mapping + from psydac.api.basic import BasicDiscrete from psydac.feec.derivatives import Derivative_1D, Gradient_2D, Gradient_3D from 
psydac.feec.derivatives import ScalarCurl_2D, VectorCurl_2D, Curl_3D @@ -16,6 +18,8 @@ class DiscreteDerham(BasicDiscrete): """ def __init__(self, mapping, *spaces): + assert (mapping is None) or isinstance(mapping, Mapping) + dim = len(spaces) - 1 self._dim = dim self._spaces = spaces diff --git a/psydac/feec/pull_push.py b/psydac/feec/pull_push.py index 9e900ce8e..47ff815ed 100644 --- a/psydac/feec/pull_push.py +++ b/psydac/feec/pull_push.py @@ -345,6 +345,7 @@ def push_2d_hcurl(f1, f2, eta1, eta2, F): assert isinstance(F, BasicCallableMapping) assert F.ldim == 2 +# # Assume that f is a list/tuple of callable functions # f1, f2 = f eta = eta1, eta2 @@ -365,6 +366,7 @@ def push_2d_hdiv(f1, f2, eta1, eta2, F): assert isinstance(F, BasicCallableMapping) assert F.ldim == 2 +# # Assume that f is a list/tuple of callable functions # f1, f2 = f eta = eta1, eta2 @@ -409,6 +411,7 @@ def push_3d_hcurl(f1, f2, f3, eta1, eta2, eta3, F): assert isinstance(F, BasicCallableMapping) assert F.ldim == 3 +# # Assume that f is a list/tuple of callable functions # f1, f2, f3 = f eta = eta1, eta2, eta3 @@ -439,6 +442,7 @@ def push_3d_hdiv(f1, f2, f3, eta1, eta2, eta3, F): assert isinstance(F, BasicCallableMapping) assert F.ldim == 3 +# # Assume that f is a list/tuple of callable functions # f1, f2, f3 = f eta = eta1, eta2, eta3 From dd191c1d87828d5502c7ad289fbf9c329563695b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 14 Oct 2022 16:33:11 +0200 Subject: [PATCH 10/28] Attach spline mapping to sympde Mapping object This is done by Geometry.read(), which is called when discretizing a SymPDE domain using a geometry file. To this end we call the method Mapping.set_callable_mapping(F), with F being a spline/NURBS mapping. --- psydac/cad/geometry.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/psydac/cad/geometry.py b/psydac/cad/geometry.py index 198938b60..3c99a2052 100644 --- a/psydac/cad/geometry.py +++ b/psydac/cad/geometry.py @@ -344,6 +344,11 @@ def read( self, filename, comm=None ): h5.close() # ... + # Add spline callable mappings to domain undefined mappings + # NOTE: We assume that interiors and mappings.values() use the same ordering + for patch, F in zip(interiors, mappings.values()): + patch.mapping.set_callable_mapping(F) + # ... self._ldim = ldim self._pdim = pdim From f3bf330e941f85fbbadaff8d0b94abb6d4c24d13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 14 Oct 2022 16:39:13 +0200 Subject: [PATCH 11/28] In discretize_derham get Mapping from Geometry object --- psydac/api/discretization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/psydac/api/discretization.py b/psydac/api/discretization.py index 62b061449..a46f9ec03 100644 --- a/psydac/api/discretization.py +++ b/psydac/api/discretization.py @@ -45,8 +45,8 @@ #============================================================================== def discretize_derham(derham, domain_h, *args, **kwargs): - ldim = derham.shape - mapping = derham.spaces[0].domain.mapping + ldim = derham.shape + mapping = domain_h.domain.mapping # NOTE: assuming single-patch domain! 
bases = ['B'] + ldim * ['M'] spaces = [discretize_space(V, domain_h, basis=basis, **kwargs) \ From 412543f776e9166bddc6e76997440090714cb23f Mon Sep 17 00:00:00 2001 From: Said Hadjout Date: Sat, 15 Oct 2022 16:35:17 +0200 Subject: [PATCH 12/28] Update test_geometry.py --- psydac/cad/tests/test_geometry.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/psydac/cad/tests/test_geometry.py b/psydac/cad/tests/test_geometry.py index 3c60a9882..7ca102d85 100644 --- a/psydac/cad/tests/test_geometry.py +++ b/psydac/cad/tests/test_geometry.py @@ -4,7 +4,7 @@ import numpy as np import os -from sympde.topology import Domain, Line, Square, Cube +from sympde.topology import Domain, Line, Square, Cube, Mapping from psydac.cad.geometry import Geometry, export_nurbs_to_hdf5, refine_nurbs from psydac.cad.geometry import import_geopdes_to_nurbs @@ -27,7 +27,8 @@ def test_geometry_2d_1(): mapping = discrete_mapping('identity', ncells=ncells, degree=degree) # create a topological domain - domain = Square(name='Omega') + F = Mapping('F', dim=2) + domain = F(Square(name='Omega')) # associate the mapping to the topological domain mappings = {'Omega': mapping} @@ -73,7 +74,8 @@ def test_geometry_2d_2(): mapping = refine( mapping, axis=0, values=[0.3, 0.6, 0.8] ) # create a topological domain - domain = Square(name='Omega') + F = Mapping('F', dim=2) + domain = F(Square(name='Omega')) # associate the mapping to the topological domain mappings = {'Omega': mapping} From 5b83cc65d39c2a9f0829667bb69ec13eff9d929c Mon Sep 17 00:00:00 2001 From: Said Hadjout Date: Sat, 15 Oct 2022 20:58:08 +0200 Subject: [PATCH 13/28] Update geometry.py --- psydac/cad/geometry.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/psydac/cad/geometry.py b/psydac/cad/geometry.py index 3c99a2052..c0c8727cc 100644 --- a/psydac/cad/geometry.py +++ b/psydac/cad/geometry.py @@ -81,10 +81,10 @@ def __init__( self, domain=None, ncells=None, periodic=None, mappings=None, assert isinstance( periodic, dict) # ... check sanity - interior_names = sorted(domain.interior_names) + interior_names = domain.interior_names if domain.logical_domain is None else domain.logical_domain.interior_names mappings_keys = sorted(list(mappings.keys())) - assert( interior_names == mappings_keys ) + assert( sorted(interior_names) == mappings_keys ) # ... 
if periodic is None: @@ -100,10 +100,10 @@ def __init__( self, domain=None, ncells=None, periodic=None, mappings=None, self._is_parallel = comm is not None if len(domain) == 1: - self._ddm = DomainDecomposition(ncells[domain.name], periodic[domain.name], comm=comm) + self._ddm = DomainDecomposition(ncells[interior_names[0]], periodic[interior_names[0]], comm=comm) else: - ncells = [ncells[itr.name] for itr in domain.interior] - periodic = [periodic[itr.name] for itr in domain.interior] + ncells = [ncells[itr] for itr in interior_names] + periodic = [periodic[itr] for itr in interior_names] self._ddm = MultiPatchDomainDecomposition(ncells, periodic, comm=comm) @@ -123,7 +123,8 @@ def from_discrete_mapping( cls, mapping, comm=None ): raise NotImplementedError('') if mapping.ldim == 2: - domain = Square(name='Omega') + M = Mapping('mapping_0',dim=2) + domain = M(Square(name='Omega')) mappings = {'Omega': mapping} ncells = {'Omega':mapping.space.domain_decomposition.ncells} periodic = {'Omega':mapping.space.domain_decomposition.periods} @@ -131,7 +132,8 @@ def from_discrete_mapping( cls, mapping, comm=None ): return Geometry(domain=domain, ncells=ncells, periodic=periodic, mappings=mappings, comm=comm) elif mapping.ldim == 3: - domain = Cube(name='Omega') + M = Mapping('mapping_0',dim=3) + domain = M(Cube(name='Omega')) mappings = {'Omega': mapping} ncells = {'Omega':mapping.space.domain_decomposition.ncells} periodic = {'Omega':mapping.space.domain_decomposition.periods} From 29b91b6e6c551fb30a843bcdd0b7b29900fdbd9d Mon Sep 17 00:00:00 2001 From: saidctb Date: Sat, 15 Oct 2022 22:36:52 +0200 Subject: [PATCH 14/28] use mapped domain name --- psydac/cad/geometry.py | 14 +++++++------- psydac/cad/tests/test_geometry.py | 10 +++++----- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/psydac/cad/geometry.py b/psydac/cad/geometry.py index c0c8727cc..dd4249118 100644 --- a/psydac/cad/geometry.py +++ b/psydac/cad/geometry.py @@ -81,7 +81,7 @@ def __init__( self, domain=None, ncells=None, periodic=None, mappings=None, assert isinstance( periodic, dict) # ... 
check sanity - interior_names = domain.interior_names if domain.logical_domain is None else domain.logical_domain.interior_names + interior_names = domain.interior_names mappings_keys = sorted(list(mappings.keys())) assert( sorted(interior_names) == mappings_keys ) @@ -125,18 +125,18 @@ def from_discrete_mapping( cls, mapping, comm=None ): if mapping.ldim == 2: M = Mapping('mapping_0',dim=2) domain = M(Square(name='Omega')) - mappings = {'Omega': mapping} - ncells = {'Omega':mapping.space.domain_decomposition.ncells} - periodic = {'Omega':mapping.space.domain_decomposition.periods} + mappings = {domain.name: mapping} + ncells = {domain.name:mapping.space.domain_decomposition.ncells} + periodic = {domain.name:mapping.space.domain_decomposition.periods} return Geometry(domain=domain, ncells=ncells, periodic=periodic, mappings=mappings, comm=comm) elif mapping.ldim == 3: M = Mapping('mapping_0',dim=3) domain = M(Cube(name='Omega')) - mappings = {'Omega': mapping} - ncells = {'Omega':mapping.space.domain_decomposition.ncells} - periodic = {'Omega':mapping.space.domain_decomposition.periods} + mappings = {domain.name: mapping} + ncells = {domain.name:mapping.space.domain_decomposition.ncells} + periodic = {domain.name:mapping.space.domain_decomposition.periods} return Geometry(domain=domain, ncells=ncells, periodic=periodic, mappings=mappings, comm=comm) diff --git a/psydac/cad/tests/test_geometry.py b/psydac/cad/tests/test_geometry.py index 7ca102d85..ec9a3c0d2 100644 --- a/psydac/cad/tests/test_geometry.py +++ b/psydac/cad/tests/test_geometry.py @@ -31,10 +31,10 @@ def test_geometry_2d_1(): domain = F(Square(name='Omega')) # associate the mapping to the topological domain - mappings = {'Omega': mapping} + mappings = {domain.name: mapping} # Define ncells as a dict - ncells = {'Omega':ncells} + ncells = {domain.name:ncells} # create a geometry from a topological domain and the dict of mappings geo = Geometry(domain=domain, ncells=ncells, mappings=mappings) @@ -78,12 +78,12 @@ def test_geometry_2d_2(): domain = F(Square(name='Omega')) # associate the mapping to the topological domain - mappings = {'Omega': mapping} + mappings = {domain.name: mapping} # Define ncells as a dict - ncells = {'Omega':[len(space.breaks)-1 for space in mapping.space.spaces]} + ncells = {domain.name:[len(space.breaks)-1 for space in mapping.space.spaces]} - periodic = {'Omega':[space.periodic for space in mapping.space.spaces]} + periodic = {domain.name:[space.periodic for space in mapping.space.spaces]} # create a geometry from a topological domain and the dict of mappings geo = Geometry(domain=domain, ncells=ncells, periodic=periodic, mappings=mappings) From c51a46535e5ceaa89e7039ff502ee42a10985ac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Mon, 17 Oct 2022 08:41:24 +0200 Subject: [PATCH 15/28] Add draft of FEEC test with spline mapping --- .../temp-test_api_feec_2d_spline-mapping.py | 830 ++++++++++++++++++ 1 file changed, 830 insertions(+) create mode 100644 psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py diff --git a/psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py b/psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py new file mode 100644 index 000000000..3b8bc364f --- /dev/null +++ b/psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py @@ -0,0 +1,830 @@ +# coding: utf-8 +# Copyright 2021 Yaman Güçlü + +import pytest + +#============================================================================== +# TIME STEPPING METHOD 
+#==============================================================================
+def step_faraday_2d(dt, e, b, M1, M2, D1, D1_T, **kwargs):
+    """
+    Exactly integrate the semi-discrete Faraday equation over one time-step:
+
+    b_new = b - ∆t D1 e
+
+    """
+    b -= dt * D1.dot(e)
+    # e += 0
+
+def step_ampere_2d(dt, e, b, M1, M2, D1, D1_T, *, pc=None, tol=1e-7, verbose=False):
+    """
+    Exactly integrate the semi-discrete Ampère equation over one time-step:
+
+    e_new = e + ∆t (M1^{-1} D1^T M2) b
+
+    """
+    options = dict(tol=tol, verbose=verbose)
+    if pc:
+        from psydac.linalg.iterative_solvers import pcg as isolve
+        options['pc'] = pc
+    else:
+        from psydac.linalg.iterative_solvers import cg as isolve
+
+    # b += 0
+    e += dt * isolve(M1, D1_T.dot(M2.dot(b)), **options)[0]
+
+#==============================================================================
+# ANALYTICAL SOLUTION
+#==============================================================================
+class CavitySolution:
+    """
+    Time-harmonic solution of Maxwell's equations in a rectangular cavity with
+    perfectly conducting walls. This is a "transverse electric" solution, with
+    E = (Ex, Ey) and B = Bz. Domain is [0, a] x [0, b].
+
+    Parameters
+    ----------
+    a : float
+        Size of cavity along x direction.
+
+    b : float
+        Size of cavity along y direction.
+
+    c : float
+        Speed of light in arbitrary units.
+
+    nx : int
+        Number of half wavelengths along x direction.
+
+    ny : int
+        Number of half wavelengths along y direction.
+
+    """
+    def __init__(self, *, a, b, c, nx, ny):
+
+        from sympy import symbols
+        from sympy import lambdify
+
+        sym_params, sym_fields, sym_energy = self.symbolic()
+
+        params = {'a': a, 'b': b, 'c': c, 'nx': nx, 'ny': ny}
+        repl = [(sym_params[k], params[k]) for k in sym_params.keys()]
+        args = symbols('t, x, y')
+
+        # Callable functions
+        fields = {k: lambdify(args , v.subs(repl), 'numpy') for k, v in sym_fields.items()}
+        energy = {k: lambdify(args[0], v.subs(repl), 'numpy') for k, v in sym_energy.items()}
+
+        # Store private attributes
+        self._sym_params = sym_params
+        self._sym_fields = sym_fields
+        self._sym_energy = sym_energy
+
+        self._params = params
+        self._fields = fields
+        self._energy = energy
+
+    #--------------------------------------------------------------------------
+    @staticmethod
+    def symbolic():
+
+        from sympy import symbols
+        from sympy import cos, sin, pi, sqrt
+        from sympy.integrals import integrate
+
+        t, x, y = symbols('t x y', real=True)
+        a, b, c = symbols('a b c', positive=True)
+        nx, ny = symbols('nx ny', positive=True, integer=True)
+
+        kx = pi * nx / a
+        ky = pi * ny / b
+        omega = c * sqrt(kx**2 + ky**2)
+
+        # Exact solutions for electric and magnetic field
+        Ex = cos(kx * x) * sin(ky * y) * cos(omega * t)
+        Ey = -sin(kx * x) * cos(ky * y) * cos(omega * t)
+        Bz = cos(kx * x) * cos(ky * y) * sin(omega * t) * (kx + ky) / omega
+
+        # Electric and magnetic energy in domain
+        We = integrate(integrate((Ex**2 + Ey**2)/ 2, (x, 0, a)), (y, 0, b)).simplify()
+        Wb = integrate(integrate( Bz**2 / 2, (x, 0, a)), (y, 0, b)).simplify()
+
+        params = {'a': a, 'b': b, 'c': c, 'nx': nx, 'ny': ny}
+        fields = {'Ex': Ex, 'Ey': Ey, 'Bz': Bz}
+        energy = {'We': We, 'Wb': Wb}
+
+        return params, fields, energy
+
+    #--------------------------------------------------------------------------
+    @property
+    def params(self):
+        return self._params
+
+    @property
+    def fields(self):
+        return self._fields
+
+    @property
+    def energy(self):
+        return self._energy
+
+    @property
+    def derived_params(self):
+        from numpy import pi,
sqrt + kx = pi * self.params['nx'] / self.params['a'] + ky = pi * self.params['ny'] / self.params['b'] + omega = self.params['c'] * sqrt(kx**2 + ky**2) + return {'kx': kx, 'ky' : ky, 'omega': omega} + + @property + def sym_params(self): + return self._sym_params + + @property + def sym_fields(self): + return self._sym_fields + + @property + def sym_energy(self): + return self._sym_energy + +#============================================================================== +# VISUALIZATION +#============================================================================== + +def add_colorbar(im, ax, **kwargs): + from mpl_toolkits.axes_grid1 import make_axes_locatable + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size=0.2, pad=0.3) + cbar = ax.get_figure().colorbar(im, cax=cax, **kwargs) + return cbar + +def plot_field_and_error(name, x, y, field_h, field_ex, *gridlines): + import matplotlib.pyplot as plt + fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15, 6)) + im0 = ax0.contourf(x, y, field_h) + im1 = ax1.contourf(x, y, field_ex - field_h) + ax0.set_title(r'${0}_h$'.format(name)) + ax1.set_title(r'${0} - {0}_h$'.format(name)) + for ax in (ax0, ax1): + ax.plot(*gridlines[0], color='k') + ax.plot(*gridlines[1], color='k') + ax.set_xlabel('x', fontsize=14) + ax.set_ylabel('y', fontsize=14, rotation='horizontal') + ax.set_aspect('equal') + add_colorbar(im0, ax0) + add_colorbar(im1, ax1) + fig.suptitle('Time t = {:10.3e}'.format(0)) + fig.tight_layout() + return fig + +def update_plot(fig, t, x, y, field_h, field_ex): + ax0, ax1, cax0, cax1 = fig.axes + ax0.collections.clear(); cax0.clear() + ax1.collections.clear(); cax1.clear() + im0 = ax0.contourf(x, y, field_h) + im1 = ax1.contourf(x, y, field_ex - field_h) + fig.colorbar(im0, cax=cax0) + fig.colorbar(im1, cax=cax1) + fig.suptitle('Time t = {:10.3e}'.format(t)) + fig.canvas.draw() + +#============================================================================== +# SIMULATION +#============================================================================== +def run_maxwell_2d_TE(*, eps, ncells, degree, periodic, Cp, nsteps, tend, + splitting_order, plot_interval, diagnostics_interval, tol, verbose): + + import numpy as np + import matplotlib.pyplot as plt + from mpi4py import MPI + from scipy.integrate import dblquad + + from sympde.topology import Square + from sympde.topology import Mapping +# from sympde.topology import CollelaMapping2D + from sympde.topology import Derham + from sympde.topology import elements_of + from sympde.topology import NormalVector + from sympde.calculus import dot, cross + from sympde.expr import integral + from sympde.expr import BilinearForm + + from psydac.api.discretization import discretize + from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL + from psydac.feec.pull_push import push_2d_hcurl, push_2d_l2 + from psydac.utilities.utils import refine_array_1d + + #-------------------------------------------------------------------------- + # Problem setup + #-------------------------------------------------------------------------- + + # Physical domain is rectangle [0, a] x [0, b] + a = 2.0 + b = 2.0 + + # Speed of light is 1 + c = 1.0 + + # Mode number + (nx, ny) = (2, 2) + + # Exact solution + exact_solution = CavitySolution(a=a, b=b, c=c, nx=nx, ny=ny) + + # Exact fields, as callable functions of (t, x, y) + Ex_ex = exact_solution.fields['Ex'] + Ey_ex = exact_solution.fields['Ey'] + Bz_ex = exact_solution.fields['Bz'] + + #... 
+ + #-------------------------------------------------------------------------- + # Analytical objects: SymPDE + #-------------------------------------------------------------------------- + + # Logical domain is unit square [0, 1] x [0, 1] + logical_domain = Square('Omega') + + # Mapping and physical domain + class CollelaMapping2D(Mapping): + + _ldim = 2 + _pdim = 2 + _expressions = {'x': 'a * (x1 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))', + 'y': 'b * (x2 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))'} + +# mapping = CollelaMapping2D('M', k1=1, k2=1, eps=eps) + mapping = CollelaMapping2D('M', a=a, b=b, eps=eps) +# domain = mapping(logical_domain) + + # ------------- + import os + from sympde.topology.domain import Domain + mesh_dir = 'mesh' + filename = os.path.join(mesh_dir, 'collela_2d.h5') + domain = Domain.from_file(filename) + # ------------- + + # DeRham sequence + derham = Derham(domain, sequence=['h1', 'hcurl', 'l2']) + + # Trial and test functions + u1, v1 = elements_of(derham.V1, names='u1, v1') # electric field E = (Ex, Ey) + u2, v2 = elements_of(derham.V2, names='u2, v2') # magnetic field Bz + + # Bilinear forms that correspond to mass matrices for spaces V1 and V2 + a1 = BilinearForm((u1, v1), integral(domain, dot(u1, v1))) + a2 = BilinearForm((u2, v2), integral(domain, u2 * v2)) + + # If needed, use penalization to apply homogeneous Dirichlet BCs + if not periodic: + nn = NormalVector('nn') + a1_bc = BilinearForm((u1, v1), + integral(domain.boundary, 1e30 * cross(u1, nn) * cross(v1, nn))) + + #-------------------------------------------------------------------------- + # Discrete objects: Psydac + #-------------------------------------------------------------------------- + + # Discrete physical domain and discrete DeRham sequence +# domain_h = discretize(domain, ncells=[ncells, ncells], periodic=[periodic, periodic], comm=MPI.COMM_WORLD) + domain_h = discretize(domain, filename=filename, comm=MPI.COMM_WORLD) + derham_h = discretize(derham, domain_h, degree=[degree, degree]) + + # Discrete bilinear forms + a1_h = discretize(a1, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL) + a2_h = discretize(a2, domain_h, (derham_h.V2, derham_h.V2), backend=PSYDAC_BACKEND_GPYCCEL) + + # Mass matrices (StencilMatrix objects) + M1 = a1_h.assemble() + M2 = a2_h.assemble() + + # Differential operators + D0, D1 = derham_h.derivatives_as_matrices + + # Discretize and assemble penalization matrix + if not periodic: + a1_bc_h = discretize(a1_bc, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL) + M1_bc = a1_bc_h.assemble() + + # Transpose of derivative matrix + D1_T = D1.T + + # Projectors + P0, P1, P2 = derham_h.projectors(nquads=[degree+2, degree+2]) + + # Logical and physical grids + F = mapping.get_callable_mapping() + grid_x1 = derham_h.V0.breaks[0] + grid_x2 = derham_h.V0.breaks[1] + + grid_x, grid_y = F(*np.meshgrid(grid_x1, grid_x2, indexing='ij')) + + #-------------------------------------------------------------------------- + # Time integration setup + #-------------------------------------------------------------------------- + + t = 0 + + # Initial conditions, discrete fields + E = P1((lambda x, y: Ex_ex(0, x, y), lambda x, y: Ey_ex(0, x, y))) + B = P2(lambda x, y: Bz_ex(0, x, y)) + + # Initial conditions, spline coefficients + e = E.coeffs + b = B.coeffs + + # Time step size + dx_min_1 = np.sqrt(np.diff(grid_x, axis=0)**2 + np.diff(grid_y, axis=0)**2).min() + dx_min_2 = np.sqrt(np.diff(grid_x, axis=1)**2 + np.diff(grid_y, 
axis=1)**2).min() + + dx_min = min(dx_min_1, dx_min_2) + dt = Cp * dx_min / c + + # If final time is given, compute number of time steps + if tend is not None: + nsteps = int(np.ceil(tend / dt)) + + #-------------------------------------------------------------------------- + # Scalar diagnostics setup + #-------------------------------------------------------------------------- + + # Energy of exact solution + def exact_energies(t): + """ Compute electric & magnetic energies of exact solution. + """ + We = exact_solution.energy['We'](t) + Wb = exact_solution.energy['Wb'](t) + return (We, Wb) + + # Energy of numerical solution + def discrete_energies(e, b): + """ Compute electric & magnetic energies of numerical solution. + """ + We = 0.5 * M1.dot(e).dot(e) + Wb = 0.5 * M2.dot(b).dot(b) + return (We, Wb) + + # Scalar diagnostics: + diagnostics_ex = {'time': [], 'electric_energy': [], 'magnetic_energy': []} + diagnostics_num = {'time': [], 'electric_energy': [], 'magnetic_energy': []} + + #-------------------------------------------------------------------------- + # Visualization and diagnostics setup + #-------------------------------------------------------------------------- + + # Very fine grids for evaluation of solution + N = 5 + x1 = refine_array_1d(grid_x1, N) + x2 = refine_array_1d(grid_x2, N) + + x1, x2 = np.meshgrid(x1, x2, indexing='ij') + x, y = F(x1, x2) + + gridlines_x1 = (x[:, ::N], y[:, ::N] ) + gridlines_x2 = (x[::N, :].T, y[::N, :].T) + gridlines = (gridlines_x1, gridlines_x2) + + Ex_values = np.empty_like(x1) + Ey_values = np.empty_like(x1) + Bz_values = np.empty_like(x1) + + # Prepare plots + if plot_interval: + + # Plot physical grid and mapping's metric determinant + fig1, ax1 = plt.subplots(1, 1, figsize=(8, 6)) + im = ax1.contourf(x, y, np.sqrt(F.metric_det(x1, x2))) + add_colorbar(im, ax1, label=r'Metric determinant $\sqrt{g}$ of mapping $F$') + ax1.plot(*gridlines_x1, color='k') + ax1.plot(*gridlines_x2, color='k') + ax1.set_title('Mapped grid of {} x {} cells'.format(ncells, ncells)) + ax1.set_xlabel('x', fontsize=14) + ax1.set_ylabel('y', fontsize=14) + ax1.set_aspect('equal') + fig1.tight_layout() + fig1.show() + + # ... + # Plot initial conditions + # TODO: improve + for i, x1i in enumerate(x1[:, 0]): + for j, x2j in enumerate(x2[0, :]): + + Ex_values[i, j], Ey_values[i, j] = \ + push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) + + Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) + + # Electric field, x component + fig2 = plot_field_and_error(r'E^x', x, y, Ex_values, Ex_ex(0, x, y), *gridlines) + fig2.show() + + # Electric field, y component + fig3 = plot_field_and_error(r'E^y', x, y, Ey_values, Ey_ex(0, x, y), *gridlines) + fig3.show() + + # Magnetic field, z component + fig4 = plot_field_and_error(r'B^z', x, y, Bz_values, Bz_ex(0, x, y), *gridlines) + fig4.show() + # ... + + input('\nSimulation setup done... 
press any key to start') + + # Prepare diagnostics + if diagnostics_interval: + + # Exact energy at t=0 + We_ex, Wb_ex = exact_energies(t) + diagnostics_ex['time'].append(t) + diagnostics_ex['electric_energy'].append(We_ex) + diagnostics_ex['magnetic_energy'].append(Wb_ex) + + # Discrete energy at t=0 + We_num, Wb_num = discrete_energies(e, b) + diagnostics_num['time'].append(t) + diagnostics_num['electric_energy'].append(We_num) + diagnostics_num['magnetic_energy'].append(Wb_num) + + print('\nTotal energy in domain:') + print('ts = {:4d}, t = {:8.4f}, exact = {Wt_ex:.13e}, discrete = {Wt_num:.13e}'.format(0, + t, + Wt_ex = We_ex + Wb_ex, + Wt_num = We_num + Wb_num) + ) + else: + print('ts = {:4d}, t = {:8.4f}'.format(0, t)) + + #-------------------------------------------------------------------------- + # Solution + #-------------------------------------------------------------------------- + + # TODO: add option to convert to scipy sparse format + + # ... Arguments for time stepping + kwargs = {'verbose': verbose, 'tol': tol} + + if periodic: + args = (e, b, M1, M2, D1, D1_T) + else: + args = (e, b, M1 + M1_bc, M2, D1, D1_T) + kwargs['pc'] = 'jacobi' + # ... + + # Time loop + for ts in range(1, nsteps+1): + + # TODO: allow for high-order splitting + + # Strang splitting, 2nd order + step_faraday_2d(0.5*dt, *args, **kwargs) + step_ampere_2d ( dt, *args, **kwargs) + step_faraday_2d(0.5*dt, *args, **kwargs) + + t += dt + + # Animation + if plot_interval and (ts % plot_interval == 0 or ts == nsteps): + + # ... + # TODO: improve + for i, x1i in enumerate(x1[:, 0]): + for j, x2j in enumerate(x2[0, :]): + + Ex_values[i, j], Ey_values[i, j] = \ + push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) + + Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) + # ... + + # Update plot + update_plot(fig2, t, x, y, Ex_values, Ex_ex(t, x, y)) + update_plot(fig3, t, x, y, Ey_values, Ey_ex(t, x, y)) + update_plot(fig4, t, x, y, Bz_values, Bz_ex(t, x, y)) + plt.pause(0.1) + + # Scalar diagnostics + if diagnostics_interval and ts % diagnostics_interval == 0: + + # Update exact diagnostics + We_ex, Wb_ex = exact_energies(t) + diagnostics_ex['time'].append(t) + diagnostics_ex['electric_energy'].append(We_ex) + diagnostics_ex['magnetic_energy'].append(Wb_ex) + + # Update numerical diagnostics + We_num, Wb_num = discrete_energies(e, b) + diagnostics_num['time'].append(t) + diagnostics_num['electric_energy'].append(We_num) + diagnostics_num['magnetic_energy'].append(Wb_num) + + # Print total energy to terminal + print('ts = {:4d}, t = {:8.4f}, exact = {Wt_ex:.13e}, discrete = {Wt_num:.13e}'.format(ts, + t, + Wt_ex = We_ex + Wb_ex, + Wt_num = We_num + Wb_num) + ) + else: + print('ts = {:4d}, t = {:8.4f}'.format(ts, t)) + + #-------------------------------------------------------------------------- + # Post-processing + #-------------------------------------------------------------------------- + if MPI.COMM_WORLD.size == 1: + # (currently not available in parallel) + # ... + # TODO: improve + for i, x1i in enumerate(x1[:, 0]): + for j, x2j in enumerate(x2[0, :]): + + Ex_values[i, j], Ey_values[i, j] = \ + push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) + + Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) + # ... 
+ + # Error at final time + error_Ex = abs(Ex_ex(t, x, y) - Ex_values).max() + error_Ey = abs(Ey_ex(t, x, y) - Ey_values).max() + error_Bz = abs(Bz_ex(t, x, y) - Bz_values).max() + print() + print('Max-norm of error on Ex(t,x) at final time: {:.2e}'.format(error_Ex)) + print('Max-norm of error on Ey(t,x) at final time: {:.2e}'.format(error_Ey)) + print('Max-norm of error on Bz(t,x) at final time: {:.2e}'.format(error_Bz)) + + # compute L2 error as well + F = mapping.get_callable_mapping() + errx = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, F)[0] - Ex_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) + erry = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, F)[1] - Ey_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) + errz = lambda x1, x2: (push_2d_l2(B, x1, x2, F) - Bz_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) + error_l2_Ex = np.sqrt(derham_h.V1.spaces[0].integral(errx)) + error_l2_Ey = np.sqrt(derham_h.V1.spaces[1].integral(erry)) + error_l2_Bz = np.sqrt(derham_h.V0.integral(errz)) + print('L2 norm of error on Ex(t,x,y) at final time: {:.2e}'.format(error_l2_Ex)) + print('L2 norm of error on Ey(t,x,y) at final time: {:.2e}'.format(error_l2_Ey)) + print('L2 norm of error on Bz(t,x,y) at final time: {:.2e}'.format(error_l2_Bz)) + + if diagnostics_interval: + + # Extract exact diagnostics + t_ex = np.asarray(diagnostics_ex['time']) + We_ex = np.asarray(diagnostics_ex['electric_energy']) + Wb_ex = np.asarray(diagnostics_ex['magnetic_energy']) + Wt_ex = We_ex + Wb_ex + + # Extract numerical diagnostics + t_num = np.asarray(diagnostics_num['time']) + We_num = np.asarray(diagnostics_num['electric_energy']) + Wb_num = np.asarray(diagnostics_num['magnetic_energy']) + Wt_num = We_num + Wb_num + + # Energy plots + fig3, (ax31, ax32, ax33) = plt.subplots(3, 1, figsize=(12, 10)) + # + ax31.set_title('Energy of exact solution') + ax31.plot(t_ex, We_ex, label='electric') + ax31.plot(t_ex, Wb_ex, label='magnetic') + ax31.plot(t_ex, Wt_ex, label='total' ) + ax31.legend() + ax31.set_xlabel('t') + ax31.set_ylabel('W', rotation='horizontal') + ax31.grid() + # + ax32.set_title('Energy of numerical solution') + ax32.plot(t_num, We_num, label='electric') + ax32.plot(t_num, Wb_num, label='magnetic') + ax32.plot(t_num, Wt_num, label='total' ) + ax32.legend() + ax32.set_xlabel('t') + ax32.set_ylabel('W', rotation='horizontal') + ax32.grid() + # + ax33.set_title('Relative error in total energy') + ax33.plot(t_ex , (Wt_ex - Wt_ex) / Wt_ex[0], '--', label='exact') + ax33.plot(t_num, (Wt_num - Wt_ex) / Wt_ex[0], '-' , label='numerical') + ax33.legend() + ax33.set_xlabel('t') + ax33.set_ylabel('(W - W_ex) / W_ex(t=0)') + ax33.grid() + # + fig3.tight_layout() + fig3.show() + + # Return whole namespace as dictionary + return locals() + +#============================================================================== +# UNIT TESTS +#============================================================================== + +def test_maxwell_2d_periodic(): + + namespace = run_maxwell_2d_TE( + eps = 0.5, + ncells = 12, + degree = 3, + periodic = True, + Cp = 0.5, + nsteps = 1, + tend = None, + splitting_order = 2, + plot_interval = 0, + diagnostics_interval = 0, + tol = 1e-6, + verbose = False + ) + + TOL = 1e-6 + ref = dict(error_Ex = 6.870389e-03, + error_Ey = 6.870389e-03, + error_Bz = 4.443822e-03) + + assert abs(namespace['error_Ex'] - ref['error_Ex']) / ref['error_Ex'] <= TOL + assert abs(namespace['error_Ey'] - ref['error_Ey']) / ref['error_Ey'] <= TOL + assert 
abs(namespace['error_Bz'] - ref['error_Bz']) / ref['error_Bz'] <= TOL + + +def test_maxwell_2d_dirichlet(): + + namespace = run_maxwell_2d_TE( + eps = 0.5, + ncells = 10, + degree = 5, + periodic = False, + Cp = 0.5, + nsteps = 1, + tend = None, + splitting_order = 2, + plot_interval = 0, + diagnostics_interval = 0, + tol = 1e-6, + verbose = False + ) + + TOL = 1e-6 + ref = dict(error_Ex = 3.597840e-03, + error_Ey = 3.597840e-03, + error_Bz = 4.366314e-03) + + assert abs(namespace['error_Ex'] - ref['error_Ex']) / ref['error_Ex'] <= TOL + assert abs(namespace['error_Ey'] - ref['error_Ey']) / ref['error_Ey'] <= TOL + assert abs(namespace['error_Bz'] - ref['error_Bz']) / ref['error_Bz'] <= TOL + +@pytest.mark.parallel +def test_maxwell_2d_periodic_par(): + + namespace = run_maxwell_2d_TE( + eps = 0.5, + ncells = 12, + degree = 3, + periodic = True, + Cp = 0.5, + nsteps = 1, + tend = None, + splitting_order = 2, + plot_interval = 0, + diagnostics_interval = 0, + tol = 1e-6, + verbose = False + ) + + TOL = 1e-6 + ref = dict(error_l2_Ex = 4.2115063593622278e-03, + error_l2_Ey = 4.2115065915750306e-03, + error_l2_Bz = 3.6252141126597646e-03) + + assert abs(namespace['error_l2_Ex'] - ref['error_l2_Ex']) / ref['error_l2_Ex'] <= TOL + assert abs(namespace['error_l2_Ey'] - ref['error_l2_Ey']) / ref['error_l2_Ey'] <= TOL + assert abs(namespace['error_l2_Bz'] - ref['error_l2_Bz']) / ref['error_l2_Bz'] <= TOL + +@pytest.mark.parallel +def test_maxwell_2d_dirichlet_par(): + + namespace = run_maxwell_2d_TE( + eps = 0.5, + ncells = 10, + degree = 5, + periodic = False, + Cp = 0.5, + nsteps = 1, + tend = None, + splitting_order = 2, + plot_interval = 0, + diagnostics_interval = 0, + tol = 1e-6, + verbose = False + ) + + TOL = 1e-6 + ref = dict(error_l2_Ex = 1.3223335792411782e-03, + error_l2_Ey = 1.3223335792411910e-03, + error_l2_Bz = 4.0492562719804193e-03) + + assert abs(namespace['error_l2_Ex'] - ref['error_l2_Ex']) / ref['error_l2_Ex'] <= TOL + assert abs(namespace['error_l2_Ey'] - ref['error_l2_Ey']) / ref['error_l2_Ey'] <= TOL + assert abs(namespace['error_l2_Bz'] - ref['error_l2_Bz']) / ref['error_l2_Bz'] <= TOL + +#============================================================================== +# SCRIPT CAPABILITIES +#============================================================================== +if __name__ == '__main__': + + import argparse + + parser = argparse.ArgumentParser( + formatter_class = argparse.ArgumentDefaultsHelpFormatter, + description = "Solve 2D Maxwell's equations in rectangular cavity with spline FEEC method." + ) + + parser.add_argument('ncells', + type = int, + help = 'Number of cells in domain' + ) + + parser.add_argument('degree', + type = int, + help = 'Polynomial spline degree' + ) + + parser.add_argument( '-P', '--periodic', + action = 'store_true', + help = 'Use periodic boundary conditions' + ) + + parser.add_argument('-o', '--splitting_order', + type = int, + default = 2, + choices = [2, 4, 6], + help = 'Order of accuracy of operator splitting' + ) + + parser.add_argument( '-e', + type = float, + default = 0.25, + dest = 'eps', + metavar = 'EPS', + help = 'Deformation level (0 <= EPS < 1)' + ) + + parser.add_argument( '-c', + type = float, + default = 0.5, + dest = 'Cp', + metavar = 'Cp', + help = 'Courant parameter on uniform grid' + ) + + # ... 
+ time_opts = parser.add_mutually_exclusive_group() + time_opts.add_argument( '-t', + type = int, + default = 1, + dest = 'nsteps', + metavar = 'NSTEPS', + help = 'Number of time-steps to be taken' + ) + time_opts.add_argument( '-T', + type = float, + dest = 'tend', + metavar = 'END_TIME', + help = 'Run simulation until given final time' + ) + # ... + + parser.add_argument( '-p', + type = int, + default = 4, + metavar = 'I', + dest = 'plot_interval', + help = 'No. of time steps between successive plots of solution, if I=0 no plots are made' + ) + + parser.add_argument( '-d', + type = int, + default = 1, + metavar = 'I', + dest = 'diagnostics_interval', + help = 'No. of time steps between successive calculations of scalar diagnostics, if I=0 no diagnostics are computed' + ) + + parser.add_argument( '--tol', + type = float, + default = 1e-7, + help = 'Tolerance for iterative solver (L2-norm of residual)' + ) + + parser.add_argument( '-v', '--verbose', + action = 'store_true', + help = 'Print convergence information of iterative solver' + ) + + # Read input arguments + args = parser.parse_args() + + # Run simulation + namespace = run_maxwell_2d_TE(**vars(args)) + + # Keep matplotlib windows open + import matplotlib.pyplot as plt + plt.show() From b72fc3bacda1ec2737e358148a25cb78a72117e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Wed, 19 Oct 2022 19:13:31 +0200 Subject: [PATCH 16/28] Add method jacobian_inv to SplineMapping --- psydac/mapping/discrete.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/psydac/mapping/discrete.py b/psydac/mapping/discrete.py index be2531d92..a9d26f7b0 100644 --- a/psydac/mapping/discrete.py +++ b/psydac/mapping/discrete.py @@ -139,6 +139,10 @@ def __call__(self, *eta): def jacobian(self, *eta): return np.array([map_Xd.gradient(*eta) for map_Xd in self._fields]) + # ... + def jacobian_inv(self, *eta): + return np.linalg.inv(self.jacobian(*eta)) + # ... 
def metric(self, *eta): J = self.jacobian(*eta) From 4258b8400f29dfcfcfba77b69219f2954f496d3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Wed, 19 Oct 2022 19:57:50 +0200 Subject: [PATCH 17/28] Add FEEC unit test for 2D Maxwell with spline mapping --- psydac/api/tests/test_api_feec_2d.py | 200 ++++++++++++++++++++------- 1 file changed, 153 insertions(+), 47 deletions(-) diff --git a/psydac/api/tests/test_api_feec_2d.py b/psydac/api/tests/test_api_feec_2d.py index 8165ab086..6ed25c662 100644 --- a/psydac/api/tests/test_api_feec_2d.py +++ b/psydac/api/tests/test_api_feec_2d.py @@ -192,16 +192,22 @@ def update_plot(fig, t, x, y, field_h, field_ex): #============================================================================== # SIMULATION #============================================================================== -def run_maxwell_2d_TE(*, eps, ncells, degree, periodic, Cp, nsteps, tend, +def run_maxwell_2d_TE(*, use_spline_mapping, + eps, ncells, degree, periodic, + Cp, nsteps, tend, splitting_order, plot_interval, diagnostics_interval, tol, verbose): + import os + import numpy as np import matplotlib.pyplot as plt from mpi4py import MPI from scipy.integrate import dblquad + from sympde.topology import Domain from sympde.topology import Square from sympde.topology import Mapping + from sympde.topology import CallableMapping # from sympde.topology import CollelaMapping2D from sympde.topology import Derham from sympde.topology import elements_of @@ -214,6 +220,7 @@ def run_maxwell_2d_TE(*, eps, ncells, degree, periodic, Cp, nsteps, tend, from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL from psydac.feec.pull_push import push_2d_hcurl, push_2d_l2 from psydac.utilities.utils import refine_array_1d + from psydac.mapping.discrete import SplineMapping, NurbsMapping #-------------------------------------------------------------------------- # Problem setup @@ -243,20 +250,33 @@ def run_maxwell_2d_TE(*, eps, ncells, degree, periodic, Cp, nsteps, tend, # Analytical objects: SymPDE #-------------------------------------------------------------------------- - # Logical domain is unit square [0, 1] x [0, 1] - logical_domain = Square('Omega') + if use_spline_mapping: + + try: + mesh_dir = os.environ['PSYDAC_MESH_DIR'] + except KeyError: + base_dir = os.path.dirname(os.path.realpath(__file__)) + mesh_dir = os.path.join(base_dir, '..', '..', '..', 'mesh') + + filename = os.path.join(mesh_dir, 'collela_2d.h5') + domain = Domain.from_file(filename) + mapping = domain.mapping + + else: + # Logical domain is unit square [0, 1] x [0, 1] + logical_domain = Square('Omega') - # Mapping and physical domain - class CollelaMapping2D(Mapping): + # Mapping and physical domain + class CollelaMapping2D(Mapping): - _ldim = 2 - _pdim = 2 - _expressions = {'x': 'a * (x1 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))', - 'y': 'b * (x2 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))'} + _ldim = 2 + _pdim = 2 + _expressions = {'x': 'a * (x1 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))', + 'y': 'b * (x2 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))'} -# mapping = CollelaMapping2D('M', k1=1, k2=1, eps=eps) - mapping = CollelaMapping2D('M', a=a, b=b, eps=eps) - domain = mapping(logical_domain) + # mapping = CollelaMapping2D('M', k1=1, k2=1, eps=eps) + mapping = CollelaMapping2D('M', a=a, b=b, eps=eps) + domain = mapping(logical_domain) # DeRham sequence derham = Derham(domain, sequence=['h1', 'hcurl', 'l2']) @@ -279,9 +299,30 @@ class CollelaMapping2D(Mapping): # Discrete objects: Psydac 
#-------------------------------------------------------------------------- - # Discrete physical domain and discrete DeRham sequence - domain_h = discretize(domain, ncells=[ncells, ncells], periodic=[periodic, periodic], comm=MPI.COMM_WORLD) - derham_h = discretize(derham, domain_h, degree=[degree, degree]) + if use_spline_mapping: + domain_h = discretize(domain, filename=filename, comm=MPI.COMM_WORLD) + derham_h = discretize(derham, domain_h) + + periodic_list = mapping.get_callable_mapping().space.periodic + degree_list = mapping.get_callable_mapping().space.degree + + # Determine if periodic boundary conditions should be used + if all(periodic_list): + periodic = True + elif not any(periodic_list): + periodic = False + else: + raise ValueError('Cannot handle periodicity along one direction only') + + # Enforce same degree along x1 and x2 + degree = degree_list[0] + if degree != degree_list[1]: + raise ValueError('Cannot handle different degrees in the two directions') + + else: + # Discrete physical domain and discrete DeRham sequence + domain_h = discretize(domain, ncells=[ncells, ncells], periodic=[periodic, periodic], comm=MPI.COMM_WORLD) + derham_h = discretize(derham, domain_h, degree=[degree, degree]) # Discrete bilinear forms a1_h = discretize(a1, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL) @@ -310,7 +351,13 @@ class CollelaMapping2D(Mapping): grid_x1 = derham_h.V0.breaks[0] grid_x2 = derham_h.V0.breaks[1] - grid_x, grid_y = F(*np.meshgrid(grid_x1, grid_x2, indexing='ij')) + # TODO: fix for spline mapping + if isinstance(F, (SplineMapping, NurbsMapping)): + grid_x, grid_y = F.build_mesh([grid_x1, grid_x2]) + elif isinstance(F, CallableMapping): + grid_x, grid_y = F(*np.meshgrid(grid_x1, grid_x2, indexing='ij')) + else: + raise TypeError(F) #-------------------------------------------------------------------------- # Time integration setup @@ -367,11 +414,15 @@ def discrete_energies(e, b): # Very fine grids for evaluation of solution N = 5 - x1 = refine_array_1d(grid_x1, N) - x2 = refine_array_1d(grid_x2, N) + x1_a = refine_array_1d(grid_x1, N) + x2_a = refine_array_1d(grid_x2, N) - x1, x2 = np.meshgrid(x1, x2, indexing='ij') - x, y = F(x1, x2) + x1, x2 = np.meshgrid(x1_a, x2_a, indexing='ij') + + if use_spline_mapping: + x, y = F.build_mesh([x1_a, x2_a]) + else: + x, y = F(x1, x2) gridlines_x1 = (x[:, ::N], y[:, ::N] ) gridlines_x2 = (x[::N, :].T, y[::N, :].T) @@ -386,7 +437,12 @@ def discrete_energies(e, b): # Plot physical grid and mapping's metric determinant fig1, ax1 = plt.subplots(1, 1, figsize=(8, 6)) - im = ax1.contourf(x, y, np.sqrt(F.metric_det(x1, x2))) + + if use_spline_mapping: + im = ax1.contourf(x, y, F.jac_det_grid([x1_a, x2_a])) + else: + im = ax1.contourf(x, y, np.sqrt(F.metric_det(x1, x2))) + add_colorbar(im, ax1, label=r'Metric determinant $\sqrt{g}$ of mapping $F$') ax1.plot(*gridlines_x1, color='k') ax1.plot(*gridlines_x2, color='k') @@ -662,6 +718,41 @@ def test_maxwell_2d_dirichlet(): assert abs(namespace['error_Ey'] - ref['error_Ey']) / ref['error_Ey'] <= TOL assert abs(namespace['error_Bz'] - ref['error_Bz']) / ref['error_Bz'] <= TOL + +def test_maxwell_2d_dirichlet_spline_mapping(): + + namespace = run_maxwell_2d_TE( + use_spline_mapping = True, + eps = None, + ncells = None, + degree = None, + periodic = None, + Cp = 0.5, + nsteps = 1, + tend = None, + splitting_order = 2, + plot_interval = 0, + diagnostics_interval = 0, + tol = 1e-6, + verbose = False + ) + + TOL = 1e-6 + ref = dict(error_Ex = 0.11197875072599534, + error_Ey 
= 0.11197875071916191, + error_Bz = 0.09616100464412525) + + print() + print(namespace['error_Ex']) + print(namespace['error_Ey']) + print(namespace['error_Bz']) + print() + + assert abs(namespace['error_Ex'] - ref['error_Ex']) / ref['error_Ex'] <= TOL + assert abs(namespace['error_Ey'] - ref['error_Ey']) / ref['error_Ey'] <= TOL + assert abs(namespace['error_Bz'] - ref['error_Bz']) / ref['error_Bz'] <= TOL + + @pytest.mark.parallel def test_maxwell_2d_periodic_par(): @@ -728,46 +819,56 @@ def test_maxwell_2d_dirichlet_par(): description = "Solve 2D Maxwell's equations in rectangular cavity with spline FEEC method." ) - parser.add_argument('ncells', - type = int, - help = 'Number of cells in domain' + parser.add_argument('-s', '--spline', + action = 'store_true', + dest = 'use_spline_mapping', + help = 'Use spline mapping from geometry file "collela_2d.h5"' ) - parser.add_argument('degree', - type = int, - help = 'Polynomial spline degree' + # ... + disc_group = parser.add_argument_group('Discretization and geometry parameters (ignored for spline mapping)') + disc_group.add_argument('-n', + type = int, + default = 10, + dest = 'ncells', + help = 'Number of cells in domain ' ) - - parser.add_argument( '-P', '--periodic', + disc_group.add_argument('-d', + type = int, + default = 3, + dest = 'degree', + help = 'Polynomial spline degree' + ) + disc_group.add_argument( '-P', '--periodic', action = 'store_true', help = 'Use periodic boundary conditions' ) - - parser.add_argument('-o', '--splitting_order', - type = int, - default = 2, - choices = [2, 4, 6], - help = 'Order of accuracy of operator splitting' - ) - - parser.add_argument( '-e', + disc_group.add_argument( '-e', type = float, default = 0.25, dest = 'eps', metavar = 'EPS', help = 'Deformation level (0 <= EPS < 1)' ) + # ... - parser.add_argument( '-c', + # ... + time_group = parser.add_argument_group('Time integration options') + time_group.add_argument('-o', + type = int, + default = 2, + dest = 'splitting_order', + choices = [2, 4, 6], + help = 'Order of accuracy of operator splitting' + ) + time_group.add_argument( '-c', type = float, default = 0.5, dest = 'Cp', metavar = 'Cp', help = 'Courant parameter on uniform grid' ) - - # ... - time_opts = parser.add_mutually_exclusive_group() + time_opts = time_group.add_mutually_exclusive_group() time_opts.add_argument( '-t', type = int, default = 1, @@ -783,32 +884,37 @@ def test_maxwell_2d_dirichlet_par(): ) # ... - parser.add_argument( '-p', + # ... + out_group = parser.add_argument_group('Output options') + out_group.add_argument( '-p', type = int, default = 4, metavar = 'I', dest = 'plot_interval', help = 'No. of time steps between successive plots of solution, if I=0 no plots are made' ) - - parser.add_argument( '-d', + out_group.add_argument( '-D', type = int, default = 1, metavar = 'I', dest = 'diagnostics_interval', help = 'No. of time steps between successive calculations of scalar diagnostics, if I=0 no diagnostics are computed' ) + # ... - parser.add_argument( '--tol', + # ... + solver_group = parser.add_argument_group('Iterative solver') + solver_group.add_argument( '--tol', type = float, default = 1e-7, help = 'Tolerance for iterative solver (L2-norm of residual)' ) - parser.add_argument( '-v', '--verbose', + solver_group.add_argument( '-v', '--verbose', action = 'store_true', help = 'Print convergence information of iterative solver' ) + # ... 
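The reorganized command-line options above rely on nesting a mutually exclusive pair inside a titled argument group. A small self-contained sketch of that argparse pattern (the parser and option names here are illustrative, not the script's full CLI):

    import argparse

    parser = argparse.ArgumentParser(description='Sketch: grouped CLI options')

    # A titled group appears as its own section in the --help output.
    time_group = parser.add_argument_group('Time integration options')
    time_group.add_argument('-c', type=float, default=0.5, dest='Cp',
                            help='Courant parameter on uniform grid')

    # A mutually exclusive group can be nested inside a titled group:
    # argparse then rejects any command line that passes both -t and -T.
    time_opts = time_group.add_mutually_exclusive_group()
    time_opts.add_argument('-t', type=int,   default=1, dest='nsteps', help='Number of time steps')
    time_opts.add_argument('-T', type=float,            dest='tend',   help='Final simulation time')

    args = parser.parse_args(['-c', '0.4', '-T', '2.5'])
    print(args.Cp, args.nsteps, args.tend)   # -> 0.4 1 2.5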
# Read input arguments args = parser.parse_args() From cc3b67c069ab73584050bedea9a7824ec4b9e563 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Thu, 20 Oct 2022 09:49:09 +0200 Subject: [PATCH 18/28] Add missing argument to broken unit tests --- psydac/api/tests/test_api_feec_2d.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/psydac/api/tests/test_api_feec_2d.py b/psydac/api/tests/test_api_feec_2d.py index 6ed25c662..d584e97da 100644 --- a/psydac/api/tests/test_api_feec_2d.py +++ b/psydac/api/tests/test_api_feec_2d.py @@ -668,6 +668,7 @@ def discrete_energies(e, b): def test_maxwell_2d_periodic(): namespace = run_maxwell_2d_TE( + use_spline_mapping = False, eps = 0.5, ncells = 12, degree = 3, @@ -695,6 +696,7 @@ def test_maxwell_2d_periodic(): def test_maxwell_2d_dirichlet(): namespace = run_maxwell_2d_TE( + use_spline_mapping = False, eps = 0.5, ncells = 10, degree = 5, @@ -742,12 +744,6 @@ def test_maxwell_2d_dirichlet_spline_mapping(): error_Ey = 0.11197875071916191, error_Bz = 0.09616100464412525) - print() - print(namespace['error_Ex']) - print(namespace['error_Ey']) - print(namespace['error_Bz']) - print() - assert abs(namespace['error_Ex'] - ref['error_Ex']) / ref['error_Ex'] <= TOL assert abs(namespace['error_Ey'] - ref['error_Ey']) / ref['error_Ey'] <= TOL assert abs(namespace['error_Bz'] - ref['error_Bz']) / ref['error_Bz'] <= TOL @@ -757,6 +753,7 @@ def test_maxwell_2d_dirichlet_spline_mapping(): def test_maxwell_2d_periodic_par(): namespace = run_maxwell_2d_TE( + use_spline_mapping = False, eps = 0.5, ncells = 12, degree = 3, @@ -784,6 +781,7 @@ def test_maxwell_2d_periodic_par(): def test_maxwell_2d_dirichlet_par(): namespace = run_maxwell_2d_TE( + use_spline_mapping = False, eps = 0.5, ncells = 10, degree = 5, From 54131548a4d4b04de965fa0b869d6a196933539f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Thu, 20 Oct 2022 09:52:35 +0200 Subject: [PATCH 19/28] Remove draft FEEC test --- .../temp-test_api_feec_2d_spline-mapping.py | 830 ------------------ 1 file changed, 830 deletions(-) delete mode 100644 psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py diff --git a/psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py b/psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py deleted file mode 100644 index 3b8bc364f..000000000 --- a/psydac/api/tests/temp-test_api_feec_2d_spline-mapping.py +++ /dev/null @@ -1,830 +0,0 @@ -# coding: utf-8 -# Copyright 2021 Yaman Güçlü - -import pytest - -#============================================================================== -# TIME STEPPING METHOD -#============================================================================== -def step_faraday_2d(dt, e, b, M1, M2, D1, D1_T, **kwargs): - """ - Exactly integrate the semi-discrete Faraday equation over one time-step: - - b_new = b - ∆t D1 e - - """ - b -= dt * D1.dot(e) - # e += 0 - -def step_ampere_2d(dt, e, b, M1, M2, D1, D1_T, *, pc=None, tol=1e-7, verbose=False): - """ - Exactly integrate the semi-discrete Amperè equation over one time-step: - - e_new = e - ∆t (M1^{-1} D1^T M2) b - - """ - options = dict(tol=tol, verbose=verbose) - if pc: - from psydac.linalg.iterative_solvers import pcg as isolve - options['pc'] = pc - else: - from psydac.linalg.iterative_solvers import cg as isolve - - # b += 0 - e += dt * isolve(M1, D1_T.dot(M2.dot(b)), **options)[0] - -#============================================================================== -# ANALYTICAL SOLUTION 
-#============================================================================== -class CavitySolution: - """ - Time-harmonic solution of Maxwell's equations in a rectangular cavity with - perfectly conducting walls. This is a "transverse electric" solution, with - E = (Ex, Ey) and B = Bz. Domain is [0, a] x [0, b]. - - Parameters - ---------- - a : float - Size of cavity along x direction. - - b : float - Size of cavity along y direction. - - c : float - Speed of light in arbitrary units. - - nx : int - Number of half wavelengths along x direction. - - ny : int - Number of half wavelengths along y direction. - - """ - def __init__(self, *, a, b, c, nx, ny): - - from sympy import symbols - from sympy import lambdify - - sym_params, sym_fields, sym_energy = self.symbolic() - - params = {'a': a, 'b': b, 'c': c, 'nx': nx, 'ny': ny} - repl = [(sym_params[k], params[k]) for k in sym_params.keys()] - args = symbols('t, x, y') - - # Callable functions - fields = {k: lambdify(args , v.subs(repl), 'numpy') for k, v in sym_fields.items()} - energy = {k: lambdify(args[0], v.subs(repl), 'numpy') for k, v in sym_energy.items()} - - # Store private attributes - self._sym_params = sym_params - self._sym_fields = sym_fields - self._sym_energy = sym_energy - - self._params = params - self._fields = fields - self._energy = energy - - #-------------------------------------------------------------------------- - @staticmethod - def symbolic(): - - from sympy import symbols - from sympy import cos, sin, pi, sqrt - from sympy.integrals import integrate - - t, x, y = symbols('t x y', real=True) - a, b, c = symbols('a b c', positive=True) - nx, ny = symbols('nx ny', positive=True, integer=True) - - kx = pi * nx / a - ky = pi * ny / b - omega = c * sqrt(kx**2 + ky**2) - - # Exact solutions for electric and magnetic field - Ex = cos(kx * x) * sin(ky * y) * cos(omega * t) - Ey = -sin(kx * x) * cos(ky * y) * cos(omega * t) - Bz = cos(kx * x) * cos(ky * y) * sin(omega * t) * (kx + ky) / omega - - # Electric and magnetic energy in domain - We = integrate(integrate((Ex**2 + Ey**2)/ 2, (x, 0, a)), (y, 0, b)).simplify() - Wb = integrate(integrate( Bz**2 / 2, (x, 0, a)), (y, 0, b)).simplify() - - params = {'a': a, 'b': b, 'c': c, 'nx': nx, 'ny': ny} - fields = {'Ex': Ex, 'Ey': Ey, 'Bz': Bz} - energy = {'We': We, 'Wb': Wb} - - return params, fields, energy - - #-------------------------------------------------------------------------- - @property - def params(self): - return self._params - - @property - def fields(self): - return self._fields - - @property - def energy(self): - return self._energy - - @property - def derived_params(self): - from numpy import pi, sqrt - kx = pi * self.params['nx'] / self.params['a'] - ky = pi * self.params['ny'] / self.params['b'] - omega = self.params['c'] * sqrt(kx**2 + ky**2) - return {'kx': kx, 'ky' : ky, 'omega': omega} - - @property - def sym_params(self): - return self._sym_params - - @property - def sym_fields(self): - return self._sym_fields - - @property - def sym_energy(self): - return self._sym_energy - -#============================================================================== -# VISUALIZATION -#============================================================================== - -def add_colorbar(im, ax, **kwargs): - from mpl_toolkits.axes_grid1 import make_axes_locatable - divider = make_axes_locatable(ax) - cax = divider.append_axes("right", size=0.2, pad=0.3) - cbar = ax.get_figure().colorbar(im, cax=cax, **kwargs) - return cbar - -def plot_field_and_error(name, x, y, 
field_h, field_ex, *gridlines): - import matplotlib.pyplot as plt - fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(15, 6)) - im0 = ax0.contourf(x, y, field_h) - im1 = ax1.contourf(x, y, field_ex - field_h) - ax0.set_title(r'${0}_h$'.format(name)) - ax1.set_title(r'${0} - {0}_h$'.format(name)) - for ax in (ax0, ax1): - ax.plot(*gridlines[0], color='k') - ax.plot(*gridlines[1], color='k') - ax.set_xlabel('x', fontsize=14) - ax.set_ylabel('y', fontsize=14, rotation='horizontal') - ax.set_aspect('equal') - add_colorbar(im0, ax0) - add_colorbar(im1, ax1) - fig.suptitle('Time t = {:10.3e}'.format(0)) - fig.tight_layout() - return fig - -def update_plot(fig, t, x, y, field_h, field_ex): - ax0, ax1, cax0, cax1 = fig.axes - ax0.collections.clear(); cax0.clear() - ax1.collections.clear(); cax1.clear() - im0 = ax0.contourf(x, y, field_h) - im1 = ax1.contourf(x, y, field_ex - field_h) - fig.colorbar(im0, cax=cax0) - fig.colorbar(im1, cax=cax1) - fig.suptitle('Time t = {:10.3e}'.format(t)) - fig.canvas.draw() - -#============================================================================== -# SIMULATION -#============================================================================== -def run_maxwell_2d_TE(*, eps, ncells, degree, periodic, Cp, nsteps, tend, - splitting_order, plot_interval, diagnostics_interval, tol, verbose): - - import numpy as np - import matplotlib.pyplot as plt - from mpi4py import MPI - from scipy.integrate import dblquad - - from sympde.topology import Square - from sympde.topology import Mapping -# from sympde.topology import CollelaMapping2D - from sympde.topology import Derham - from sympde.topology import elements_of - from sympde.topology import NormalVector - from sympde.calculus import dot, cross - from sympde.expr import integral - from sympde.expr import BilinearForm - - from psydac.api.discretization import discretize - from psydac.api.settings import PSYDAC_BACKEND_GPYCCEL - from psydac.feec.pull_push import push_2d_hcurl, push_2d_l2 - from psydac.utilities.utils import refine_array_1d - - #-------------------------------------------------------------------------- - # Problem setup - #-------------------------------------------------------------------------- - - # Physical domain is rectangle [0, a] x [0, b] - a = 2.0 - b = 2.0 - - # Speed of light is 1 - c = 1.0 - - # Mode number - (nx, ny) = (2, 2) - - # Exact solution - exact_solution = CavitySolution(a=a, b=b, c=c, nx=nx, ny=ny) - - # Exact fields, as callable functions of (t, x, y) - Ex_ex = exact_solution.fields['Ex'] - Ey_ex = exact_solution.fields['Ey'] - Bz_ex = exact_solution.fields['Bz'] - - #... 
- - #-------------------------------------------------------------------------- - # Analytical objects: SymPDE - #-------------------------------------------------------------------------- - - # Logical domain is unit square [0, 1] x [0, 1] - logical_domain = Square('Omega') - - # Mapping and physical domain - class CollelaMapping2D(Mapping): - - _ldim = 2 - _pdim = 2 - _expressions = {'x': 'a * (x1 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))', - 'y': 'b * (x2 + eps / (2*pi) * sin(2*pi*x1) * sin(2*pi*x2))'} - -# mapping = CollelaMapping2D('M', k1=1, k2=1, eps=eps) - mapping = CollelaMapping2D('M', a=a, b=b, eps=eps) -# domain = mapping(logical_domain) - - # ------------- - import os - from sympde.topology.domain import Domain - mesh_dir = 'mesh' - filename = os.path.join(mesh_dir, 'collela_2d.h5') - domain = Domain.from_file(filename) - # ------------- - - # DeRham sequence - derham = Derham(domain, sequence=['h1', 'hcurl', 'l2']) - - # Trial and test functions - u1, v1 = elements_of(derham.V1, names='u1, v1') # electric field E = (Ex, Ey) - u2, v2 = elements_of(derham.V2, names='u2, v2') # magnetic field Bz - - # Bilinear forms that correspond to mass matrices for spaces V1 and V2 - a1 = BilinearForm((u1, v1), integral(domain, dot(u1, v1))) - a2 = BilinearForm((u2, v2), integral(domain, u2 * v2)) - - # If needed, use penalization to apply homogeneous Dirichlet BCs - if not periodic: - nn = NormalVector('nn') - a1_bc = BilinearForm((u1, v1), - integral(domain.boundary, 1e30 * cross(u1, nn) * cross(v1, nn))) - - #-------------------------------------------------------------------------- - # Discrete objects: Psydac - #-------------------------------------------------------------------------- - - # Discrete physical domain and discrete DeRham sequence -# domain_h = discretize(domain, ncells=[ncells, ncells], periodic=[periodic, periodic], comm=MPI.COMM_WORLD) - domain_h = discretize(domain, filename=filename, comm=MPI.COMM_WORLD) - derham_h = discretize(derham, domain_h, degree=[degree, degree]) - - # Discrete bilinear forms - a1_h = discretize(a1, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL) - a2_h = discretize(a2, domain_h, (derham_h.V2, derham_h.V2), backend=PSYDAC_BACKEND_GPYCCEL) - - # Mass matrices (StencilMatrix objects) - M1 = a1_h.assemble() - M2 = a2_h.assemble() - - # Differential operators - D0, D1 = derham_h.derivatives_as_matrices - - # Discretize and assemble penalization matrix - if not periodic: - a1_bc_h = discretize(a1_bc, domain_h, (derham_h.V1, derham_h.V1), backend=PSYDAC_BACKEND_GPYCCEL) - M1_bc = a1_bc_h.assemble() - - # Transpose of derivative matrix - D1_T = D1.T - - # Projectors - P0, P1, P2 = derham_h.projectors(nquads=[degree+2, degree+2]) - - # Logical and physical grids - F = mapping.get_callable_mapping() - grid_x1 = derham_h.V0.breaks[0] - grid_x2 = derham_h.V0.breaks[1] - - grid_x, grid_y = F(*np.meshgrid(grid_x1, grid_x2, indexing='ij')) - - #-------------------------------------------------------------------------- - # Time integration setup - #-------------------------------------------------------------------------- - - t = 0 - - # Initial conditions, discrete fields - E = P1((lambda x, y: Ex_ex(0, x, y), lambda x, y: Ey_ex(0, x, y))) - B = P2(lambda x, y: Bz_ex(0, x, y)) - - # Initial conditions, spline coefficients - e = E.coeffs - b = B.coeffs - - # Time step size - dx_min_1 = np.sqrt(np.diff(grid_x, axis=0)**2 + np.diff(grid_y, axis=0)**2).min() - dx_min_2 = np.sqrt(np.diff(grid_x, axis=1)**2 + np.diff(grid_y, 
axis=1)**2).min() - - dx_min = min(dx_min_1, dx_min_2) - dt = Cp * dx_min / c - - # If final time is given, compute number of time steps - if tend is not None: - nsteps = int(np.ceil(tend / dt)) - - #-------------------------------------------------------------------------- - # Scalar diagnostics setup - #-------------------------------------------------------------------------- - - # Energy of exact solution - def exact_energies(t): - """ Compute electric & magnetic energies of exact solution. - """ - We = exact_solution.energy['We'](t) - Wb = exact_solution.energy['Wb'](t) - return (We, Wb) - - # Energy of numerical solution - def discrete_energies(e, b): - """ Compute electric & magnetic energies of numerical solution. - """ - We = 0.5 * M1.dot(e).dot(e) - Wb = 0.5 * M2.dot(b).dot(b) - return (We, Wb) - - # Scalar diagnostics: - diagnostics_ex = {'time': [], 'electric_energy': [], 'magnetic_energy': []} - diagnostics_num = {'time': [], 'electric_energy': [], 'magnetic_energy': []} - - #-------------------------------------------------------------------------- - # Visualization and diagnostics setup - #-------------------------------------------------------------------------- - - # Very fine grids for evaluation of solution - N = 5 - x1 = refine_array_1d(grid_x1, N) - x2 = refine_array_1d(grid_x2, N) - - x1, x2 = np.meshgrid(x1, x2, indexing='ij') - x, y = F(x1, x2) - - gridlines_x1 = (x[:, ::N], y[:, ::N] ) - gridlines_x2 = (x[::N, :].T, y[::N, :].T) - gridlines = (gridlines_x1, gridlines_x2) - - Ex_values = np.empty_like(x1) - Ey_values = np.empty_like(x1) - Bz_values = np.empty_like(x1) - - # Prepare plots - if plot_interval: - - # Plot physical grid and mapping's metric determinant - fig1, ax1 = plt.subplots(1, 1, figsize=(8, 6)) - im = ax1.contourf(x, y, np.sqrt(F.metric_det(x1, x2))) - add_colorbar(im, ax1, label=r'Metric determinant $\sqrt{g}$ of mapping $F$') - ax1.plot(*gridlines_x1, color='k') - ax1.plot(*gridlines_x2, color='k') - ax1.set_title('Mapped grid of {} x {} cells'.format(ncells, ncells)) - ax1.set_xlabel('x', fontsize=14) - ax1.set_ylabel('y', fontsize=14) - ax1.set_aspect('equal') - fig1.tight_layout() - fig1.show() - - # ... - # Plot initial conditions - # TODO: improve - for i, x1i in enumerate(x1[:, 0]): - for j, x2j in enumerate(x2[0, :]): - - Ex_values[i, j], Ey_values[i, j] = \ - push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) - - Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) - - # Electric field, x component - fig2 = plot_field_and_error(r'E^x', x, y, Ex_values, Ex_ex(0, x, y), *gridlines) - fig2.show() - - # Electric field, y component - fig3 = plot_field_and_error(r'E^y', x, y, Ey_values, Ey_ex(0, x, y), *gridlines) - fig3.show() - - # Magnetic field, z component - fig4 = plot_field_and_error(r'B^z', x, y, Bz_values, Bz_ex(0, x, y), *gridlines) - fig4.show() - # ... - - input('\nSimulation setup done... 
press any key to start') - - # Prepare diagnostics - if diagnostics_interval: - - # Exact energy at t=0 - We_ex, Wb_ex = exact_energies(t) - diagnostics_ex['time'].append(t) - diagnostics_ex['electric_energy'].append(We_ex) - diagnostics_ex['magnetic_energy'].append(Wb_ex) - - # Discrete energy at t=0 - We_num, Wb_num = discrete_energies(e, b) - diagnostics_num['time'].append(t) - diagnostics_num['electric_energy'].append(We_num) - diagnostics_num['magnetic_energy'].append(Wb_num) - - print('\nTotal energy in domain:') - print('ts = {:4d}, t = {:8.4f}, exact = {Wt_ex:.13e}, discrete = {Wt_num:.13e}'.format(0, - t, - Wt_ex = We_ex + Wb_ex, - Wt_num = We_num + Wb_num) - ) - else: - print('ts = {:4d}, t = {:8.4f}'.format(0, t)) - - #-------------------------------------------------------------------------- - # Solution - #-------------------------------------------------------------------------- - - # TODO: add option to convert to scipy sparse format - - # ... Arguments for time stepping - kwargs = {'verbose': verbose, 'tol': tol} - - if periodic: - args = (e, b, M1, M2, D1, D1_T) - else: - args = (e, b, M1 + M1_bc, M2, D1, D1_T) - kwargs['pc'] = 'jacobi' - # ... - - # Time loop - for ts in range(1, nsteps+1): - - # TODO: allow for high-order splitting - - # Strang splitting, 2nd order - step_faraday_2d(0.5*dt, *args, **kwargs) - step_ampere_2d ( dt, *args, **kwargs) - step_faraday_2d(0.5*dt, *args, **kwargs) - - t += dt - - # Animation - if plot_interval and (ts % plot_interval == 0 or ts == nsteps): - - # ... - # TODO: improve - for i, x1i in enumerate(x1[:, 0]): - for j, x2j in enumerate(x2[0, :]): - - Ex_values[i, j], Ey_values[i, j] = \ - push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) - - Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) - # ... - - # Update plot - update_plot(fig2, t, x, y, Ex_values, Ex_ex(t, x, y)) - update_plot(fig3, t, x, y, Ey_values, Ey_ex(t, x, y)) - update_plot(fig4, t, x, y, Bz_values, Bz_ex(t, x, y)) - plt.pause(0.1) - - # Scalar diagnostics - if diagnostics_interval and ts % diagnostics_interval == 0: - - # Update exact diagnostics - We_ex, Wb_ex = exact_energies(t) - diagnostics_ex['time'].append(t) - diagnostics_ex['electric_energy'].append(We_ex) - diagnostics_ex['magnetic_energy'].append(Wb_ex) - - # Update numerical diagnostics - We_num, Wb_num = discrete_energies(e, b) - diagnostics_num['time'].append(t) - diagnostics_num['electric_energy'].append(We_num) - diagnostics_num['magnetic_energy'].append(Wb_num) - - # Print total energy to terminal - print('ts = {:4d}, t = {:8.4f}, exact = {Wt_ex:.13e}, discrete = {Wt_num:.13e}'.format(ts, - t, - Wt_ex = We_ex + Wb_ex, - Wt_num = We_num + Wb_num) - ) - else: - print('ts = {:4d}, t = {:8.4f}'.format(ts, t)) - - #-------------------------------------------------------------------------- - # Post-processing - #-------------------------------------------------------------------------- - if MPI.COMM_WORLD.size == 1: - # (currently not available in parallel) - # ... - # TODO: improve - for i, x1i in enumerate(x1[:, 0]): - for j, x2j in enumerate(x2[0, :]): - - Ex_values[i, j], Ey_values[i, j] = \ - push_2d_hcurl(E.fields[0], E.fields[1], x1i, x2j, F) - - Bz_values[i, j] = push_2d_l2(B, x1i, x2j, F) - # ... 
- - # Error at final time - error_Ex = abs(Ex_ex(t, x, y) - Ex_values).max() - error_Ey = abs(Ey_ex(t, x, y) - Ey_values).max() - error_Bz = abs(Bz_ex(t, x, y) - Bz_values).max() - print() - print('Max-norm of error on Ex(t,x) at final time: {:.2e}'.format(error_Ex)) - print('Max-norm of error on Ey(t,x) at final time: {:.2e}'.format(error_Ey)) - print('Max-norm of error on Bz(t,x) at final time: {:.2e}'.format(error_Bz)) - - # compute L2 error as well - F = mapping.get_callable_mapping() - errx = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, F)[0] - Ex_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) - erry = lambda x1, x2: (push_2d_hcurl(E.fields[0], E.fields[1], x1, x2, F)[1] - Ey_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) - errz = lambda x1, x2: (push_2d_l2(B, x1, x2, F) - Bz_ex(t, *F(x1, x2)))**2 * np.sqrt(F.metric_det(x1,x2)) - error_l2_Ex = np.sqrt(derham_h.V1.spaces[0].integral(errx)) - error_l2_Ey = np.sqrt(derham_h.V1.spaces[1].integral(erry)) - error_l2_Bz = np.sqrt(derham_h.V0.integral(errz)) - print('L2 norm of error on Ex(t,x,y) at final time: {:.2e}'.format(error_l2_Ex)) - print('L2 norm of error on Ey(t,x,y) at final time: {:.2e}'.format(error_l2_Ey)) - print('L2 norm of error on Bz(t,x,y) at final time: {:.2e}'.format(error_l2_Bz)) - - if diagnostics_interval: - - # Extract exact diagnostics - t_ex = np.asarray(diagnostics_ex['time']) - We_ex = np.asarray(diagnostics_ex['electric_energy']) - Wb_ex = np.asarray(diagnostics_ex['magnetic_energy']) - Wt_ex = We_ex + Wb_ex - - # Extract numerical diagnostics - t_num = np.asarray(diagnostics_num['time']) - We_num = np.asarray(diagnostics_num['electric_energy']) - Wb_num = np.asarray(diagnostics_num['magnetic_energy']) - Wt_num = We_num + Wb_num - - # Energy plots - fig3, (ax31, ax32, ax33) = plt.subplots(3, 1, figsize=(12, 10)) - # - ax31.set_title('Energy of exact solution') - ax31.plot(t_ex, We_ex, label='electric') - ax31.plot(t_ex, Wb_ex, label='magnetic') - ax31.plot(t_ex, Wt_ex, label='total' ) - ax31.legend() - ax31.set_xlabel('t') - ax31.set_ylabel('W', rotation='horizontal') - ax31.grid() - # - ax32.set_title('Energy of numerical solution') - ax32.plot(t_num, We_num, label='electric') - ax32.plot(t_num, Wb_num, label='magnetic') - ax32.plot(t_num, Wt_num, label='total' ) - ax32.legend() - ax32.set_xlabel('t') - ax32.set_ylabel('W', rotation='horizontal') - ax32.grid() - # - ax33.set_title('Relative error in total energy') - ax33.plot(t_ex , (Wt_ex - Wt_ex) / Wt_ex[0], '--', label='exact') - ax33.plot(t_num, (Wt_num - Wt_ex) / Wt_ex[0], '-' , label='numerical') - ax33.legend() - ax33.set_xlabel('t') - ax33.set_ylabel('(W - W_ex) / W_ex(t=0)') - ax33.grid() - # - fig3.tight_layout() - fig3.show() - - # Return whole namespace as dictionary - return locals() - -#============================================================================== -# UNIT TESTS -#============================================================================== - -def test_maxwell_2d_periodic(): - - namespace = run_maxwell_2d_TE( - eps = 0.5, - ncells = 12, - degree = 3, - periodic = True, - Cp = 0.5, - nsteps = 1, - tend = None, - splitting_order = 2, - plot_interval = 0, - diagnostics_interval = 0, - tol = 1e-6, - verbose = False - ) - - TOL = 1e-6 - ref = dict(error_Ex = 6.870389e-03, - error_Ey = 6.870389e-03, - error_Bz = 4.443822e-03) - - assert abs(namespace['error_Ex'] - ref['error_Ex']) / ref['error_Ex'] <= TOL - assert abs(namespace['error_Ey'] - ref['error_Ey']) / ref['error_Ey'] <= TOL - assert 
abs(namespace['error_Bz'] - ref['error_Bz']) / ref['error_Bz'] <= TOL - - -def test_maxwell_2d_dirichlet(): - - namespace = run_maxwell_2d_TE( - eps = 0.5, - ncells = 10, - degree = 5, - periodic = False, - Cp = 0.5, - nsteps = 1, - tend = None, - splitting_order = 2, - plot_interval = 0, - diagnostics_interval = 0, - tol = 1e-6, - verbose = False - ) - - TOL = 1e-6 - ref = dict(error_Ex = 3.597840e-03, - error_Ey = 3.597840e-03, - error_Bz = 4.366314e-03) - - assert abs(namespace['error_Ex'] - ref['error_Ex']) / ref['error_Ex'] <= TOL - assert abs(namespace['error_Ey'] - ref['error_Ey']) / ref['error_Ey'] <= TOL - assert abs(namespace['error_Bz'] - ref['error_Bz']) / ref['error_Bz'] <= TOL - -@pytest.mark.parallel -def test_maxwell_2d_periodic_par(): - - namespace = run_maxwell_2d_TE( - eps = 0.5, - ncells = 12, - degree = 3, - periodic = True, - Cp = 0.5, - nsteps = 1, - tend = None, - splitting_order = 2, - plot_interval = 0, - diagnostics_interval = 0, - tol = 1e-6, - verbose = False - ) - - TOL = 1e-6 - ref = dict(error_l2_Ex = 4.2115063593622278e-03, - error_l2_Ey = 4.2115065915750306e-03, - error_l2_Bz = 3.6252141126597646e-03) - - assert abs(namespace['error_l2_Ex'] - ref['error_l2_Ex']) / ref['error_l2_Ex'] <= TOL - assert abs(namespace['error_l2_Ey'] - ref['error_l2_Ey']) / ref['error_l2_Ey'] <= TOL - assert abs(namespace['error_l2_Bz'] - ref['error_l2_Bz']) / ref['error_l2_Bz'] <= TOL - -@pytest.mark.parallel -def test_maxwell_2d_dirichlet_par(): - - namespace = run_maxwell_2d_TE( - eps = 0.5, - ncells = 10, - degree = 5, - periodic = False, - Cp = 0.5, - nsteps = 1, - tend = None, - splitting_order = 2, - plot_interval = 0, - diagnostics_interval = 0, - tol = 1e-6, - verbose = False - ) - - TOL = 1e-6 - ref = dict(error_l2_Ex = 1.3223335792411782e-03, - error_l2_Ey = 1.3223335792411910e-03, - error_l2_Bz = 4.0492562719804193e-03) - - assert abs(namespace['error_l2_Ex'] - ref['error_l2_Ex']) / ref['error_l2_Ex'] <= TOL - assert abs(namespace['error_l2_Ey'] - ref['error_l2_Ey']) / ref['error_l2_Ey'] <= TOL - assert abs(namespace['error_l2_Bz'] - ref['error_l2_Bz']) / ref['error_l2_Bz'] <= TOL - -#============================================================================== -# SCRIPT CAPABILITIES -#============================================================================== -if __name__ == '__main__': - - import argparse - - parser = argparse.ArgumentParser( - formatter_class = argparse.ArgumentDefaultsHelpFormatter, - description = "Solve 2D Maxwell's equations in rectangular cavity with spline FEEC method." - ) - - parser.add_argument('ncells', - type = int, - help = 'Number of cells in domain' - ) - - parser.add_argument('degree', - type = int, - help = 'Polynomial spline degree' - ) - - parser.add_argument( '-P', '--periodic', - action = 'store_true', - help = 'Use periodic boundary conditions' - ) - - parser.add_argument('-o', '--splitting_order', - type = int, - default = 2, - choices = [2, 4, 6], - help = 'Order of accuracy of operator splitting' - ) - - parser.add_argument( '-e', - type = float, - default = 0.25, - dest = 'eps', - metavar = 'EPS', - help = 'Deformation level (0 <= EPS < 1)' - ) - - parser.add_argument( '-c', - type = float, - default = 0.5, - dest = 'Cp', - metavar = 'Cp', - help = 'Courant parameter on uniform grid' - ) - - # ... 
- time_opts = parser.add_mutually_exclusive_group() - time_opts.add_argument( '-t', - type = int, - default = 1, - dest = 'nsteps', - metavar = 'NSTEPS', - help = 'Number of time-steps to be taken' - ) - time_opts.add_argument( '-T', - type = float, - dest = 'tend', - metavar = 'END_TIME', - help = 'Run simulation until given final time' - ) - # ... - - parser.add_argument( '-p', - type = int, - default = 4, - metavar = 'I', - dest = 'plot_interval', - help = 'No. of time steps between successive plots of solution, if I=0 no plots are made' - ) - - parser.add_argument( '-d', - type = int, - default = 1, - metavar = 'I', - dest = 'diagnostics_interval', - help = 'No. of time steps between successive calculations of scalar diagnostics, if I=0 no diagnostics are computed' - ) - - parser.add_argument( '--tol', - type = float, - default = 1e-7, - help = 'Tolerance for iterative solver (L2-norm of residual)' - ) - - parser.add_argument( '-v', '--verbose', - action = 'store_true', - help = 'Print convergence information of iterative solver' - ) - - # Read input arguments - args = parser.parse_args() - - # Run simulation - namespace = run_maxwell_2d_TE(**vars(args)) - - # Keep matplotlib windows open - import matplotlib.pyplot as plt - plt.show() From f17ba9fb8ce31637510aa23d639fb2cfcc166d2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Thu, 20 Oct 2022 15:44:50 +0200 Subject: [PATCH 20/28] Don't use `periodic` argument in unit test when not meaningful --- psydac/api/tests/test_api_feec_2d.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/psydac/api/tests/test_api_feec_2d.py b/psydac/api/tests/test_api_feec_2d.py index d584e97da..d3e25b306 100644 --- a/psydac/api/tests/test_api_feec_2d.py +++ b/psydac/api/tests/test_api_feec_2d.py @@ -289,16 +289,14 @@ class CollelaMapping2D(Mapping): a1 = BilinearForm((u1, v1), integral(domain, dot(u1, v1))) a2 = BilinearForm((u2, v2), integral(domain, u2 * v2)) - # If needed, use penalization to apply homogeneous Dirichlet BCs - if not periodic: - nn = NormalVector('nn') - a1_bc = BilinearForm((u1, v1), - integral(domain.boundary, 1e30 * cross(u1, nn) * cross(v1, nn))) + # Penalization to apply homogeneous Dirichlet BCs (will only be used if domain is not periodic) + nn = NormalVector('nn') + a1_bc = BilinearForm((u1, v1), + integral(domain.boundary, 1e30 * cross(u1, nn) * cross(v1, nn))) #-------------------------------------------------------------------------- # Discrete objects: Psydac #-------------------------------------------------------------------------- - if use_spline_mapping: domain_h = discretize(domain, filename=filename, comm=MPI.COMM_WORLD) derham_h = discretize(derham, domain_h) From 124dec17fcca3b4190c1af9fe8a5b8fec4a1126c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 21 Oct 2022 10:55:41 +0200 Subject: [PATCH 21/28] Use SymPDE version 0.16.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 81234b381..e09ef9599 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ 'pyevtk', # Our packages from PyPi - 'sympde @ git+https://github.com/pyccel/sympde@fix-psydac-issue-189', + 'sympde==0.16.0', 'pyccel>=1.5.1', 'gelato==0.11', From f606fe0ab9e9ba189de50e25683c73d521055664 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Wed, 16 Nov 2022 13:37:20 +0100 Subject: [PATCH 22/28] Use Sympde version 0.16.1 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/setup.py b/setup.py index e09ef9599..68a658e0e 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ 'pyevtk', # Our packages from PyPi - 'sympde==0.16.0', + 'sympde==0.16.1', 'pyccel>=1.5.1', 'gelato==0.11', From dd9fa538047b52f40743e74403136ef51bfbf651 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 25 Nov 2022 13:34:00 +0100 Subject: [PATCH 23/28] Fix 2D Poisson example: use new Mapping class --- examples/poisson_2d_mapping.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/examples/poisson_2d_mapping.py b/examples/poisson_2d_mapping.py index 9f0bafd23..87028f7aa 100644 --- a/examples/poisson_2d_mapping.py +++ b/examples/poisson_2d_mapping.py @@ -10,6 +10,7 @@ from sympde.topology.analytical_mapping import IdentityMapping, PolarMapping from sympde.topology.analytical_mapping import TargetMapping, CzarnyMapping +from psydac.ddm.cart import DomainDecomposition from psydac.linalg.stencil import StencilVector, StencilMatrix from psydac.linalg.iterative_solvers import cg from psydac.fem.splines import SplineSpace @@ -574,12 +575,15 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr grid_1 = np.linspace( *model.domain[0], num=ne1+1 ) grid_2 = np.linspace( *model.domain[1], num=ne2+1 ) + # Decompose 2D domain across MPI processes + dd = DomainDecomposition(ncells, model.periodic, comm=mpi_comm) + # Create 1D finite element spaces V1 = SplineSpace( p1, grid=grid_1, periodic=per1 ) V2 = SplineSpace( p2, grid=grid_2, periodic=per2 ) # Create 2D tensor product finite element space - V = TensorFemSpace( V1, V2, comm=mpi_comm ) + V = TensorFemSpace(dd, V1, V2) s1, s2 = V.vector_space.starts e1, e2 = V.vector_space.ends @@ -623,8 +627,7 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr map_analytic = model.mapping if use_spline_mapping: - map_discrete = SplineMapping.from_mapping( V, map_analytic.symbolic_mapping ) - map_discrete.jacobian = map_discrete.jac_mat # needed after changes in mapping classes + map_discrete = SplineMapping.from_mapping(V, map_analytic) # Write discrete geometry to HDF5 file t0 = time() geometry = Geometry.from_discrete_mapping(map_discrete, comm=mpi_comm) @@ -758,7 +761,8 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr V = map_discrete.space mapping = map_discrete else: - V = TensorFemSpace( V1, V2, comm=MPI.COMM_SELF ) + dd = DomainDecomposition(ncells, model.periodic, comm=MPI.COMM_SELF) + V = TensorFemSpace(dd, V1, V2) # Import solution vector into new serial field phi, = V.import_fields( 'fields.h5', 'phi' ) From 14486bfff2b930010e1de6fac3243d6f724f4476 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Fri, 25 Nov 2022 14:14:47 +0100 Subject: [PATCH 24/28] Apply formatting to 2D Poisson example --- examples/poisson_2d_mapping.py | 420 ++++++++++++++++----------------- 1 file changed, 210 insertions(+), 210 deletions(-) diff --git a/examples/poisson_2d_mapping.py b/examples/poisson_2d_mapping.py index 87028f7aa..c613403ad 100644 --- a/examples/poisson_2d_mapping.py +++ b/examples/poisson_2d_mapping.py @@ -36,22 +36,22 @@ def __init__(self, mapping): self._metric_det = sym.metric_det_expr # ... - def __call__( self, phi ): + def __call__(self, phi): from sympy import sqrt, Matrix u = self._eta G = self._metric - sqrt_g = sqrt( self._metric_det ) + sqrt_g = sqrt(self._metric_det) # Store column vector of partial derivatives of phi w.r.t. 
uj - dphi_du = Matrix( [phi.diff( uj ) for uj in u] ) + dphi_du = Matrix([phi.diff(uj) for uj in u]) # Compute gradient of phi in tangent basis: A = G^(-1) dphi_du - A = G.LUsolve( dphi_du ) + A = G.LUsolve(dphi_du) # Compute Laplacian of phi using formula for divergence of vector A - lapl = sum( (sqrt_g*Ai).diff( ui ) for ui,Ai in zip( u,A ) ) / sqrt_g + lapl = sum((sqrt_g * Ai).diff(ui) for ui, Ai in zip(u, A)) / sqrt_g return lapl @@ -65,7 +65,7 @@ class Poisson2D: $(\partial^2_{xx} + \partial^2_{yy}) \phi(x,y) = -\rho(x,y)$ """ - def __init__( self, domain, periodic, mapping, phi, rho, O_point=False ): + def __init__(self, domain, periodic, mapping, phi, rho, O_point=False): self._domain = domain self._periodic = periodic @@ -76,7 +76,7 @@ def __init__( self, domain, periodic, mapping, phi, rho, O_point=False ): # ... @staticmethod - def new_square( mx=1, my=1 ): + def new_square(mx=1, my=1): """ Solve Poisson's equation on the unit square. @@ -92,13 +92,13 @@ def new_square( mx=1, my=1 ): from sympy import symbols, sin, cos, pi, lambdify x,y = symbols('x y') - phi_e = sin( mx*pi*x ) * sin( my*pi*y ) - rho_e = -phi_e.diff(x,2)-phi_e.diff(y,2) + phi_e = sin(mx * pi * x) * sin(my * pi * y) + rho_e = -phi_e.diff(x, 2) - phi_e.diff(y, 2) - phi = lambdify( [x,y], phi_e ) - rho = lambdify( [x,y], rho_e ) + phi = lambdify([x, y], phi_e) + rho = lambdify([x, y], rho_e) - return Poisson2D( domain, periodic, mapping, phi, rho ) + return Poisson2D(domain, periodic, mapping, phi, rho) # ... @staticmethod @@ -120,23 +120,23 @@ def new_annulus( rmin=0.5, rmax=1.0 ): from sympy import symbols, sin, cos, pi, lambdify - lapl = Laplacian( mapping ) + lapl = Laplacian(mapping) r, t = mapping.symbolic_mapping.logical_coordinates x, y = mapping.symbolic_mapping.expressions # Manufactured solutions in logical coordinates parab = (r-rmin) * (rmax-r) * 4 / (rmax-rmin)**2 - phi_e = parab * sin( 2*pi*x ) * sin( 2*pi*y ) - rho_e = -lapl( phi_e ) + phi_e = parab * sin(2*pi*x) * sin(2*pi*y) + rho_e = -lapl(phi_e) # For further simplifications, assume that (r,t) are positive and real - R,T = symbols( 'R T', real=True, positive=True ) - phi_e = phi_e.subs( {r:R, t:T} ).simplify() - rho_e = rho_e.subs( {r:R, t:T} ).simplify() + R,T = symbols('R T', real=True, positive=True) + phi_e = phi_e.subs({r:R, t:T}).simplify() + rho_e = rho_e.subs({r:R, t:T}).simplify() # Callable functions - phi = lambdify( [R,T], phi_e ) - rho = lambdify( [R,T], rho_e ) + phi = lambdify([R, T], phi_e) + rho = lambdify([R, T], rho_e) return Poisson2D( domain, periodic, mapping, phi, rho, O_point=(rmin==0) ) @@ -154,39 +154,39 @@ def new_circle(): $\phi(x,y) = 1-r**2$. """ - domain = ((0,1),(0,2*np.pi)) + domain = ((0, 1), (0, 2*np.pi)) periodic = (False, True) mapping = PolarMapping('F', c1=0, c2=0, rmin=0, rmax=2*np.pi).get_callable_mapping() from sympy import lambdify - lapl = Laplacian( mapping ) + lapl = Laplacian(mapping) r, t = mapping.symbolic_mapping.logical_coordinates # Manufactured solutions in logical coordinates phi_e = 1-r**2 - rho_e = -lapl( phi_e ) + rho_e = -lapl(phi_e) # Callable functions - phi = lambdify( [r,t], phi_e ) - rho = lambdify( [r,t], rho_e ) + phi = lambdify([r, t], phi_e) + rho = lambdify([r, t], rho_e) - rho = np.vectorize( rho ) + rho = np.vectorize(rho) - return Poisson2D( domain, periodic, mapping, phi, rho, O_point=True ) + return Poisson2D(domain, periodic, mapping, phi, rho, O_point=True) # ... 
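For reference, the formula implemented by the Laplacian helper defined earlier in this file (a standard identity, restated here only for readability): with metric matrix G of the mapping, g = det G, and logical coordinates u_i,

\[
\Delta \phi \;=\; \frac{1}{\sqrt{g}} \sum_i \frac{\partial}{\partial u_i}
\Big( \sqrt{g} \sum_j (G^{-1})_{ij} \, \frac{\partial \phi}{\partial u_j} \Big).
\]

This is what `lapl = sum((sqrt_g * Ai).diff(ui) for ui, Ai in zip(u, A)) / sqrt_g` evaluates symbolically, with `A = G.LUsolve(dphi_du)` providing the vector G^{-1} dphi/du.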
@staticmethod def new_target(): - domain = ((0,1),(0,2*np.pi)) + domain = ((0, 1), (0, 2*np.pi)) periodic = (False, True) params = dict(c1=0, c2=0, k=0.3, D=0.2) mapping = TargetMapping('F', **params).get_callable_mapping() from sympy import symbols, sin, cos, pi, lambdify - lapl = Laplacian( mapping ) + lapl = Laplacian(mapping) s, t = mapping.symbolic_mapping.logical_coordinates x, y = mapping.symbolic_mapping.expressions @@ -195,12 +195,12 @@ def new_target(): D = params['D'] kx = 2*pi/(1-k+D) ky = 2*pi/(1+k) - phi_e = (1-s**8) * sin( kx*(x-0.5) ) * cos( ky*y ) - rho_e = -lapl( phi_e ) + phi_e = (1-s**8) * sin(kx*(x-0.5)) * cos(ky*y) + rho_e = -lapl(phi_e) # Callable functions - phi = lambdify( [s,t], phi_e ) - rho = lambdify( [s,t], rho_e ) + phi = lambdify([s, t], phi_e) + rho = lambdify([s, t], rho_e) return Poisson2D( domain, periodic, mapping, phi, rho, O_point=True ) @@ -208,54 +208,54 @@ def new_target(): @staticmethod def new_czarny(): - domain = ((0,1),(0,2*np.pi)) + domain = ((0, 1), (0, 2*np.pi)) periodic = (False, True) params = dict(c1=0, c2=0, eps=0.2, b=1.4) mapping = CzarnyMapping('F', **params).get_callable_mapping() from sympy import symbols, sin, cos, pi, lambdify - lapl = Laplacian( mapping ) - s, t = mapping.symbolic_mapping.logical_coordinates - x, y = mapping.symbolic_mapping.expressions + lapl = Laplacian(mapping) + s, t = mapping.symbolic_mapping.logical_coordinates + x, y = mapping.symbolic_mapping.expressions # Manufactured solution in logical coordinates - phi_e = (1-s**8) * sin( pi*x ) * cos( pi*y ) - rho_e = -lapl( phi_e ) + phi_e = (1-s**8) * sin(pi*x) * cos(pi*y) + rho_e = -lapl(phi_e) # Callable functions - phi = lambdify( [s,t], phi_e ) - rho = lambdify( [s,t], rho_e ) + phi = lambdify([s, t], phi_e) + rho = lambdify([s, t], rho_e) - return Poisson2D( domain, periodic, mapping, phi, rho, O_point=True ) + return Poisson2D(domain, periodic, mapping, phi, rho, O_point=True) # ... @property - def domain( self ): + def domain(self): return self._domain @property - def periodic( self ): + def periodic(self): return self._periodic @property - def mapping( self ): + def mapping(self): return self._mapping @property - def phi( self ): + def phi(self): return self._phi @property - def rho( self ): + def rho(self): return self._rho @property - def O_point( self ): + def O_point(self): return self._O_point #============================================================================== -def kernel( p1, p2, nq1, nq2, bs1, bs2, w1, w2, jac_mat, mat_m, mat_s ): +def kernel(p1, p2, nq1, nq2, bs1, bs2, w1, w2, jac_mat, mat_m, mat_s): """ Kernel for computing the mass/stiffness element matrices. @@ -298,8 +298,8 @@ def kernel( p1, p2, nq1, nq2, bs1, bs2, w1, w2, jac_mat, mat_m, mat_s ): """ # Reset element matrices - mat_m[:,:,:,:] = 0. - mat_s[:,:,:,:] = 0. + mat_m[:, :, :, :] = 0. + mat_s[:, :, :, :] = 0. 
# Cycle over non-zero test functions in element for il1 in range(p1+1): @@ -346,18 +346,18 @@ def kernel( p1, p2, nq1, nq2, bs1, bs2, w1, w2, jac_mat, mat_m, mat_s ): bj_y = inv_jac_det * (-x_x2*bj_x1 + x_x1*bj_x2) # Get volume associated to quadrature point - wvol = w1[q1] * w2[q2] * abs( jac_det ) + wvol = w1[q1] * w2[q2] * abs(jac_det) # Add contribution to integrals v_m += bi_0 * bj_0 * wvol v_s += (bi_x * bj_x + bi_y * bj_y) * wvol # Update element matrices - mat_m[il1, il2, p1+jl1-il1, p2+jl2-il2 ] = v_m - mat_s[il1, il2, p1+jl1-il1, p2+jl2-il2 ] = v_s + mat_m[il1, il2, p1+jl1-il1, p2+jl2-il2] = v_m + mat_s[il1, il2, p1+jl1-il1, p2+jl2-il2] = v_s #============================================================================== -def assemble_matrices( V, mapping, kernel ): +def assemble_matrices(V, mapping, kernel): """ Assemble mass and stiffness matrices using 2D stencil format. @@ -395,36 +395,36 @@ def assemble_matrices( V, mapping, kernel ): [weights_1, weights_2] = [g.weights for g in V.quad_grids] # Create global matrices - mass = StencilMatrix( V.vector_space, V.vector_space ) - stiffness = StencilMatrix( V.vector_space, V.vector_space ) + mass = StencilMatrix(V.vector_space, V.vector_space) + stiffness = StencilMatrix(V.vector_space, V.vector_space) # Create element matrices - mat_m = np.zeros( (p1+1, p2+1, 2*p1+1, 2*p2+1) ) # mass - mat_s = np.zeros( (p1+1, p2+1, 2*p1+1, 2*p2+1) ) # stiffness + mat_m = np.zeros((p1+1, p2+1, 2*p1+1, 2*p2+1)) # mass + mat_s = np.zeros((p1+1, p2+1, 2*p1+1, 2*p2+1)) # stiffness # Build global matrices: cycle over elements - for k1 in range( nk1 ): - for k2 in range( nk2 ): + for k1 in range(nk1): + for k2 in range(nk2): # Get spline index, B-splines' values and quadrature weights is1 = spans_1[k1] - bs1 = basis_1[k1,:,:,:] - w1 = weights_1[k1,:] + bs1 = basis_1[k1, :, :, :] + w1 = weights_1[k1, :] is2 = spans_2[k2] - bs2 = basis_2[k2,:,:,:] - w2 = weights_2[k2,:] + bs2 = basis_2[k2, :, :, :] + w2 = weights_2[k2, :] # Compute Jacobian matrix at all quadrature points - jac_mat = np.empty( (nq1,nq2, 2, 2) ) - for q1 in range( nq1 ): - for q2 in range( nq2 ): - x1 = points_1[k1,q1] - x2 = points_2[k2,q2] + jac_mat = np.empty((nq1, nq2, 2, 2)) + for q1 in range(nq1): + for q2 in range(nq2): + x1 = points_1[k1, q1] + x2 = points_2[k2, q2] jac_mat[q1, q2, :, :] = mapping.jacobian(x1, x2) # Compute element matrices - kernel( p1, p2, nq1, nq2, bs1, bs2, w1, w2, jac_mat, mat_m, mat_s ) + kernel(p1, p2, nq1, nq2, bs1, bs2, w1, w2, jac_mat, mat_m, mat_s) # Update global matrices mass [is1-p1:is1+1, is2-p2:is2+1, :, :] += mat_m[:, :, :, :] @@ -437,7 +437,7 @@ def assemble_matrices( V, mapping, kernel ): return mass, stiffness #============================================================================== -def assemble_rhs( V, mapping, f ): +def assemble_rhs(V, mapping, f): """ Assemble right-hand-side vector. 
@@ -472,42 +472,42 @@ def assemble_rhs( V, mapping, f ): [weights_1, weights_2] = [g.weights for g in V.quad_grids] # Data structure - rhs = StencilVector( V.vector_space ) + rhs = StencilVector(V.vector_space) # Build RHS - for k1 in range( nk1 ): - for k2 in range( nk2 ): + for k1 in range(nk1): + for k2 in range(nk2): # Get spline index, B-splines' values and quadrature weights is1 = spans_1[k1] - bs1 = basis_1[k1,:,:,:] - w1 = weights_1[k1,:] - x1 = points_1[k1,:] + bs1 = basis_1[k1, :, :, :] + w1 = weights_1[k1, :] + x1 = points_1[k1, :] is2 = spans_2[k2] - bs2 = basis_2[k2,:,:,:] - w2 = weights_2[k2,:] - x2 = points_2[k2,:] + bs2 = basis_2[k2, :, :, :] + w2 = weights_2[k2, :] + x2 = points_2[k2, :] # Evaluate function at all quadrature points - f_quad = f( *np.meshgrid( x1, x2, indexing='ij' ) ) + f_quad = f(*np.meshgrid(x1, x2, indexing='ij')) # Compute Jacobian determinant at all quadrature points - metric_det = np.empty( (nq1,nq2) ) - for q1 in range( nq1 ): - for q2 in range( nq2 ): + metric_det = np.empty((nq1, nq2)) + for q1 in range(nq1): + for q2 in range(nq2): metric_det[q1, q2] = mapping.metric_det(x1[q1], x2[q2]) - jac_det = np.sqrt( metric_det ) + jac_det = np.sqrt(metric_det) - for il1 in range( p1+1 ): - for il2 in range( p2+1 ): + for il1 in range(p1+1): + for il2 in range(p2+1): v = 0.0 - for q1 in range( nq1 ): - for q2 in range( nq2 ): + for q1 in range(nq1): + for q2 in range(nq2): bi_0 = bs1[il1, 0, q1] * bs2[il2, 0, q2] - wvol = w1[q1] * w2[q2] * jac_det[q1,q2] - v += bi_0 * f_quad[q1,q2] * wvol + wvol = w1[q1] * w2[q2] * jac_det[q1, q2] + v += bi_0 * f_quad[q1, q2] * wvol # Global index of test basis i1 = is1 - p1 + il1 @@ -523,7 +523,7 @@ def assemble_rhs( V, mapping, f ): #################################################################################### -def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distribute_viz ): +def main(*, test_case, ncells, degree, use_spline_mapping, c1_correction, distribute_viz): timing = {} timing['assembly' ] = 0.0 @@ -534,9 +534,9 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr # Method of manufactured solution if test_case == 'square': - model = Poisson2D.new_square( mx=1, my=1 ) + model = Poisson2D.new_square(mx=1, my=1) elif test_case == 'annulus': - model = Poisson2D.new_annulus( rmin=0.1, rmax=1.0 ) + model = Poisson2D.new_annulus(rmin=0.1, rmax=1.0) elif test_case == 'circle': model = Poisson2D.new_circle() elif test_case == 'target': @@ -544,18 +544,18 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr elif test_case == 'czarny': model = Poisson2D.new_czarny() else: - raise ValueError( "Only available test-cases are 'square', 'annulus', " - "'circle', 'target' and 'czarny'" ) + raise ValueError("Only available test-cases are 'square', 'annulus', " + "'circle', 'target' and 'czarny'") if c1_correction and (not model.O_point): - print( "WARNING: cannot use C1 correction in geometry without polar singularity!" ) - print( "WARNING: setting 'c1_correction' flag to False..." ) + print("WARNING: cannot use C1 correction in geometry without polar singularity!") + print("WARNING: setting 'c1_correction' flag to False...") print() c1_correction = False if c1_correction and (not use_spline_mapping): - print( "WARNING: cannot use C1 correction without spline mapping!" ) - print( "WARNING: setting 'c1_correction' flag to False..." 
) + print("WARNING: cannot use C1 correction without spline mapping!") + print("WARNING: setting 'c1_correction' flag to False...") print() c1_correction = False @@ -572,15 +572,15 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr per1, per2 = model.periodic # Create uniform grid - grid_1 = np.linspace( *model.domain[0], num=ne1+1 ) - grid_2 = np.linspace( *model.domain[1], num=ne2+1 ) + grid_1 = np.linspace(*model.domain[0], num=ne1+1) + grid_2 = np.linspace(*model.domain[1], num=ne2+1) # Decompose 2D domain across MPI processes dd = DomainDecomposition(ncells, model.periodic, comm=mpi_comm) # Create 1D finite element spaces - V1 = SplineSpace( p1, grid=grid_1, periodic=per1 ) - V2 = SplineSpace( p2, grid=grid_2, periodic=per2 ) + V1 = SplineSpace(p1, grid=grid_1, periodic=per1) + V2 = SplineSpace(p2, grid=grid_2, periodic=per2) # Create 2D tensor product finite element space V = TensorFemSpace(dd, V1, V2) @@ -591,34 +591,34 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Print decomposition information to terminal if mpi_rank == 0: - print( '--------------------------------------------------' ) - print( ' CARTESIAN DECOMPOSITION' ) - print( '--------------------------------------------------' ) - int_array_to_str = lambda array: ','.join( '{:3d}'.format(i) for i in array ) + print('--------------------------------------------------') + print(' CARTESIAN DECOMPOSITION' ) + print('--------------------------------------------------') + int_array_to_str = lambda array: ','.join('{:3d}'.format(i) for i in array) int_tuples_to_str = lambda tuples: ', '.join( - '[{:d}, {:d}]'.format(a,b) for a,b in tuples ) + '[{:d}, {:d}]'.format(a,b) for a,b in tuples) cart = V.vector_space.cart - block_sizes_i1 = [e1-s1+1 for s1,e1 in zip( cart.global_starts[0], cart.global_ends[0] )] - block_sizes_i2 = [e2-s2+1 for s2,e2 in zip( cart.global_starts[1], cart.global_ends[1] )] - - block_intervals_i1 = [(s1,e1) for s1,e1 in zip( cart.global_starts[0], cart.global_ends[0] )] - block_intervals_i2 = [(s2,e2) for s2,e2 in zip( cart.global_starts[1], cart.global_ends[1] )] - - print( '> No. of points along eta1 :: {:d}'.format( cart.npts[0] ) ) - print( '> No. of points along eta2 :: {:d}'.format( cart.npts[1] ) ) - print( '' ) - print( '> No. of blocks along eta1 :: {:d}'.format( cart.nprocs[0] ) ) - print( '> No. of blocks along eta2 :: {:d}'.format( cart.nprocs[1] ) ) - print( '' ) - print( '> Block sizes along eta1 :: ' + int_array_to_str( block_sizes_i1 ) ) - print( '> Block sizes along eta2 :: ' + int_array_to_str( block_sizes_i2 ) ) - print( '' ) - print( '> Intervals along eta1 :: ' + int_tuples_to_str( block_intervals_i1 ) ) - print( '> Intervals along eta2 :: ' + int_tuples_to_str( block_intervals_i2 ) ) - print( '', flush=True ) - sleep( 0.001 ) + block_sizes_i1 = [e1-s1+1 for s1, e1 in zip(cart.global_starts[0], cart.global_ends[0])] + block_sizes_i2 = [e2-s2+1 for s2, e2 in zip(cart.global_starts[1], cart.global_ends[1])] + + block_intervals_i1 = [(s1, e1) for s1, e1 in zip(cart.global_starts[0], cart.global_ends[0])] + block_intervals_i2 = [(s2, e2) for s2, e2 in zip(cart.global_starts[1], cart.global_ends[1])] + + print('> No. of points along eta1 :: {:d}'.format(cart.npts[0])) + print('> No. of points along eta2 :: {:d}'.format(cart.npts[1])) + print('') + print('> No. of blocks along eta1 :: {:d}'.format(cart.nprocs[0])) + print('> No. 
of blocks along eta2 :: {:d}'.format(cart.nprocs[1])) + print('') + print('> Block sizes along eta1 :: ' + int_array_to_str(block_sizes_i1)) + print('> Block sizes along eta2 :: ' + int_array_to_str(block_sizes_i2)) + print('') + print('> Intervals along eta1 :: ' + int_tuples_to_str(block_intervals_i1)) + print('> Intervals along eta2 :: ' + int_tuples_to_str(block_intervals_i2)) + print('', flush=True) + sleep(0.001) mpi_comm.Barrier() #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -640,8 +640,8 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr # Build mass and stiffness matrices, and right-hand side vector t0 = time() - M, S = assemble_matrices( V, mapping, kernel ) - b = assemble_rhs( V, mapping, model.rho ) + M, S = assemble_matrices(V, mapping, kernel) + b = assemble_rhs(V, mapping, model.rho) t1 = time() timing['assembly'] = t1-t0 @@ -649,10 +649,10 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr # stiffness/mass matrices and right-hand-side vector to C1 space if c1_correction: t0 = time() - proj = C1Projector( mapping ) - Sp = proj.change_matrix_basis( S ) - Mp = proj.change_matrix_basis( M ) - bp = proj.change_rhs_basis( b ) + proj = C1Projector(mapping) + Sp = proj.change_matrix_basis(S) + Mp = proj.change_matrix_basis(M) + bp = proj.change_rhs_basis(b) t1 = time() timing['projection'] = t1-t0 @@ -661,79 +661,79 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr if not V1.periodic: # left bc at x=0. if not model.O_point and s1 == 0: - S[s1,:,:,:] = 0. - b[s1,:] = 0. + S[s1, :, :, :] = 0. + b[s1, :] = 0. # right bc at x=1. if e1 == V1.nbasis-1: - S[e1,:,:,:] = 0. - b[e1,:] = 0. + S[e1, :, :, :] = 0. + b[e1, :] = 0. if not V2.periodic: # lower bc at y=0. if s2 == 0: - S[:,s2,:,:] = 0. - b[:,s2] = 0. + S[:, s2, :, :] = 0. + b[:, s2] = 0. # upper bc at y=1. if e2 == V2.nbasis-1: - S[:,e2,:,:] = 0. - b[:,e2] = 0. + S[:, e2, :, :] = 0. + b[:, e2] = 0. if c1_correction and e1 == V1.nbasis-1: # only bc is at s=1 - last = bp[1].space.npts[0]-1 - Sp[1,1][last,:,:,:] = 0. - bp[1] [last,:] = 0. + last = bp[1].space.npts[0] - 1 + Sp[1,1][last, :, :, :] = 0. + bp[1] [last, :] = 0. 
# Solve linear system if c1_correction: t0 = time() - xp, info = cg( Sp, bp, tol=1e-7, maxiter=100, verbose=False ) - x = proj.convert_to_tensor_basis( xp ) + xp, info = cg(Sp, bp, tol=1e-7, maxiter=100, verbose=False) + x = proj.convert_to_tensor_basis(xp) t1 = time() else: t0 = time() - x, info = cg( S, b, tol=1e-7, maxiter=100, verbose=False ) + x, info = cg(S, b, tol=1e-7, maxiter=100, verbose=False) t1 = time() timing['solution'] = t1-t0 # Create potential field - phi = FemField( V, coeffs=x ) + phi = FemField(V, coeffs=x) phi.coeffs.update_ghost_regions() # Compute L2 norm of error t0 = time() - sqrt_g = lambda *x: np.sqrt( mapping.metric_det(*x) ) - integrand = lambda *x: (phi(*x)-model.phi(*x))**2 * sqrt_g(*x) - err2 = np.sqrt( V.integral( integrand ) ) + sqrt_g = lambda *x: np.sqrt(mapping.metric_det(*x)) + integrand = lambda *x: (phi(*x) - model.phi(*x))**2 * sqrt_g(*x) + err2 = np.sqrt(V.integral(integrand)) t1 = time() timing['diagnostics'] = t1-t0 # Write solution to HDF5 file t0 = time() - V.export_fields( 'fields.h5', phi=phi ) + V.export_fields('fields.h5', phi=phi) t1 = time() timing['export'] += t1-t0 #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Print some information to terminal - for i in range( mpi_size ): + for i in range(mpi_size): if i == mpi_rank: - print( '--------------------------------------------------' ) - print( ' RANK = {}'.format( mpi_rank ) ) - print( '--------------------------------------------------' ) - print( '> Grid :: [{ne1},{ne2}]'.format( ne1=ne1, ne2=ne2) ) - print( '> Degree :: [{p1},{p2}]' .format( p1=p1, p2=p2 ) ) - print( '> CG info :: ',info ) - print( '> L2 error :: {:.2e}'.format( err2 ) ) - print( '' ) - print( '> Assembly time :: {:.2e}'.format( timing['assembly'] ) ) + print('--------------------------------------------------' ) + print(' RANK = {}'.format(mpi_rank)) + print('--------------------------------------------------' ) + print('> Grid :: [{ne1},{ne2}]'.format(ne1=ne1, ne2=ne2)) + print('> Degree :: [{p1},{p2}]' .format(p1=p1, p2=p2)) + print('> CG info :: ', info) + print('> L2 error :: {:.2e}'.format(err2)) + print('' ) + print('> Assembly time :: {:.2e}'.format(timing['assembly'])) if c1_correction: - print( '> Project. time :: {:.2e}'.format( timing['projection'] ) ) - print( '> Solution time :: {:.2e}'.format( timing['solution'] ) ) - print( '> Evaluat. time :: {:.2e}'.format( timing['diagnostics'] ) ) - print( '> Export time :: {:.2e}'.format( timing['export'] ) ) - print( '', flush=True ) - sleep( 0.001 ) + print('> Project. time :: {:.2e}'.format( timing['projection'])) + print('> Solution time :: {:.2e}'.format(timing['solution'])) + print('> Evaluat. 
time :: {:.2e}'.format(timing['diagnostics'])) + print('> Export time :: {:.2e}'.format(timing['export'])) + print('', flush=True) + sleep(0.001) mpi_comm.Barrier() #+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -745,7 +745,7 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr ########## # Plot domain decomposition (master only) - V.plot_2d_decomposition( model.mapping, refine=N ) + V.plot_2d_decomposition(model.mapping, refine=N) # Perform other visualization using master or all processes if not distribute_viz: @@ -768,68 +768,68 @@ def main( *, test_case, ncells, degree, use_spline_mapping, c1_correction, distr phi, = V.import_fields( 'fields.h5', 'phi' ) # Compute numerical solution (and error) on refined logical grid - [sk1,sk2], [ek1,ek2] = V.local_domain + [sk1, sk2], [ek1, ek2] = V.local_domain - eta1 = refine_array_1d( V1.breaks[sk1:ek1+2], N ) - eta2 = refine_array_1d( V2.breaks[sk2:ek2+2], N ) - num = np.array( [[ phi( e1,e2 ) for e2 in eta2] for e1 in eta1] ) - ex = np.array( [[model.phi( e1,e2 ) for e2 in eta2] for e1 in eta1] ) + eta1 = refine_array_1d(V1.breaks[sk1:ek1+2], N) + eta2 = refine_array_1d(V2.breaks[sk2:ek2+2], N) + num = np.array([[ phi(e1, e2) for e2 in eta2] for e1 in eta1]) + ex = np.array([[model.phi(e1, e2) for e2 in eta2] for e1 in eta1]) err = num - ex # Compute physical coordinates of logical grid - pcoords = np.array( [[model.mapping(e1, e2) for e2 in eta2] for e1 in eta1] ) - xx = pcoords[:,:,0] - yy = pcoords[:,:,1] + pcoords = np.array([[model.mapping(e1, e2) for e2 in eta2] for e1 in eta1]) + xx = pcoords[:, :, 0] + yy = pcoords[:, :, 1] # Create figure with 3 subplots: # 1. exact solution on exact domain # 2. numerical solution on mapped domain (analytical or spline) # 3. 
numerical error on mapped domain (analytical or spline) - fig, axes = plt.subplots( 1, 3, figsize=(12.8, 4.8) ) + fig, axes = plt.subplots(1, 3, figsize=(12.8, 4.8)) - def add_colorbar( im, ax ): - divider = make_axes_locatable( ax ) - cax = divider.append_axes( "right", size=0.2, pad=0.2 ) - cbar = ax.get_figure().colorbar( im, cax=cax ) + def add_colorbar(im, ax): + divider = make_axes_locatable(ax) + cax = divider.append_axes("right", size=0.2, pad=0.2) + cbar = ax.get_figure().colorbar(im, cax=cax) return cbar # Plot exact solution ax = axes[0] - im = ax.contourf( xx, yy, ex, 40, cmap='jet' ) - add_colorbar( im, ax ) - ax.set_xlabel( r'$x$', rotation='horizontal' ) - ax.set_ylabel( r'$y$', rotation='horizontal' ) - ax.set_title ( r'$\phi_{ex}(x,y)$' ) - ax.plot( xx[:,::N] , yy[:,::N] , 'k' ) - ax.plot( xx[::N,:].T, yy[::N,:].T, 'k' ) + im = ax.contourf(xx, yy, ex, 40, cmap='jet') + add_colorbar(im, ax) + ax.set_xlabel(r'$x$', rotation='horizontal') + ax.set_ylabel(r'$y$', rotation='horizontal') + ax.set_title (r'$\phi_{ex}(x,y)$') + ax.plot(xx[:, ::N] , yy[:, ::N] , 'k') + ax.plot(xx[::N, :].T, yy[::N, :].T, 'k') ax.set_aspect('equal') if use_spline_mapping: # Recompute physical coordinates of logical grid using spline mapping - pcoords = np.array( [[map_discrete(e1, e2) for e2 in eta2] for e1 in eta1] ) - xx = pcoords[:,:,0] - yy = pcoords[:,:,1] + pcoords = np.array([[map_discrete(e1, e2) for e2 in eta2] for e1 in eta1]) + xx = pcoords[:, :, 0] + yy = pcoords[:, :, 1] # Plot numerical solution ax = axes[1] - im = ax.contourf( xx, yy, num, 40, cmap='jet' ) - add_colorbar( im, ax ) - ax.set_xlabel( r'$x$', rotation='horizontal' ) - ax.set_ylabel( r'$y$', rotation='horizontal' ) - ax.set_title ( r'$\phi(x,y)$' ) - ax.plot( xx[:,::N] , yy[:,::N] , 'k' ) - ax.plot( xx[::N,:].T, yy[::N,:].T, 'k' ) + im = ax.contourf(xx, yy, num, 40, cmap='jet') + add_colorbar(im, ax) + ax.set_xlabel(r'$x$', rotation='horizontal') + ax.set_ylabel(r'$y$', rotation='horizontal') + ax.set_title (r'$\phi(x,y)$') + ax.plot(xx[:, ::N] , yy[:, ::N] , 'k') + ax.plot(xx[::N, :].T, yy[::N, :].T, 'k') ax.set_aspect('equal') # Plot numerical error ax = axes[2] - im = ax.contourf( xx, yy, err, 40, cmap='jet' ) - add_colorbar( im, ax ) - ax.set_xlabel( r'$x$', rotation='horizontal' ) - ax.set_ylabel( r'$y$', rotation='horizontal' ) - ax.set_title ( r'$\phi(x,y) - \phi_{ex}(x,y)$' ) - ax.plot( xx[:,::N] , yy[:,::N] , 'k' ) - ax.plot( xx[::N,:].T, yy[::N,:].T, 'k' ) + im = ax.contourf(xx, yy, err, 40, cmap='jet') + add_colorbar(im, ax) + ax.set_xlabel(r'$x$', rotation='horizontal') + ax.set_ylabel(r'$y$', rotation='horizontal') + ax.set_title (r'$\phi(x,y) - \phi_{ex}(x,y)$') + ax.plot(xx[:, ::N] , yy[:, ::N] , 'k') + ax.plot(xx[::N, :].T, yy[::N, :].T, 'k') ax.set_aspect('equal') # Show figure @@ -860,7 +860,7 @@ def parse_input_arguments(): parser.add_argument( '-d', type = int, nargs = 2, - default = [2,2], + default = [2, 2], metavar = ('P1','P2'), dest = 'degree', help = 'Spline degree along each dimension' @@ -869,7 +869,7 @@ def parse_input_arguments(): parser.add_argument( '-n', type = int, nargs = 2, - default = [10,10], + default = [10, 10], metavar = ('N1','N2'), dest = 'ncells', help = 'Number of grid cells (elements) along each dimension' From cbf16e2d8f96f50856665ffa4b527f2577f04d91 Mon Sep 17 00:00:00 2001 From: YzzIzzY Date: Fri, 24 Mar 2023 13:26:14 +0100 Subject: [PATCH 25/28] Try fix issue 257 --- psydac/api/discretization.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/psydac/api/discretization.py b/psydac/api/discretization.py index a46f9ec03..28512e858 100644 --- a/psydac/api/discretization.py +++ b/psydac/api/discretization.py @@ -271,7 +271,8 @@ def discretize_space(V, domain_h, *, degree=None, multiplicity=None, knots=None, connectivity = construct_connectivity(domain) if isinstance(domain_h, Geometry) and all(domain_h.mappings.values()): # from a discrete geoemtry - mappings = [domain_h.mappings[inter.logical_domain.name] for inter in interiors] + # mappings = [domain_h.mappings[inter.logical_domain.name] for inter in interiors] + mappings = [domain_h.mappings[inter.name] for inter in interiors] spaces = [m.space for m in mappings] g_spaces = dict(zip(interiors, spaces)) spaces = [S.spaces for S in spaces] From 44b087f4b26ced2b690a713a8dddc1d34b63807f Mon Sep 17 00:00:00 2001 From: YzzIzzY Date: Fri, 24 Mar 2023 13:37:52 +0100 Subject: [PATCH 26/28] fix C1 spaces --- psydac/polar/c1_cart.py | 10 +++++++--- psydac/polar/c1_spaces.py | 7 +++---- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/psydac/polar/c1_cart.py b/psydac/polar/c1_cart.py index d667fadb1..4b23e1919 100644 --- a/psydac/polar/c1_cart.py +++ b/psydac/polar/c1_cart.py @@ -53,6 +53,13 @@ def __init__( self, cart, radial_dim=0 ): # Recompute shape of local arrays in topology (with ghost regions) self._shape = tuple( e-s+1+2*p for s,e,p in zip( self._starts, self._ends, self._pads ) ) + # Store "parent" cart object for later reference + self._parent_cart = cart + + # Stop here in the serial case + if self._comm is None: + return + # Recompute information for communicating with neighbors self._shift_info = {} for dimension in range( self._ndims ): @@ -60,9 +67,6 @@ def __init__( self, cart, radial_dim=0 ): self._shift_info[ dimension, disp ] = \ self._compute_shift_info( dimension, disp ) - # Store "parent" cart object for later reference - self._parent_cart = cart - # ... 
@property def parent_cart( self ): diff --git a/psydac/polar/c1_spaces.py b/psydac/polar/c1_spaces.py index 42c4f2162..a5f7bf59f 100644 --- a/psydac/polar/c1_spaces.py +++ b/psydac/polar/c1_spaces.py @@ -42,13 +42,12 @@ def new_c1_vector_space( V, radial_dim=0, angle_dim=1 ): assert V.periods[radial_dim] == False assert V.periods[ angle_dim] == True + c1_cart = C1_Cart( V.cart, radial_dim ) + S = StencilVectorSpace( cart=c1_cart, dtype=V.dtype ) + if V.parallel: - c1_cart = C1_Cart( V.cart, radial_dim ) - S = StencilVectorSpace( cart=c1_cart, dtype=V.dtype ) D = DenseVectorSpace( 3, cart=V.cart, radial_dim=radial_dim, angle_dim=angle_dim ) else: - c1_npts = [(n-2 if d==radial_dim else n) for (d,n) in enumerate( V.npts )] - S = StencilVectorSpace( c1_npts, V.pads, V.periods, V.dtype ) D = DenseVectorSpace( 3 ) P = BlockVectorSpace( D, S ) From 017865ae3c07d3befa01de2d7d2606451590389b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yaman=20G=C3=BC=C3=A7l=C3=BC?= Date: Tue, 28 Mar 2023 16:50:07 +0200 Subject: [PATCH 27/28] Use SymPDE branch "logical_n_physical-coordinates" --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 68a658e0e..c66b543be 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ 'pyevtk', # Our packages from PyPi - 'sympde==0.16.1', + 'sympde @ git+https://github.com/pyccel/sympde@logical_n_physical-coordinates', 'pyccel>=1.5.1', 'gelato==0.11', From f8fe0bc0cd7ffb2ec413ad9230a68958339b9e71 Mon Sep 17 00:00:00 2001 From: YzzIzzY Date: Tue, 28 Mar 2023 16:52:57 +0200 Subject: [PATCH 28/28] call LogicalExpr always before TerminalExpr --- psydac/api/discretization.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/psydac/api/discretization.py b/psydac/api/discretization.py index 910ded7f6..964cee493 100644 --- a/psydac/api/discretization.py +++ b/psydac/api/discretization.py @@ -385,16 +385,21 @@ def discretize(a, *args, **kwargs): kwargs['symbolic_mapping'] = mapping if isinstance(a, sym_BasicForm): - if isinstance(a, sym_Norm): - kernel_expr = TerminalExpr(a, domain) - if not mapping is None: - kernel_expr = tuple(LogicalExpr(i, domain) for i in kernel_expr) - else: - if not mapping is None: - a = LogicalExpr (a, domain) - domain = domain.logical_domain - - kernel_expr = TerminalExpr(a, domain) + # if isinstance(a, sym_Norm): + # kernel_expr = TerminalExpr(a, domain) + # if not mapping is None: + # kernel_expr = tuple(LogicalExpr(i, domain) for i in kernel_expr) + # else: + # if not mapping is None: + # a = LogicalExpr (a, domain) + # domain = domain.logical_domain + + # kernel_expr = TerminalExpr(a, domain) + if not mapping is None: + a = LogicalExpr (a, domain) + domain = domain.logical_domain + + kernel_expr = TerminalExpr(a, domain) if len(kernel_expr) > 1: return DiscreteSumForm(a, kernel_expr, *args, **kwargs)