[Getfem-commits] (no subject)

From: Yves Renard
Subject: [Getfem-commits] (no subject)
Date: Tue, 27 Aug 2019 14:37:03 -0400 (EDT)
branch: master
commit d1d911be488613c19d70fc219db70123914adf36
Author: Yves Renard <address@hidden>
Date: Sat Aug 17 20:51:58 2019 +0200
Adding HHO reconstruction operators
---
src/Makefile.am | 24 +-
src/getfem/getfem_HHO.h | 86 ++
src/getfem/getfem_fem.h | 18 +-
src/getfem_HHO.cc | 1130 ++++++++++++++++++++++++++
src/getfem_fem_composite.cc | 21 +-
src/getfem_generic_assembly_interpolation.cc | 3 +-
src/getfem_mesh_fem.cc | 26 +-
7 files changed, 1279 insertions(+), 29 deletions(-)
diff --git a/src/Makefile.am b/src/Makefile.am
index c318330..a5e5fa5 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1,4 +1,4 @@
-# Copyright (C) 1999-2017 Yves Renard
+# Copyright (C) 1999-2019 Yves Renard
#
# This file is a part of GetFEM++
#
@@ -15,7 +15,7 @@
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
-nobase_include_HEADERS = \
+nobase_include_HEADERS = \
gmm/gmm.h \
gmm/gmm_arch_config.h \
gmm/gmm_matrix.h \
@@ -40,7 +40,7 @@ nobase_include_HEADERS = \
gmm/gmm_modified_gram_schmidt.h \
gmm/gmm_dense_Householder.h \
gmm/gmm_dense_lu.h \
- gmm/gmm_dense_matrix_functions.h \
+ gmm/gmm_dense_matrix_functions.h \
gmm/gmm_dense_qr.h \
gmm/gmm_dense_sylvester.h \
gmm/gmm_tri_solve.h \
@@ -73,8 +73,8 @@ nobase_include_HEADERS = \
gmm/gmm_except.h \
gmm/gmm_feedback_management.h \
gmm/gmm_MUMPS_interface.h \
- getfem/dal_config.h \
- getfem/dal_singleton.h \
+ getfem/dal_config.h \
+ getfem/dal_singleton.h \
getfem/dal_basic.h \
getfem/dal_bit_vector.h \
getfem/dal_static_stored_objects.h \
@@ -90,19 +90,19 @@ nobase_include_HEADERS = \
getfem/bgeot_poly.h \
getfem/bgeot_geometric_trans.h \
getfem/bgeot_geotrans_inv.h \
- getfem/bgeot_kdtree.h \
+ getfem/bgeot_kdtree.h \
getfem/bgeot_mesh_structure.h \
getfem/bgeot_mesh.h \
getfem/bgeot_poly_composite.h \
- getfem/bgeot_rtree.h \
- getfem/bgeot_node_tab.h \
+ getfem/bgeot_rtree.h \
+ getfem/bgeot_node_tab.h \
getfem/bgeot_small_vector.h \
getfem/bgeot_sparse_tensors.h \
getfem/bgeot_tensor.h \
- getfem/bgeot_comma_init.h \
+ getfem/bgeot_comma_init.h \
getfem/bgeot_torus.h \
getfem/bgeot_ftool.h \
- getfem/getfem_accumulated_distro.h \
+ getfem/getfem_accumulated_distro.h \
getfem/getfem_arch_config.h \
getfem/getfem_copyable_ptr.h \
getfem/getfem_integration.h \
@@ -149,7 +149,8 @@ nobase_include_HEADERS = \
getfem/getfem_models.h \
getfem/getfem_model_solvers.h \
getfem/getfem_linearized_plates.h \
- getfem/getfem_locale.h \
+ getfem/getfem_HHO.h \
+ getfem/getfem_locale.h \
getfem/getfem_contact_and_friction_common.h \
getfem/getfem_contact_and_friction_large_sliding.h \
getfem/getfem_contact_and_friction_nodal.h \
@@ -234,6 +235,7 @@ SRC = \
getfem_fourth_order.cc \
getfem_nonlinear_elasticity.cc \
getfem_linearized_plates.cc \
+ getfem_HHO.cc \
getfem_contact_and_friction_common.cc \
getfem_contact_and_friction_nodal.cc \
getfem_contact_and_friction_integral.cc \
diff --git a/src/getfem/getfem_HHO.h b/src/getfem/getfem_HHO.h
new file mode 100644
index 0000000..6eefcbd
--- /dev/null
+++ b/src/getfem/getfem_HHO.h
@@ -0,0 +1,86 @@
+// /* -*- c++ -*- (enables emacs c++ mode) */
+/*===========================================================================
+
+ Copyright (C) 2019-2019 Yves Renard
+
+ This file is a part of GetFEM++
+
+ GetFEM++ is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version along with the GCC Runtime Library
+ Exception either version 3.1 or (at your option) any later version.
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ License and GCC Runtime Library Exception for more details.
+ You should have received a copy of the GNU Lesser General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+
+ As a special exception, you may use this file as it is a part of a free
+ software library without restriction. Specifically, if other files
+ instantiate templates or use macros or inline functions from this file,
+ or you compile this file and link it with other files to produce an
+ executable, this file does not by itself cause the resulting executable
+ to be covered by the GNU Lesser General Public License. This exception
+ does not however invalidate any other reasons why the executable file
+ might be covered by the GNU Lesser General Public License.
+
+===========================================================================*/
+/**@file getfem_HHO.h
+ @author Yves Renard <address@hidden>
+ @date August 16, 2019.
+ @brief Tools for Hybrid-High-Order methods.
+*/
+
+#ifndef GETFEM_HHO_H__
+#define GETFEM_HHO_H__
+
+#include "getfem_models.h"
+
+
+namespace getfem {
+
+ /** Add the elementary transformation corresponding to the reconstruction
+ of a gradient for HHO methods to the model.
+ The name is the name given to the elementary transformation.
+ */
+ void add_HHO_reconstructed_gradient(model &md, std::string name);
+
+
+ /** Add the elementary transformation corresponding to the reconstruction
+ of a symmetrized gradient for HHO methods to the model.
+ The name is the name given to the elementary transformation.
+ */
+ void add_HHO_reconstructed_symmetrized_gradient(model &md, std::string name);
+
+ /** Add the elementary transformation to the model corresponding to the
+ reconstruction of the variable.
+ The name is the name given to the elementary transformation.
+ */
+ void add_HHO_reconstructed_value(model &md, std::string name);
+
+ /** Add the elementary transformation to the model corresponding to the
+ reconstruction of the variable using a symmetrized gradient.
+ The name is the name given to the elementary transformation.
+ */
+ void add_HHO_reconstructed_symmetrized_value(model &md, std::string name);
+
+ /** Add the elementary transformation to the model corresponding to the
+ HHO stabilization operator.
+ The name is the name given to the elementary transformation.
+ */
+ void add_HHO_stabilization(model &md, std::string name);
+
+ /** Add the elementary transformation to the model corresponding to the
+ HHO stabilization operator using a symmetrized gradient.
+ The name is the name given to the elementary transformation.
+ */
+ void add_HHO_symmetrized_stabilization(model &md, std::string name);
+
+
+} /* end of namespace getfem. */
+
+
+#endif /* GETFEM_HHO_H__ */
diff --git a/src/getfem/getfem_fem.h b/src/getfem/getfem_fem.h
index 8e71614..97317c5 100644
--- a/src/getfem/getfem_fem.h
+++ b/src/getfem/getfem_fem.h
@@ -1,7 +1,7 @@
/* -*- c++ -*- (enables emacs c++ mode) */
/*===========================================================================
- Copyright (C) 1999-2017 Yves Renard
+ Copyright (C) 1999-2019 Yves Renard
This file is a part of GetFEM++
@@ -126,6 +126,11 @@
a standard P2 Lagrange element on its triangular faces and a Q2_INCOMPLETE
Lagrange element on its quadrangular face.
+ - "HHO(fem_interior, fem_face_1, ..., fem_face_n)" : Build a hybrid method
+ with "fem_interior" on the element itself and "fem_face_1", ...,
+ "fem_face_n" on each face. If only one method is given for the faces, it
+ is duplicated on each face.
+
*/
#ifndef GETFEM_FEM_H__
@@ -619,7 +624,8 @@ namespace getfem {
@param alpha the "inset" factor for the dof nodes: with alpha =
0, the nodes are located as usual (i.e. with node on the convex border),
- and for 0 < alpha < 1, they converge to the center of gravity of the convex.
+ and for 0 < alpha < 1, they converge to the center of gravity of the
+ convex.
@param complete a flag which requests complete Langrange polynomial
elements even if the provided pgt is an incomplete one (e.g. 8-node
@@ -717,7 +723,9 @@ namespace getfem {
pfem_precomp is computed, and added to the pool.
@param pf a pointer to the fem object.
- @param pspt a pointer to a list of points in the reference convex.CAUTION:
+ @param pspt a pointer to a list of points in the reference convex.
+
+ CAUTION:
this array must not be destroyed as long as the fem_precomp is used!!
Moreover pspt is supposed to identify uniquely the set of
@@ -980,6 +988,8 @@ namespace getfem {
}
}
+ /* Specific function for a HHO method to obtain the method in the interior */
+ pfem interior_fem_of_hho_method(pfem hho_method);
/* Functions allowing the add of a finite element method outside
@@ -993,7 +1003,7 @@ namespace getfem {
void add_fem_name(std::string name,
dal::naming_system<virtual_fem>::pfunction f);
-
+
/* @} */
} /* end of namespace getfem. */
diff --git a/src/getfem_HHO.cc b/src/getfem_HHO.cc
new file mode 100644
index 0000000..274bd57
--- /dev/null
+++ b/src/getfem_HHO.cc
@@ -0,0 +1,1130 @@
+/*===========================================================================
+
+ Copyright (C) 2019-2019 Yves Renard
+
+ This file is a part of GetFEM++
+
+ GetFEM++ is free software; you can redistribute it and/or modify it
+ under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version along with the GCC Runtime Library
+ Exception either version 3.1 or (at your option) any later version.
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
+ License and GCC Runtime Library Exception for more details.
+ You should have received a copy of the GNU Lesser General Public License
+ along with this program; if not, write to the Free Software Foundation,
+ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+
+===========================================================================*/
+
+
+#include "getfem/getfem_HHO.h"
+
+
+namespace getfem {
+
+ THREAD_SAFE_STATIC bgeot::geotrans_precomp_pool HHO_pgp_pool;
+ THREAD_SAFE_STATIC fem_precomp_pool HHO_pfp_pool;
+
+ class _HHO_reconstructed_gradient
+ : public virtual_elementary_transformation {
+
+ public:
+
+ virtual void give_transformation(const mesh_fem &mf1, const mesh_fem &mf2,
+ size_type cv, base_matrix &M) const {
+
+ // The reconstructed Gradient "G" is described on mf2 and computed by
+ // the formula on the element T :
+ // \int_T G.w = \int_T Grad(v_T).w + \int_{dT}(v_{dT} - v_T).(w.n)
+ // where "w" is the test function arbitrary in mf2, "v_T" is the field
+ // inside the element whose gradient is to be reconstructed,
+ // "v_{dT}" is the field on the boundary of T and "n" is the outward
+ // unit normal.
+
+ // To be optimized:
+ // - The fact that (when pf2->target_dim() = 1) the
+ // problem can be solved componentwise can be more exploited in
+ // avoiding the computation of the whole matrix M2.
+ // - The vectorization can be avoided in most cases
+
+ // Obtaining the fem descriptors
+ pfem pf1 = mf1.fem_of_element(cv);
+ pfem pf2 = mf2.fem_of_element(cv);
+ pfem pfi = interior_fem_of_hho_method(pf1);
+
+ size_type degree = std::max(pf1->estimated_degree(),
+ pf2->estimated_degree());
+ bgeot::pgeometric_trans pgt = mf1.linked_mesh().trans_of_convex(cv);
+ papprox_integration pim
+ = classical_approx_im(pgt, dim_type(2*degree))->approx_method();
+
+ base_matrix G;
+ bgeot::vectors_to_base_matrix(G, mf1.linked_mesh().points_of_convex(cv));
+
+ bgeot::pgeotrans_precomp pgp
+ = HHO_pgp_pool(pgt, pim->pintegration_points());
+ pfem_precomp pfp1 = HHO_pfp_pool(pf1, pim->pintegration_points());
+ pfem_precomp pfp2 = HHO_pfp_pool(pf2, pim->pintegration_points());
+ pfem_precomp pfpi = HHO_pfp_pool(pfi, pim->pintegration_points());
+
+ fem_interpolation_context ctx1(pgp, pfp1, 0, G, cv);
+ fem_interpolation_context ctx2(pgp, pfp2, 0, G, cv);
+ fem_interpolation_context ctxi(pgp, pfpi, 0, G, cv);
+
+ size_type Q = mf1.get_qdim(), N = mf1.linked_mesh().dim();
+ base_vector un(N);
+ size_type qmult1 = Q / pf1->target_dim();
+ size_type ndof1 = pf1->nb_dof(cv) * qmult1;
+ size_type qmult2 = Q*N / pf2->target_dim();
+ size_type ndof2 = pf2->nb_dof(cv) * qmult2;
+ size_type qmulti = Q / pfi->target_dim();
+ size_type ndofi = pfi->nb_dof(cv) * qmulti;
+
+
+ base_tensor t1, t2, ti, tv1;
+ base_matrix tv2, tv1p, tvi;
+ base_matrix M1(ndof2, ndof1), M2(ndof2, ndof2);
+ base_matrix aux2(ndof2, ndof2);
+
+ // Integrals on the element : \int_T G.w (M2) and \int_T Grad(v_T).w (M1)
+ for (size_type ipt = 0; ipt < pim->nb_points_on_convex(); ++ipt) {
+ ctx1.set_ii(ipt); ctx2.set_ii(ipt);
+ scalar_type coeff = pim->coeff(ipt) * ctx1.J();
+
+ ctx1.grad_base_value(t1);
+ vectorize_grad_base_tensor(t1, tv1, ndof1, pf1->target_dim(), Q);
+
+ ctx2.base_value(t2);
+ vectorize_base_tensor(t2, tv2, ndof2, pf2->target_dim(), Q*N);
+
+ gmm::mult(tv2, gmm::transposed(tv2), aux2);
+ gmm::add(gmm::scaled(aux2, coeff), M2);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M1(j, i) += coeff * tv1.as_vector()[i+k*ndof1] * tv2(j, k);
+ }
+
+ // Integrals on the faces : \int_{dT}(v_{dT} - v_T).(w.n) (M1)
+ for (short_type ifc = 0; ifc < pgt->structure()->nb_faces(); ++ifc) {
+ ctx1.set_face_num(ifc); ctx2.set_face_num(ifc); ctxi.set_face_num(ifc);
+ size_type first_ind = pim->ind_first_point_on_face(ifc);
+ for (size_type ipt = 0; ipt < pim->nb_points_on_face(ifc); ++ipt) {
+ ctx1.set_ii(first_ind+ipt);
+ ctx2.set_ii(first_ind+ipt);
+ ctxi.set_ii(first_ind+ipt);
+ scalar_type coeff = pim->coeff(first_ind+ipt) * ctx1.J();
+
+ ctx2.base_value(t2);
+ vectorize_base_tensor(t2, tv2, ndof2, pf2->target_dim(), Q*N);
+
+ ctx1.base_value(t1);
+ vectorize_base_tensor(t1, tv1p, ndof1, pf1->target_dim(), Q);
+
+ ctxi.base_value(ti);
+ vectorize_base_tensor(ti, tvi, ndofi, pfi->target_dim(), Q);
+
+ gmm::mult(ctx1.B(), pgt->normals()[ifc], un);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < Q; ++k1) {
+ scalar_type b(0), a = coeff *
+ (tv1p(i, k1) - (i < ndofi ? tvi(i, k1) : 0.));
+ for (size_type k2 = 0; k2 < N; ++k2)
+ b += a * tv2(j, k1 + k2*Q) * un[k2];
+ M1(j, i) += b;
+ }
+ }
+
+ }
+ if (pf2->target_dim() == 1) {
+ gmm::sub_slice I(0, ndof2, N*Q);
+ gmm::lu_inverse(gmm::sub_matrix(M2, I, I));
+ for (size_type i = 1; i < N*Q; ++i) {
+ gmm::sub_slice I2(i, ndof2, N*Q);
+ gmm::copy(gmm::sub_matrix(M2, I, I), gmm::sub_matrix(M2, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M2);
+
+ gmm::mult(M2, M1, M);
+ }
+ };
+
+ void add_HHO_reconstructed_gradient(model &md, std::string name) {
+ pelementary_transformation
+ p = std::make_shared<_HHO_reconstructed_gradient>();
+ md.add_elementary_transformation(name, p);
+ }
+
+
+ class _HHO_reconstructed_sym_gradient
+ : public virtual_elementary_transformation {
+
+ public:
+
+ virtual void give_transformation(const mesh_fem &mf1, const mesh_fem &mf2,
+ size_type cv, base_matrix &M) const {
+
+ // The reconstructed symmetric Gradient "G" is described on mf2 and
+ // computed by the formula on the element T :
+ // \int_T G:w = (1/2)*\int_T Grad(v_T):(w+w^T)
+ // + (1/2)*\int_{dT}(v_{dT} - v_T).((w+w^T).n)
+ // where "w" is the test function arbitrary in mf2, "v_T" is the field
+ // inside the element whose gradient is to be reconstructed,
+ // "v_{dT}" is the field on the boundary of T and "n" is the outward
+ // unit normal.
+
+ // Obtaining the fem descriptors
+ pfem pf1 = mf1.fem_of_element(cv);
+ pfem pf2 = mf2.fem_of_element(cv);
+ pfem pfi = interior_fem_of_hho_method(pf1);
+
+ size_type degree = std::max(pf1->estimated_degree(),
+ pf2->estimated_degree());
+ bgeot::pgeometric_trans pgt = mf1.linked_mesh().trans_of_convex(cv);
+ papprox_integration pim
+ = classical_approx_im(pgt, dim_type(2*degree))->approx_method();
+
+ base_matrix G;
+ bgeot::vectors_to_base_matrix(G, mf1.linked_mesh().points_of_convex(cv));
+
+ bgeot::pgeotrans_precomp pgp
+ = HHO_pgp_pool(pgt, pim->pintegration_points());
+ pfem_precomp pfp1 = HHO_pfp_pool(pf1, pim->pintegration_points());
+ pfem_precomp pfp2 = HHO_pfp_pool(pf2, pim->pintegration_points());
+ pfem_precomp pfpi = HHO_pfp_pool(pfi, pim->pintegration_points());
+
+ fem_interpolation_context ctx1(pgp, pfp1, 0, G, cv);
+ fem_interpolation_context ctx2(pgp, pfp2, 0, G, cv);
+ fem_interpolation_context ctxi(pgp, pfpi, 0, G, cv);
+
+ size_type Q = mf1.get_qdim(), N = mf1.linked_mesh().dim();
+ GMM_ASSERT1(Q == N, "This transformation works only for vector fields "
+ "having the same dimension as the domain");
+ base_vector un(N);
+ size_type qmult1 = N / pf1->target_dim();
+ size_type ndof1 = pf1->nb_dof(cv) * qmult1;
+ size_type qmult2 = N*N / pf2->target_dim();
+ size_type ndof2 = pf2->nb_dof(cv) * qmult2;
+ size_type qmulti = N / pfi->target_dim();
+ size_type ndofi = pfi->nb_dof(cv) * qmulti;
+
+
+ base_tensor t1, t2, ti, tv1;
+ base_matrix tv2, tv1p, tvi;
+ base_matrix M1(ndof2, ndof1), M2(ndof2, ndof2);
+ base_matrix aux2(ndof2, ndof2);
+
+ // Integrals on the element : \int_T G:w (M2)
+ // and (1/2)*\int_T Grad(v_T):(w+w^T)
+ for (size_type ipt = 0; ipt < pim->nb_points_on_convex(); ++ipt) {
+ ctx1.set_ii(ipt); ctx2.set_ii(ipt);
+ scalar_type coeff = pim->coeff(ipt) * ctx1.J();
+
+ ctx1.grad_base_value(t1);
+ vectorize_grad_base_tensor(t1, tv1, ndof1, pf1->target_dim(), N);
+
+ ctx2.base_value(t2);
+ vectorize_base_tensor(t2, tv2, ndof2, pf2->target_dim(), N*N);
+
+ gmm::mult(tv2, gmm::transposed(tv2), aux2);
+ gmm::add(gmm::scaled(aux2, coeff), M2);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M1(j, i) += coeff * tv1.as_vector()[i+(k1+k2*N)*ndof1]
+ * 0.5 * (tv2(j, k1+k2*N) + tv2(j, k2+k1*N));
+ }
+
+ // Integrals on the faces : (1/2)*\int_{dT}(v_{dT} - v_T).((w+w^T).n) (M1)
+ for (short_type ifc = 0; ifc < pgt->structure()->nb_faces(); ++ifc) {
+ ctx1.set_face_num(ifc); ctx2.set_face_num(ifc); ctxi.set_face_num(ifc);
+ size_type first_ind = pim->ind_first_point_on_face(ifc);
+ for (size_type ipt = 0; ipt < pim->nb_points_on_face(ifc); ++ipt) {
+ ctx1.set_ii(first_ind+ipt);
+ ctx2.set_ii(first_ind+ipt);
+ ctxi.set_ii(first_ind+ipt);
+ scalar_type coeff = pim->coeff(first_ind+ipt) * ctx1.J();
+
+ ctx2.base_value(t2);
+ vectorize_base_tensor(t2, tv2, ndof2, pf2->target_dim(), N*N);
+
+ ctx1.base_value(t1);
+ vectorize_base_tensor(t1, tv1p, ndof1, pf1->target_dim(), N);
+
+ ctxi.base_value(ti);
+ vectorize_base_tensor(ti, tvi, ndofi, pfi->target_dim(), N);
+
+ gmm::mult(ctx1.B(), pgt->normals()[ifc], un);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1) {
+ scalar_type b(0), a = coeff *
+ (tv1p(i, k1) - (i < ndofi ? tvi(i, k1) : 0.));
+ for (size_type k2 = 0; k2 < N; ++k2)
+ b += a*0.5*(tv2(j, k1 + k2*N) + tv2(j, k2 + k1*N)) * un[k2];
+ M1(j, i) += b;
+ }
+ }
+
+ }
+ if (pf2->target_dim() == 1) {
+ gmm::sub_slice I(0, ndof2, N*Q);
+ gmm::lu_inverse(gmm::sub_matrix(M2, I, I));
+ for (size_type i = 1; i < N*Q; ++i) {
+ gmm::sub_slice I2(i, ndof2, N*Q);
+ gmm::copy(gmm::sub_matrix(M2, I, I), gmm::sub_matrix(M2, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M2);
+ gmm::mult(M2, M1, M);
+ }
+ };
+
+ void add_HHO_reconstructed_symmetrized_gradient(model &md, std::string name) {
+ pelementary_transformation
+ p = std::make_shared<_HHO_reconstructed_sym_gradient>();
+ md.add_elementary_transformation(name, p);
+ }
+
+
+
+ class _HHO_reconstructed_value
+ : public virtual_elementary_transformation {
+
+ public:
+
+ virtual void give_transformation(const mesh_fem &mf1, const mesh_fem &mf2,
+ size_type cv, base_matrix &M) const {
+ // The reconstructed variable "D" is described on mf2 and computed by
+ // the formula on the element T :
+ // \int_T Grad(D).Grad(w) = \int_T Grad(v_T).Grad(w)
+ // + \int_{dT}(v_{dT} - v_T).(Grad(w).n)
+ // with the constraint
+ // \int_T D = \int_T v_T
+ // where "w" is the test function arbitrary in mf2, "v_T" is the field
+ // inside the element whose gradient is to be reconstructed,
+ // "v_{dT}" is the field on the boundary of T and "n" is the outward
+ // unit normal.
+
+ // Obtaining the fem descriptors
+ pfem pf1 = mf1.fem_of_element(cv);
+ pfem pf2 = mf2.fem_of_element(cv);
+ pfem pfi = interior_fem_of_hho_method(pf1);
+
+ size_type degree = std::max(pf1->estimated_degree(),
+ pf2->estimated_degree());
+ bgeot::pgeometric_trans pgt = mf1.linked_mesh().trans_of_convex(cv);
+ papprox_integration pim
+ = classical_approx_im(pgt, dim_type(2*degree))->approx_method();
+
+ base_matrix G;
+ bgeot::vectors_to_base_matrix(G, mf1.linked_mesh().points_of_convex(cv));
+
+ bgeot::pgeotrans_precomp pgp
+ = HHO_pgp_pool(pgt, pim->pintegration_points());
+ pfem_precomp pfp1 = HHO_pfp_pool(pf1, pim->pintegration_points());
+ pfem_precomp pfp2 = HHO_pfp_pool(pf2, pim->pintegration_points());
+ pfem_precomp pfpi = HHO_pfp_pool(pfi, pim->pintegration_points());
+
+ fem_interpolation_context ctx1(pgp, pfp1, 0, G, cv);
+ fem_interpolation_context ctx2(pgp, pfp2, 0, G, cv);
+ fem_interpolation_context ctxi(pgp, pfpi, 0, G, cv);
+
+ size_type Q = mf1.get_qdim(), N = mf1.linked_mesh().dim();
+ base_vector un(N);
+ size_type qmult1 = Q / pf1->target_dim();
+ size_type ndof1 = pf1->nb_dof(cv) * qmult1;
+ size_type qmult2 = Q / pf2->target_dim();
+ size_type ndof2 = pf2->nb_dof(cv) * qmult2;
+ size_type qmulti = Q / pfi->target_dim();
+ size_type ndofi = pfi->nb_dof(cv) * qmulti;
+
+
+ base_tensor t1, t2, ti, tv1, tv2, t1p, t2p;
+ base_matrix tv1p, tv2p, tvi;
+ base_matrix M1(ndof2, ndof1), M2(ndof2, ndof2);
+ base_matrix M3(Q, ndof1), M4(Q, ndof2);
+ base_matrix aux1(ndof2, ndof1), aux2(ndof2, ndof2);
+
+ // Integrals on the element : \int_T Grad(D).Grad(w) (M2)
+ // \int_T Grad(v_T).Grad(w) (M1)
+ // \int_T D (M4) and \int_T v_T (M3)
+ for (size_type ipt = 0; ipt < pim->nb_points_on_convex(); ++ipt) {
+ ctx1.set_ii(ipt); ctx2.set_ii(ipt);
+ scalar_type coeff = pim->coeff(ipt) * ctx1.J();
+
+ ctx1.grad_base_value(t1);
+ vectorize_grad_base_tensor(t1, tv1, ndof1, pf1->target_dim(), Q);
+
+ ctx1.base_value(t1p);
+ vectorize_base_tensor(t1p, tv1p, ndof1, pf1->target_dim(), Q);
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), Q);
+
+ ctx2.base_value(t2p);
+ vectorize_base_tensor(t2p, tv2p, ndof1, pf1->target_dim(), Q);
+
+ for (size_type i = 0; i < ndof2; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M2(j, i) += coeff * tv2.as_vector()[i+k*ndof2]
+ * tv2.as_vector()[j+k*ndof2];
+
+ for (size_type i = 0; i < ndof2; ++i)
+ for (size_type k = 0; k < Q; ++k)
+ M4(k, i) += coeff * tv2p(i, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M1(j, i) += coeff * tv1.as_vector()[i+k*ndof1]
+ * tv2.as_vector()[j+k*ndof2];
+
+ for (size_type i = 0; i < ndof1; ++i)
+ for (size_type k = 0; k < Q; ++k)
+ M3(k, i) += coeff * tv1p(i, k);
+
+ }
+
+ // Integrals on the faces : \int_{dT}(v_{dT} - v_T).(Grad(w).n) (M1)
+ for (short_type ifc = 0; ifc < pgt->structure()->nb_faces(); ++ifc) {
+ ctx1.set_face_num(ifc); ctx2.set_face_num(ifc); ctxi.set_face_num(ifc);
+ size_type first_ind = pim->ind_first_point_on_face(ifc);
+ for (size_type ipt = 0; ipt < pim->nb_points_on_face(ifc); ++ipt) {
+ ctx1.set_ii(first_ind+ipt);
+ ctx2.set_ii(first_ind+ipt);
+ ctxi.set_ii(first_ind+ipt);
+ scalar_type coeff = pim->coeff(first_ind+ipt) * ctx1.J();
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), Q);
+
+ ctx1.base_value(t1);
+ vectorize_base_tensor(t1, tv1p, ndof1, pf1->target_dim(), Q);
+
+ ctxi.base_value(ti);
+ vectorize_base_tensor(ti, tvi, ndofi, pfi->target_dim(), Q);
+
+ gmm::mult(ctx1.B(), pgt->normals()[ifc], un);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < Q; ++k1) {
+ scalar_type b(0), a = coeff *
+ (tv1p(i, k1) - (i < ndofi ? tvi(i, k1) : 0.));
+ for (size_type k2 = 0; k2 < N; ++k2)
+ b += a * tv2.as_vector()[j+(k1 + k2*Q)*ndof2] * un[k2];
+ M1(j, i) += b;
+ }
+ }
+
+ }
+
+ // Add the constraint with penalization
+ gmm::mult(gmm::transposed(M4), M4, aux2);
+ gmm::add (aux2, M2);
+ gmm::mult(gmm::transposed(M4), M3, aux1);
+ gmm::add (aux1, M1);
+
+ if (pf2->target_dim() == 1 && Q > 1) {
+ gmm::sub_slice I(0, ndof2, Q);
+ gmm::lu_inverse(gmm::sub_matrix(M2, I, I));
+ for (size_type i = 1; i < Q; ++i) {
+ gmm::sub_slice I2(i, ndof2, Q);
+ gmm::copy(gmm::sub_matrix(M2, I, I), gmm::sub_matrix(M2, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M2);
+ gmm::mult(M2, M1, M);
+ }
+ };
+
+ void add_HHO_reconstructed_value(model &md, std::string name) {
+ pelementary_transformation
+ p = std::make_shared<_HHO_reconstructed_value>();
+ md.add_elementary_transformation(name, p);
+ }
+
+
+ class _HHO_reconstructed_sym_value
+ : public virtual_elementary_transformation {
+
+ public:
+
+ virtual void give_transformation(const mesh_fem &mf1, const mesh_fem &mf2,
+ size_type cv, base_matrix &M) const {
+ // The reconstructed variable "D" is described on mf2 and computed by
+ // the formula on the element T :
+ // \int_T Sym(Grad(D)).Grad(w) = \int_T Sym(Grad(v_T)).Grad(w)
+ // + \int_{dT}(v_{dT} - v_T).(Sym(Grad(w)).n)
+ // with the constraints
+ // \int_T D = \int_T v_T
+ // \int_T Skew(Grad(D)) = 0.5\int_{dT}(n x v_{dT} - v_{dT} x n)
+ // where "w" is the test function arbitrary in mf2, "v_T" is the field
+ // inside the element whose gradient is to be reconstructed,
+ // "v_{dT}" is the field on the boundary of T and "n" is the outward
+ // unit normal.
+
+ // Obtaining the fem descriptors
+ pfem pf1 = mf1.fem_of_element(cv);
+ pfem pf2 = mf2.fem_of_element(cv);
+ pfem pfi = interior_fem_of_hho_method(pf1);
+
+ size_type degree = std::max(pf1->estimated_degree(),
+ pf2->estimated_degree());
+ bgeot::pgeometric_trans pgt = mf1.linked_mesh().trans_of_convex(cv);
+ papprox_integration pim
+ = classical_approx_im(pgt, dim_type(2*degree))->approx_method();
+
+ base_matrix G;
+ bgeot::vectors_to_base_matrix(G, mf1.linked_mesh().points_of_convex(cv));
+
+ bgeot::pgeotrans_precomp pgp
+ = HHO_pgp_pool(pgt, pim->pintegration_points());
+ pfem_precomp pfp1 = HHO_pfp_pool(pf1, pim->pintegration_points());
+ pfem_precomp pfp2 = HHO_pfp_pool(pf2, pim->pintegration_points());
+ pfem_precomp pfpi = HHO_pfp_pool(pfi, pim->pintegration_points());
+
+ fem_interpolation_context ctx1(pgp, pfp1, 0, G, cv);
+ fem_interpolation_context ctx2(pgp, pfp2, 0, G, cv);
+ fem_interpolation_context ctxi(pgp, pfpi, 0, G, cv);
+
+ size_type Q = mf1.get_qdim(), N = mf1.linked_mesh().dim();
+ GMM_ASSERT1(Q == N, "This transformation works only for vector fields "
+ "having the same dimension as the domain");
+ base_vector un(N);
+ size_type qmult1 = N / pf1->target_dim();
+ size_type ndof1 = pf1->nb_dof(cv) * qmult1;
+ size_type qmult2 = N / pf2->target_dim();
+ size_type ndof2 = pf2->nb_dof(cv) * qmult2;
+ size_type qmulti = N / pfi->target_dim();
+ size_type ndofi = pfi->nb_dof(cv) * qmulti;
+
+
+ base_tensor t1, t2, ti, tv1, tv2, t1p, t2p;
+ base_matrix tv1p, tv2p, tvi;
+ base_matrix M1(ndof2, ndof1), M2(ndof2, ndof2);
+ base_matrix M3(N, ndof1), M4(N, ndof2);
+ base_matrix M5(N*N, ndof1), M6(N*N, ndof2);
+ base_matrix aux1(ndof2, ndof1), aux2(ndof2, ndof2);
+
+ // Integrals on the element : \int_T Sym(Grad(D)).Grad(w) (M2)
+ // \int_T Sym(Grad(v_T)).Grad(w) (M1)
+ // \int_T D (M4) and \int_T v_T (M3)
+ // \int_T Skew(Grad(D)) (M6)
+ for (size_type ipt = 0; ipt < pim->nb_points_on_convex(); ++ipt) {
+ ctx1.set_ii(ipt); ctx2.set_ii(ipt);
+ scalar_type coeff = pim->coeff(ipt) * ctx1.J();
+
+ ctx1.grad_base_value(t1);
+ vectorize_grad_base_tensor(t1, tv1, ndof1, pf1->target_dim(), N);
+
+ ctx1.base_value(t1p);
+ vectorize_base_tensor(t1p, tv1p, ndof1, pf1->target_dim(), N);
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), N);
+
+ ctx2.base_value(t2p);
+ vectorize_base_tensor(t2p, tv2p, ndof1, pf1->target_dim(), N);
+
+ for (size_type i = 0; i < ndof2; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M2(j, i) += coeff * tv2.as_vector()[i+(k1+k2*N)*ndof2]
+ * 0.5 * (tv2.as_vector()[j+(k1+k2*N)*ndof2] +
+ tv2.as_vector()[j+(k2+k1*N)*ndof2]);
+
+ for (size_type i = 0; i < ndof2; ++i)
+ for (size_type k = 0; k < N; ++k)
+ M4(k, i) += coeff * tv2p(i, k);
+
+ for (size_type i = 0; i < ndof2; ++i)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M6(k1+k2*N, i) += 0.5*coeff*(tv2.as_vector()[i+(k1+k2*N)*ndof2] -
+ tv2.as_vector()[i+(k2+k1*N)*ndof2]);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M1(j, i) += coeff * tv1.as_vector()[i+(k1+k2*N)*ndof1]
+ * 0.5 * (tv2.as_vector()[j+(k1+k2*N)*ndof2] +
+ tv2.as_vector()[j+(k2+k1*N)*ndof2]);
+
+ for (size_type i = 0; i < ndof1; ++i)
+ for (size_type k = 0; k < N; ++k)
+ M3(k, i) += coeff * tv1p(i, k);
+
+ }
+
+ // Integrals on the faces : \int_{dT}(v_{dT} - v_T).(Sym(Grad(w)).n) (M1)
+ // \int_{dT} Skew(n x v_{dT} - v_{dT} x n) (M5)
+ for (short_type ifc = 0; ifc < pgt->structure()->nb_faces(); ++ifc) {
+ ctx1.set_face_num(ifc); ctx2.set_face_num(ifc); ctxi.set_face_num(ifc);
+ size_type first_ind = pim->ind_first_point_on_face(ifc);
+ for (size_type ipt = 0; ipt < pim->nb_points_on_face(ifc); ++ipt) {
+ ctx1.set_ii(first_ind+ipt);
+ ctx2.set_ii(first_ind+ipt);
+ ctxi.set_ii(first_ind+ipt);
+ scalar_type coeff = pim->coeff(first_ind+ipt) * ctx1.J();
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), N);
+
+ ctx1.base_value(t1);
+ vectorize_base_tensor(t1, tv1p, ndof1, pf1->target_dim(), N);
+
+ ctxi.base_value(ti);
+ vectorize_base_tensor(ti, tvi, ndofi, pfi->target_dim(), N);
+
+ gmm::mult(ctx1.B(), pgt->normals()[ifc], un);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1) {
+ scalar_type b(0), a = coeff *
+ (tv1p(i, k1) - (i < ndofi ? tvi(i, k1) : 0.));
+ for (size_type k2 = 0; k2 < N; ++k2)
+ b += a * 0.5 * (tv2.as_vector()[j+(k1 + k2*N)*ndof2] +
+ tv2.as_vector()[j+(k2 + k1*N)*ndof2]) * un[k2];
+ M1(j, i) += b;
+ }
+
+ for (size_type i = 0; i < ndof1; ++i)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M5(k1+k2*N, i) += 0.5 * coeff * (tv1p(i, k2) * un[k1] -
+ tv1p(i, k1) * un[k2]);
+ }
+ }
+
+ // Add the constraint with penalization
+ gmm::mult(gmm::transposed(M4), M4, aux2);
+ gmm::add (aux2, M2);
+ gmm::mult(gmm::transposed(M6), M6, aux2);
+ gmm::add (aux2, M2);
+ gmm::mult(gmm::transposed(M4), M3, aux1);
+ gmm::add (aux1, M1);
+ gmm::mult(gmm::transposed(M6), M5, aux1);
+ gmm::add (aux1, M1);
+
+ if (pf2->target_dim() == 1 && Q > 1) {
+ gmm::sub_slice I(0, ndof2, Q);
+ gmm::lu_inverse(gmm::sub_matrix(M2, I, I));
+ for (size_type i = 1; i < Q; ++i) {
+ gmm::sub_slice I2(i, ndof2, Q);
+ gmm::copy(gmm::sub_matrix(M2, I, I), gmm::sub_matrix(M2, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M2);
+ gmm::mult(M2, M1, M);
+ }
+ };
+
+ void add_HHO_reconstructed_symmetrized_value(model &md, std::string name) {
+ pelementary_transformation
+ p = std::make_shared<_HHO_reconstructed_sym_value>();
+ md.add_elementary_transformation(name, p);
+ }
+
+
+ class _HHO_stabilization
+ : public virtual_elementary_transformation {
+
+ public:
+
+ virtual void give_transformation(const mesh_fem &mf1, const mesh_fem &mf2,
+ size_type cv, base_matrix &M) const {
+ // The reconstructed variable "S" is described on mf2 and computed by
+ // S(v) = P_{\dT}(v_{dT} - D(v) - P_T(v_T - D(v)))
+ // where P_{\dT} and P_T are L2 projections on the boundary and on the
+ // interior of T on the corresponding discrete spaces.
+ // Note that P_{\dT}(v_{dT}) = v_{dT} and P_T(v_T) = v_T and D is
+ // the reconstructed value on P^{k+1} given by the formula:
+ // \int_T Grad(D).Grad(w) = \int_T Grad(v_T).Grad(w)
+ // + \int_{dT}(v_{dT} - v_T).(Grad(w).n)
+ // with the constraint
+ // \int_T D = \int_T v_T
+ // where "w" is the test function arbitrary in mf2, "v_T" is the field
+ // inside the element whose gradient is to be reconstructed,
+ // "v_{dT}" is the field on the boundary of T and "n" is the outward
+ // unit normal.
+ // The implemented formula is
+ // S(v) = v_{dT} - P_{\dT}D(v) - P_{\dT}(v_T) + P_{\dT}(P_T(D(v)))
+ // by means of the projection matrix from P^{k+1} to the original space
+ // and the projection matrix from interior space to the boundary space.
+ // As it is built, S(v) is zero on interior dofs.
+
+ GMM_ASSERT1(&mf1 == &mf2, "The HHO stabilization transformation is "
+ "only defined on the HHO space to itself");
+
+ // Obtaining the fem descriptors
+ pfem pf1 = mf1.fem_of_element(cv);
+ short_type degree = pf1->estimated_degree();
+ bgeot::pgeometric_trans pgt = mf1.linked_mesh().trans_of_convex(cv);
+ pfem pf2 = classical_fem(pgt, short_type(degree + 1)); // Should be changed to an
+ // interior PK method
+ pfem pfi = interior_fem_of_hho_method(pf1);
+
+ papprox_integration pim
+ = classical_approx_im(pgt, dim_type(2*degree+2))->approx_method();
+
+ base_matrix G;
+ bgeot::vectors_to_base_matrix(G, mf1.linked_mesh().points_of_convex(cv));
+
+ bgeot::pgeotrans_precomp pgp
+ = HHO_pgp_pool(pgt, pim->pintegration_points());
+ pfem_precomp pfp1 = HHO_pfp_pool(pf1, pim->pintegration_points());
+ pfem_precomp pfp2 = HHO_pfp_pool(pf2, pim->pintegration_points());
+ pfem_precomp pfpi = HHO_pfp_pool(pfi, pim->pintegration_points());
+
+ fem_interpolation_context ctx1(pgp, pfp1, 0, G, cv);
+ fem_interpolation_context ctx2(pgp, pfp2, 0, G, cv);
+ fem_interpolation_context ctxi(pgp, pfpi, 0, G, cv);
+
+ size_type Q = mf1.get_qdim(), N = mf1.linked_mesh().dim();
+ base_vector un(N);
+ size_type qmult1 = Q / pf1->target_dim();
+ size_type ndof1 = pf1->nb_dof(cv) * qmult1;
+ size_type qmult2 = Q / pf2->target_dim();
+ size_type ndof2 = pf2->nb_dof(cv) * qmult2;
+ size_type qmulti = Q / pfi->target_dim();
+ size_type ndofi = pfi->nb_dof(cv) * qmulti;
+
+
+ base_tensor t1, t2, ti, tv1, tv2, t1p, t2p;
+ base_matrix tv1p, tv2p, tvi;
+ base_matrix M1(ndof2, ndof1), M2(ndof2, ndof2);
+ base_matrix M3(Q, ndof1), M4(Q, ndof2);
+ base_matrix aux1(ndof2, ndof1), aux2(ndof2, ndof2);
+ base_matrix M7(ndof1, ndof1), M8(ndof1, ndof2);
+ base_matrix M9(ndof1, ndof1), MD(ndof2, ndof1);
+
+ // Integrals on the element : \int_T Grad(D).Grad(w) (M2)
+ // \int_T Grad(v_T).Grad(w) (M1)
+ // \int_T D (M4) and \int_T v_T (M3)
+ for (size_type ipt = 0; ipt < pim->nb_points_on_convex(); ++ipt) {
+ ctx1.set_ii(ipt); ctx2.set_ii(ipt);
+ scalar_type coeff = pim->coeff(ipt) * ctx1.J();
+
+ ctx1.grad_base_value(t1);
+ vectorize_grad_base_tensor(t1, tv1, ndof1, pf1->target_dim(), Q);
+
+ ctx1.base_value(t1p);
+ vectorize_base_tensor(t1p, tv1p, ndof1, pf1->target_dim(), Q);
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), Q);
+
+ ctx2.base_value(t2p);
+ vectorize_base_tensor(t2p, tv2p, ndof1, pf1->target_dim(), Q);
+
+ for (size_type i = 0; i < ndof2; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M2(j, i) += coeff * tv2.as_vector()[i+k*ndof2]
+ * tv2.as_vector()[j+k*ndof2];
+
+ for (size_type i = 0; i < ndof2; ++i)
+ for (size_type k = 0; k < Q; ++k)
+ M4(k, i) += coeff * tv2p(i, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M1(j, i) += coeff * tv1.as_vector()[i+k*ndof1]
+ * tv2.as_vector()[j+k*ndof2];
+
+ for (size_type i = 0; i < ndof1; ++i)
+ for (size_type k = 0; k < Q; ++k)
+ M3(k, i) += coeff * tv1p(i, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof1; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M7(i, j) += coeff * tv1p(i,k) * tv1p(j, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M8(i, j) += coeff * tv1p(i,k) * tv2p(j, k);
+
+ }
+
+ // Integrals on the faces : \int_{dT}(v_{dT} - v_T).(Grad(w).n) (M1)
+ for (short_type ifc = 0; ifc < pgt->structure()->nb_faces(); ++ifc) {
+ ctx1.set_face_num(ifc); ctx2.set_face_num(ifc); ctxi.set_face_num(ifc);
+ size_type first_ind = pim->ind_first_point_on_face(ifc);
+ for (size_type ipt = 0; ipt < pim->nb_points_on_face(ifc); ++ipt) {
+ ctx1.set_ii(first_ind+ipt);
+ ctx2.set_ii(first_ind+ipt);
+ ctxi.set_ii(first_ind+ipt);
+ scalar_type coeff = pim->coeff(first_ind+ipt) * ctx1.J();
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), Q);
+
+ ctx2.base_value(t2p);
+ vectorize_base_tensor(t2p, tv2p, ndof2, pf2->target_dim(), Q);
+
+ ctx1.base_value(t1);
+ vectorize_base_tensor(t1, tv1p, ndof1, pf1->target_dim(), Q);
+
+ ctxi.base_value(ti);
+ vectorize_base_tensor(ti, tvi, ndofi, pfi->target_dim(), Q);
+
+ gmm::mult(ctx1.B(), pgt->normals()[ifc], un);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < Q; ++k1) {
+ scalar_type b(0), a = coeff *
+ (tv1p(i, k1) - (i < ndofi ? tvi(i, k1) : 0.));
+ for (size_type k2 = 0; k2 < N; ++k2)
+ b += a * tv2.as_vector()[j+(k1 + k2*Q)*ndof2] * un[k2];
+ M1(j, i) += b;
+ }
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof1; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M7(i, j) += coeff * tv1p(i,k) * tv1p(j, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M8(i, j) += coeff * tv1p(i,k) * tv2p(j, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndofi; ++j)
+ for (size_type k = 0; k < Q*N; ++k)
+ M9(i, j) += coeff * tv1p(i,k) * tvi(j, k);
+
+ }
+
+ }
+
+ // Add the constraint with penalization
+ gmm::mult(gmm::transposed(M4), M4, aux2);
+ gmm::add (aux2, M2);
+ gmm::mult(gmm::transposed(M4), M3, aux1);
+ gmm::add (aux1, M1);
+
+ if (pf2->target_dim() == 1 && Q > 1) {
+ gmm::sub_slice I(0, ndof2, Q);
+ gmm::lu_inverse(gmm::sub_matrix(M2, I, I));
+ for (size_type i = 1; i < Q; ++i) {
+ gmm::sub_slice I2(i, ndof2, Q);
+ gmm::copy(gmm::sub_matrix(M2, I, I), gmm::sub_matrix(M2, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M2);
+
+ if (pf1->target_dim() == 1 && Q > 1) {
+ gmm::sub_slice I(0, ndof1, Q);
+ gmm::lu_inverse(gmm::sub_matrix(M7, I, I));
+ for (size_type i = 1; i < Q; ++i) {
+ gmm::sub_slice I2(i, ndof1, Q);
+ gmm::copy(gmm::sub_matrix(M7, I, I), gmm::sub_matrix(M7, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M7);
+
+ gmm::mult(M2, M1, MD);
+
+ // S = (I - inv(M7)*M9)(I - inv(M7)*M8*MD)
+ base_matrix MPB(ndof1, ndof1);
+ gmm::mult(M7, M9, MPB);
+ gmm::copy(gmm::identity_matrix(), M9);
+ gmm::add(gmm::scaled(MPB, scalar_type(-1)), M9);
+
+ base_matrix MPC(ndof1, ndof1), MPD(ndof1, ndof1);
+ gmm::mult(M8, MD, MPC);
+ gmm::mult(M7, MPC, MPD);
+ gmm::copy(gmm::identity_matrix(), M7);
+ gmm::add(gmm::scaled(MPD, scalar_type(-1)), M7);
+
+ gmm::mult(M9, M7, M);
+ }
+ };
+
+ void add_HHO_stabilization(model &md, std::string name) {
+ pelementary_transformation
+ p = std::make_shared<_HHO_stabilization>();
+ md.add_elementary_transformation(name, p);
+ }
+
+
+ class _HHO_symmetrized_stabilization
+ : public virtual_elementary_transformation {
+
+ public:
+
+ virtual void give_transformation(const mesh_fem &mf1, const mesh_fem &mf2,
+ size_type cv, base_matrix &M) const {
+ // The reconstructed variable "S" is described on mf2 and computed by
+ // S(v) = P_{\dT}(v_{dT} - D(v) - P_T(v_T - D(v)))
+ // where P_{\dT} and P_T are L2 projections on the boundary and on the
+ // interior of T on the corresponding discrete spaces.
+ // Note that P_{\dT}(v_{dT}) = v_{dT} and P_T(v_T) = v_T and D is
+ // the reconstructed value on P^{k+1} given by the formula:
+ // \int_T Sym(Grad(D)).Grad(w) = \int_T Sym(Grad(v_T)).Grad(w)
+ // + \int_{dT}(v_{dT} - v_T).(Sym(Grad(w)).n)
+ // with the constraints
+ // \int_T D = \int_T v_T
+ // \int_T Skew(Grad(D)) = 0.5\int_{dT}(n x v_{dT} - v_{dT} x n)
+ // where "w" is the test function arbitrary in mf2, "v_T" is the field
+ // inside the element whose gradient is to be reconstructed,
+ // "v_{dT}" is the field on the boundary of T and "n" is the outward
+ // unit normal.
+ // The implemented formula is
+ // S(v) = v_{dT} - P_{\dT}D(v) - P_{\dT}(v_T) + P_{\dT}(P_T(D(v)))
+ // by means of the projection matrix from P^{k+1} to the original space
+ // and the projection matrix from interior space to the boundary space.
+ // As it is built, S(v) is zero on interior dofs.
+
+ GMM_ASSERT1(&mf1 == &mf2, "The HHO stabilization transformation is "
+ "only defined on the HHO space to itself");
+
+ // Obtaining the fem descriptors
+ pfem pf1 = mf1.fem_of_element(cv);
+ short_type degree = pf1->estimated_degree();
+ bgeot::pgeometric_trans pgt = mf1.linked_mesh().trans_of_convex(cv);
+ pfem pf2 = classical_fem(pgt, short_type(degree + 1)); // Should be changed to an
+ // interior PK method
+ pfem pfi = interior_fem_of_hho_method(pf1);
+
+ papprox_integration pim
+ = classical_approx_im(pgt, dim_type(2*degree+2))->approx_method();
+
+ base_matrix G;
+ bgeot::vectors_to_base_matrix(G, mf1.linked_mesh().points_of_convex(cv));
+
+ bgeot::pgeotrans_precomp pgp
+ = HHO_pgp_pool(pgt, pim->pintegration_points());
+ pfem_precomp pfp1 = HHO_pfp_pool(pf1, pim->pintegration_points());
+ pfem_precomp pfp2 = HHO_pfp_pool(pf2, pim->pintegration_points());
+ pfem_precomp pfpi = HHO_pfp_pool(pfi, pim->pintegration_points());
+
+ fem_interpolation_context ctx1(pgp, pfp1, 0, G, cv);
+ fem_interpolation_context ctx2(pgp, pfp2, 0, G, cv);
+ fem_interpolation_context ctxi(pgp, pfpi, 0, G, cv);
+
+ size_type Q = mf1.get_qdim(), N = mf1.linked_mesh().dim();
+ GMM_ASSERT1(Q == N, "This transformation works only for vector fields "
+ "having the same dimension as the domain");
+ base_vector un(N);
+ size_type qmult1 = N / pf1->target_dim();
+ size_type ndof1 = pf1->nb_dof(cv) * qmult1;
+ size_type qmult2 = N / pf2->target_dim();
+ size_type ndof2 = pf2->nb_dof(cv) * qmult2;
+ size_type qmulti = N / pfi->target_dim();
+ size_type ndofi = pfi->nb_dof(cv) * qmulti;
+
+
+ base_tensor t1, t2, ti, tv1, tv2, t1p, t2p;
+ base_matrix tv1p, tv2p, tvi;
+ base_matrix M1(ndof2, ndof1), M2(ndof2, ndof2);
+ base_matrix M3(N, ndof1), M4(N, ndof2);
+ base_matrix aux1(ndof2, ndof1), aux2(ndof2, ndof2);
+ base_matrix M5(N*N, ndof1), M6(N*N, ndof2);
+ base_matrix M7(ndof1, ndof1), M8(ndof1, ndof2);
+ base_matrix M9(ndof1, ndof1), MD(ndof2, ndof1);
+
+ // Integrals on the element : \int_T Sym(Grad(D)).Grad(w) (M2)
+ // \int_T Sym(Grad(v_T)).Grad(w) (M1)
+ // \int_T D (M4) and \int_T v_T (M3)
+ // \int_T Skew(Grad(D)) (M6)
+ for (size_type ipt = 0; ipt < pim->nb_points_on_convex(); ++ipt) {
+ ctx1.set_ii(ipt); ctx2.set_ii(ipt);
+ scalar_type coeff = pim->coeff(ipt) * ctx1.J();
+
+ ctx1.grad_base_value(t1);
+ vectorize_grad_base_tensor(t1, tv1, ndof1, pf1->target_dim(), N);
+
+ ctx1.base_value(t1p);
+ vectorize_base_tensor(t1p, tv1p, ndof1, pf1->target_dim(), N);
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), N);
+
+ ctx2.base_value(t2p);
+ vectorize_base_tensor(t2p, tv2p, ndof1, pf1->target_dim(), N);
+
+ for (size_type i = 0; i < ndof2; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M2(j, i) += coeff * tv2.as_vector()[i+(k1+k2*N)*ndof2]
+ * 0.5 * (tv2.as_vector()[j+(k1+k2*N)*ndof2] +
+ tv2.as_vector()[j+(k2+k1*N)*ndof2]);
+
+ for (size_type i = 0; i < ndof2; ++i)
+ for (size_type k = 0; k < N; ++k)
+ M4(k, i) += coeff * tv2p(i, k);
+
+ for (size_type i = 0; i < ndof2; ++i)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M6(k1+k2*N, i) += 0.5*coeff*(tv2.as_vector()[i+(k1+k2*N)*ndof2] -
+ tv2.as_vector()[i+(k2+k1*N)*ndof2]);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M1(j, i) += coeff * tv1.as_vector()[i+(k1+k2*N)*ndof1]
+ * 0.5 * (tv2.as_vector()[j+(k1+k2*N)*ndof2] +
+ tv2.as_vector()[j+(k2+k1*N)*ndof2]);
+
+ for (size_type i = 0; i < ndof1; ++i)
+ for (size_type k = 0; k < N; ++k)
+ M3(k, i) += coeff * tv1p(i, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof1; ++j)
+ for (size_type k = 0; k < N*N; ++k)
+ M7(i, j) += coeff * tv1p(i,k) * tv1p(j, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < N*N; ++k)
+ M8(i, j) += coeff * tv1p(i,k) * tv2p(j, k);
+
+ }
+
+ // Integrals on the faces : \int_{dT}(v_{dT} - v_T).(Sym(Grad(w)).n) (M1)
+ // \int_{dT} Skew(n x v_{dT} - v_{dT} x n) (M5)
+ for (short_type ifc = 0; ifc < pgt->structure()->nb_faces(); ++ifc) {
+ ctx1.set_face_num(ifc); ctx2.set_face_num(ifc); ctxi.set_face_num(ifc);
+ size_type first_ind = pim->ind_first_point_on_face(ifc);
+ for (size_type ipt = 0; ipt < pim->nb_points_on_face(ifc); ++ipt) {
+ ctx1.set_ii(first_ind+ipt);
+ ctx2.set_ii(first_ind+ipt);
+ ctxi.set_ii(first_ind+ipt);
+ scalar_type coeff = pim->coeff(first_ind+ipt) * ctx1.J();
+
+ ctx2.grad_base_value(t2);
+ vectorize_grad_base_tensor(t2, tv2, ndof2, pf2->target_dim(), N);
+
+ ctx2.base_value(t2p);
+ vectorize_base_tensor(t2p, tv2p, ndof2, pf2->target_dim(), N);
+
+ ctx1.base_value(t1);
+ vectorize_base_tensor(t1, tv1p, ndof1, pf1->target_dim(), N);
+
+ ctxi.base_value(ti);
+ vectorize_base_tensor(ti, tvi, ndofi, pfi->target_dim(), N);
+
+ gmm::mult(ctx1.B(), pgt->normals()[ifc], un);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k1 = 0; k1 < N; ++k1) {
+ scalar_type b(0), a = coeff *
+ (tv1p(i, k1) - (i < ndofi ? tvi(i, k1) : 0.));
+ for (size_type k2 = 0; k2 < N; ++k2)
+ b += a * 0.5 * (tv2.as_vector()[j+(k1 + k2*N)*ndof2] +
+ tv2.as_vector()[j+(k2 + k1*N)*ndof2]) * un[k2];
+ M1(j, i) += b;
+ }
+
+ for (size_type i = 0; i < ndof1; ++i)
+ for (size_type k1 = 0; k1 < N; ++k1)
+ for (size_type k2 = 0; k2 < N; ++k2)
+ M5(k1+k2*N, i) += 0.5 * coeff * (tv1p(i, k2) * un[k1] -
+ tv1p(i, k1) * un[k2]);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof1; ++j)
+ for (size_type k = 0; k < N*N; ++k)
+ M7(i, j) += coeff * tv1p(i,k) * tv1p(j, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndof2; ++j)
+ for (size_type k = 0; k < N*N; ++k)
+ M8(i, j) += coeff * tv1p(i,k) * tv2p(j, k);
+
+ for (size_type i = 0; i < ndof1; ++i) // To be optimized
+ for (size_type j = 0; j < ndofi; ++j)
+ for (size_type k = 0; k < N*N; ++k)
+ M9(i, j) += coeff * tv1p(i,k) * tvi(j, k);
+
+ }
+
+ }
+
+ // Add the constraint with penalization
+ gmm::mult(gmm::transposed(M4), M4, aux2);
+ gmm::add (aux2, M2);
+ gmm::mult(gmm::transposed(M6), M6, aux2);
+ gmm::add (aux2, M2);
+ gmm::mult(gmm::transposed(M4), M3, aux1);
+ gmm::add (aux1, M1);
+ gmm::mult(gmm::transposed(M6), M5, aux1);
+ gmm::add (aux1, M1);
+
+ if (pf2->target_dim() == 1 && Q > 1) {
+ gmm::sub_slice I(0, ndof2, Q);
+ gmm::lu_inverse(gmm::sub_matrix(M2, I, I));
+ for (size_type i = 1; i < Q; ++i) {
+ gmm::sub_slice I2(i, ndof2, Q);
+ gmm::copy(gmm::sub_matrix(M2, I, I), gmm::sub_matrix(M2, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M2);
+
+ if (pf1->target_dim() == 1 && Q > 1) {
+ gmm::sub_slice I(0, ndof1, Q);
+ gmm::lu_inverse(gmm::sub_matrix(M7, I, I));
+ for (size_type i = 1; i < Q; ++i) {
+ gmm::sub_slice I2(i, ndof1, Q);
+ gmm::copy(gmm::sub_matrix(M7, I, I), gmm::sub_matrix(M7, I2, I2));
+ }
+ } else
+ gmm::lu_inverse(M7);
+
+ gmm::mult(M2, M1, MD);
+
+ // S = (I - inv(M7)*M9)(I - inv(M7)*M8*MD)
+ base_matrix MPB(ndof1, ndof1);
+ gmm::mult(M7, M9, MPB);
+ gmm::copy(gmm::identity_matrix(), M9);
+ gmm::add(gmm::scaled(MPB, scalar_type(-1)), M9);
+
+ base_matrix MPC(ndof1, ndof1), MPD(ndof1, ndof1);
+ gmm::mult(M8, MD, MPC);
+ gmm::mult(M7, MPC, MPD);
+ gmm::copy(gmm::identity_matrix(), M7);
+ gmm::add(gmm::scaled(MPD, scalar_type(-1)), M7);
+
+ gmm::mult(M9, M7, M);
+ }
+ };
+
+ void add_HHO_symmetrized_stabilization(model &md, std::string name) {
+ pelementary_transformation
+ p = std::make_shared<_HHO_symmetrized_stabilization>();
+ md.add_elementary_transformation(name, p);
+ }
+
+
+
+
+
+} /* end of namespace getfem. */
+
diff --git a/src/getfem_fem_composite.cc b/src/getfem_fem_composite.cc
index 628b33f..74d8187 100644
--- a/src/getfem_fem_composite.cc
+++ b/src/getfem_fem_composite.cc
@@ -802,7 +802,12 @@ namespace getfem {
/* HHO methods: First method for the interior of the elements and */
/* a method for each face (or a single method for all faces) */
/* ******************************************************************** */
-
+ /* It is guaranteed (and used) that the sub-element of index 0 is the */
+ /* element itself and that the faces follow in their usual order. */
+ /* It also has to be guaranteed that the internal degrees of freedom */
+ /* come first. This is ensured by the dof enumeration of the mesh_fem */
+ /* object, since the interior element has index 0. */
+
pfem hho_method(fem_param_list &params,
std::vector<dal::pstatic_stored_object> &dependencies) {
GMM_ASSERT1(params.size() >= 2, "Bad number of parameters : "
@@ -858,7 +863,21 @@ namespace getfem {
return p;
}
+ pfem interior_fem_of_hho_method(pfem hho_method) {
+
+ const polynomial_composite_fem *phho
+ = dynamic_cast<const polynomial_composite_fem*>(hho_method.get());
+ if (phho) {
+ pfem pf0 = phho->mf.fem_of_element(0);
+ pfem pf1 = phho->mf.fem_of_element(1);
+ if (pf1 && (pf1->dim()+1 == pf0->dim()))
+ return phho->mf.fem_of_element(0);
+ }
+
+ GMM_WARNING2("probably not a HHO method");
+ return hho_method;
+ }
diff --git a/src/getfem_generic_assembly_interpolation.cc b/src/getfem_generic_assembly_interpolation.cc
index 9bdae33..1e00d24 100644
--- a/src/getfem_generic_assembly_interpolation.cc
+++ b/src/getfem_generic_assembly_interpolation.cc
@@ -410,7 +410,8 @@ namespace getfem {
virtual const mesh &linked_mesh() { return sl.linked_mesh(); }
- ga_interpolation_context_mesh_slice(const stored_mesh_slice &sl_, base_vector &r)
+ ga_interpolation_context_mesh_slice(const stored_mesh_slice &sl_,
+ base_vector &r)
: result(r), sl(sl_), initialized(false) { }
};
diff --git a/src/getfem_mesh_fem.cc b/src/getfem_mesh_fem.cc
index a86cc98..cdfd355 100644
--- a/src/getfem_mesh_fem.cc
+++ b/src/getfem_mesh_fem.cc
@@ -882,31 +882,33 @@ namespace getfem {
"tensorised fem is not supported");
gmm::resize(vt, ndof, N);
ndof = (ndof*qdim)/N;
- if (qdim == 1) {
+
+ if (qdim == N) {
+ gmm::copy(t.as_vector(), vt.as_vector());
+ } else if (qdim == 1) {
gmm::clear(vt);
base_tensor::const_iterator it = t.begin();
for (size_type i = 0; i < ndof; ++i, ++it)
for (size_type j = 0; j < N; ++j) vt(i*N+j, j) = *it;
- } else if (qdim == N) {
- gmm::copy(t.as_vector(), vt.as_vector());
}
}
void vectorize_grad_base_tensor(const base_tensor &t, base_tensor &vt,
- size_type ndof, size_type qdim,
- size_type N) {
- GMM_ASSERT1(qdim == N || qdim == 1, "mixed intrinsic vector and "
+ size_type ndof, size_type qdim, size_type Q) {
+ size_type N = t.sizes()[2];
+ GMM_ASSERT1(qdim == Q || qdim == 1, "mixed intrinsic vector and "
"tensorised fem is not supported");
- vt.adjust_sizes(bgeot::multi_index(ndof, N, N));
- ndof = (ndof*qdim)/N;
- if (qdim == 1) {
+ vt.adjust_sizes(bgeot::multi_index(ndof, Q, N));
+ ndof = (ndof*qdim)/Q;
+
+ if (qdim == Q) {
+ gmm::copy(t.as_vector(), vt.as_vector());
+ } else if (qdim == 1) {
gmm::clear(vt.as_vector());
base_tensor::const_iterator it = t.begin();
for (size_type k = 0; k < N; ++k)
for (size_type i = 0; i < ndof; ++i, ++it)
- for (size_type j = 0; j < N; ++j) vt(i*N+j, j, k) = *it;
- } else if (qdim == N) {
- gmm::copy(t.as_vector(), vt.as_vector());
+ for (size_type j = 0; j < Q; ++j) vt(i*Q+j, j, k) = *it;
}
}
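
For orientation, the sketch below shows how the operators introduced by this patch might be wired together. The add_HHO_* functions and the "HHO(fem_interior, fem_face, ...)" descriptor form are taken from the diff above; the concrete sub-fem names, the surrounding setup code, and the commented assembly expression are illustrative assumptions, not part of this commit.

// Sketch only (not part of the patch): illustrates how the new operators
// might be used. The add_HHO_* functions and the "HHO(...)" descriptor form
// come from the diff above; the sub-fem names, the setup and the assembly
// expression are assumptions.
#include <getfem/getfem_HHO.h>     // header added by this commit
#include <getfem/getfem_models.h>

void setup_hho(getfem::model &md, const getfem::mesh &m,
               getfem::mesh_fem &mf_u, getfem::mesh_fem &mf_g) {
  using namespace getfem;

  // Hybrid unknown: one fem inside each element and one on each face
  // (a single face method is duplicated on every face, as documented in
  // the getfem_fem.h hunk above). The sub-fem names are placeholders.
  mf_u.set_qdim(1);
  mf_u.set_finite_element(m.convex_index(),
                          fem_descriptor("HHO(FEM_PK(2,2),FEM_PK(1,2))"));

  // Target space for the reconstructed gradient (vector-valued).
  mf_g.set_qdim(m.dim());
  mf_g.set_finite_element(m.convex_index(), fem_descriptor("FEM_PK(2,2)"));

  md.add_fem_variable("u", mf_u);

  // Register the elementary transformations introduced by this commit
  // under user-chosen names.
  add_HHO_reconstructed_gradient(md, "HHO_grad");
  add_HHO_reconstructed_value(md, "HHO_val");
  add_HHO_stabilization(md, "HHO_stab");

  // A weak form would then refer to these names from the generic assembly
  // language, e.g. (hypothetical syntax, not fixed by this patch):
  //   Elementary_transformation(Grad_u, HHO_grad)
}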