Reference documentation for deal.II version 9.1.1
Constraints on degrees of freedom

Classes

class  AffineConstraints< number >
 

Sparsity pattern generation

template<typename DoFHandlerType , typename SparsityPatternType , typename number = double>
void DoFTools::make_sparsity_pattern (const DoFHandlerType &dof_handler, SparsityPatternType &sparsity_pattern, const AffineConstraints< number > &constraints=AffineConstraints< number >(), const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
 
template<typename DoFHandlerType , typename SparsityPatternType , typename number = double>
void DoFTools::make_sparsity_pattern (const DoFHandlerType &dof_handler, const Table< 2, Coupling > &coupling, SparsityPatternType &sparsity_pattern, const AffineConstraints< number > &constraints=AffineConstraints< number >(), const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
 
template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_flux_sparsity_pattern (const DoFHandlerType &dof_handler, SparsityPatternType &sparsity_pattern)
 
template<typename DoFHandlerType , typename SparsityPatternType , typename number >
void DoFTools::make_flux_sparsity_pattern (const DoFHandlerType &dof_handler, SparsityPatternType &sparsity_pattern, const AffineConstraints< number > &constraints, const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
 
template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_flux_sparsity_pattern (const DoFHandlerType &dof, SparsityPatternType &sparsity, const Table< 2, Coupling > &cell_integrals_mask, const Table< 2, Coupling > &face_integrals_mask, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
 
template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_sparsity_pattern (const DoFHandlerType &dof_row, const DoFHandlerType &dof_col, SparsityPatternType &sparsity)
 
template<typename DoFHandlerType , typename SparsityPatternType , typename number >
void DoFTools::make_flux_sparsity_pattern (const DoFHandlerType &dof, SparsityPatternType &sparsity, const AffineConstraints< number > &constraints, const bool keep_constrained_dofs, const Table< 2, Coupling > &couplings, const Table< 2, Coupling > &face_couplings, const types::subdomain_id subdomain_id)
 
template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_boundary_sparsity_pattern (const DoFHandlerType &dof, const std::vector< types::global_dof_index > &dof_to_boundary_mapping, SparsityPatternType &sparsity_pattern)
 
template<typename DoFHandlerType , typename SparsityPatternType , typename number >
void DoFTools::make_boundary_sparsity_pattern (const DoFHandlerType &dof, const std::map< types::boundary_id, const Function< DoFHandlerType::space_dimension, number > *> &boundary_ids, const std::vector< types::global_dof_index > &dof_to_boundary_mapping, SparsityPatternType &sparsity)
 

Hanging nodes and other constraints

template<typename DoFHandlerType , typename number >
void DoFTools::make_hanging_node_constraints (const DoFHandlerType &dof_handler, AffineConstraints< number > &constraints)
 
template<int dim, int spacedim>
void DoFTools::compute_intergrid_constraints (const DoFHandler< dim, spacedim > &coarse_grid, const unsigned int coarse_component, const DoFHandler< dim, spacedim > &fine_grid, const unsigned int fine_component, const InterGridMap< DoFHandler< dim, spacedim >> &coarse_to_fine_grid_map, AffineConstraints< double > &constraints)
 
template<int dim, int spacedim>
void DoFTools::compute_intergrid_transfer_representation (const DoFHandler< dim, spacedim > &coarse_grid, const unsigned int coarse_component, const DoFHandler< dim, spacedim > &fine_grid, const unsigned int fine_component, const InterGridMap< DoFHandler< dim, spacedim >> &coarse_to_fine_grid_map, std::vector< std::map< types::global_dof_index, float >> &transfer_representation)
 

Miscellaneous

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void DoFTools::make_zero_boundary_constraints (const DoFHandlerType< dim, spacedim > &dof, const types::boundary_id boundary_id, AffineConstraints< number > &zero_boundary_constraints, const ComponentMask &component_mask=ComponentMask())
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void DoFTools::make_zero_boundary_constraints (const DoFHandlerType< dim, spacedim > &dof, AffineConstraints< number > &zero_boundary_constraints, const ComponentMask &component_mask=ComponentMask())
 

Indirectly applying constraints to LinearOperator

template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > distribute_constraints_linear_operator (const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &exemplar)
 
template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > project_to_constrained_linear_operator (const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &exemplar)
 
template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > constrained_linear_operator (const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &linop)
 
template<typename Range , typename Domain , typename Payload >
PackagedOperation< Range > constrained_right_hand_side (const AffineConstraints< typename Range::value_type > &constraints, const LinearOperator< Range, Domain, Payload > &linop, const Range &right_hand_side)
 

Interpolation and projection

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values (const Mapping< dim, spacedim > &mapping, const DoFHandlerType< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > *> &function_map, AffineConstraints< number > &constraints, const ComponentMask &component_mask=ComponentMask())
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values (const Mapping< dim, spacedim > &mapping, const DoFHandlerType< dim, spacedim > &dof, const types::boundary_id boundary_component, const Function< spacedim, number > &boundary_function, AffineConstraints< number > &constraints, const ComponentMask &component_mask=ComponentMask())
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values (const DoFHandlerType< dim, spacedim > &dof, const types::boundary_id boundary_component, const Function< spacedim, number > &boundary_function, AffineConstraints< number > &constraints, const ComponentMask &component_mask=ComponentMask())
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values (const DoFHandlerType< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > *> &function_map, AffineConstraints< number > &constraints, const ComponentMask &component_mask=ComponentMask())
 
template<int dim, int spacedim, typename number >
void VectorTools::project_boundary_values (const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > *> &boundary_functions, const Quadrature< dim - 1 > &q, AffineConstraints< number > &constraints, std::vector< unsigned int > component_mapping={})
 
template<int dim, int spacedim, typename number >
void VectorTools::project_boundary_values (const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > *> &boundary_function, const Quadrature< dim - 1 > &q, AffineConstraints< number > &constraints, std::vector< unsigned int > component_mapping={})
 
template<int dim>
void VectorTools::project_boundary_values_curl_conforming (const DoFHandler< dim > &dof_handler, const unsigned int first_vector_component, const Function< dim, double > &boundary_function, const types::boundary_id boundary_component, AffineConstraints< double > &constraints, const Mapping< dim > &mapping=StaticMappingQ1< dim >::mapping)
 
template<int dim>
void VectorTools::project_boundary_values_curl_conforming (const hp::DoFHandler< dim > &dof_handler, const unsigned int first_vector_component, const Function< dim, double > &boundary_function, const types::boundary_id boundary_component, AffineConstraints< double > &constraints, const hp::MappingCollection< dim, dim > &mapping_collection=hp::StaticMappingQ1< dim >::mapping_collection)
 
template<int dim>
void VectorTools::project_boundary_values_curl_conforming_l2 (const DoFHandler< dim > &dof_handler, const unsigned int first_vector_component, const Function< dim, double > &boundary_function, const types::boundary_id boundary_component, AffineConstraints< double > &constraints, const Mapping< dim > &mapping=StaticMappingQ1< dim >::mapping)
 
template<int dim>
void VectorTools::project_boundary_values_curl_conforming_l2 (const hp::DoFHandler< dim > &dof_handler, const unsigned int first_vector_component, const Function< dim, double > &boundary_function, const types::boundary_id boundary_component, AffineConstraints< double > &constraints, const hp::MappingCollection< dim, dim > &mapping_collection=hp::StaticMappingQ1< dim >::mapping_collection)
 
template<int dim>
void VectorTools::project_boundary_values_div_conforming (const DoFHandler< dim > &dof_handler, const unsigned int first_vector_component, const Function< dim, double > &boundary_function, const types::boundary_id boundary_component, AffineConstraints< double > &constraints, const Mapping< dim > &mapping=StaticMappingQ1< dim >::mapping)
 
template<int dim>
void VectorTools::project_boundary_values_div_conforming (const hp::DoFHandler< dim > &dof_handler, const unsigned int first_vector_component, const Function< dim, double > &boundary_function, const types::boundary_id boundary_component, AffineConstraints< double > &constraints, const hp::MappingCollection< dim, dim > &mapping_collection=hp::StaticMappingQ1< dim >::mapping_collection)
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_nonzero_normal_flux_constraints (const DoFHandlerType< dim, spacedim > &dof_handler, const unsigned int first_vector_component, const std::set< types::boundary_id > &boundary_ids, const std::map< types::boundary_id, const Function< spacedim > *> &function_map, AffineConstraints< double > &constraints, const Mapping< dim, spacedim > &mapping=StaticMappingQ1< dim >::mapping)
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_no_normal_flux_constraints (const DoFHandlerType< dim, spacedim > &dof_handler, const unsigned int first_vector_component, const std::set< types::boundary_id > &boundary_ids, AffineConstraints< double > &constraints, const Mapping< dim, spacedim > &mapping=StaticMappingQ1< dim >::mapping)
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_nonzero_tangential_flux_constraints (const DoFHandlerType< dim, spacedim > &dof_handler, const unsigned int first_vector_component, const std::set< types::boundary_id > &boundary_ids, const std::map< types::boundary_id, const Function< spacedim > *> &function_map, AffineConstraints< double > &constraints, const Mapping< dim, spacedim > &mapping=StaticMappingQ1< dim >::mapping)
 
template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_normal_flux_constraints (const DoFHandlerType< dim, spacedim > &dof_handler, const unsigned int first_vector_component, const std::set< types::boundary_id > &boundary_ids, AffineConstraints< double > &constraints, const Mapping< dim, spacedim > &mapping=StaticMappingQ1< dim >::mapping)
 

Detailed Description

This module deals with constraints on degrees of freedom. The central class to deal with constraints is the AffineConstraints class.

Constraints typically come from several sources. For example, they may arise from hanging nodes on adaptively refined meshes, from Dirichlet-type boundary conditions, or from periodic boundary conditions.

In all of these examples, constraints on degrees of freedom are linear and possibly inhomogeneous. In other words, they always have the form \(x_{i_1} = \sum_{j=2}^M a_{i_j} x_{i_j} + b_i\). The deal.II class that deals with storing and using these constraints is AffineConstraints.
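For illustration, a constraint of this form can also be entered into an AffineConstraints object by hand; the indices and weights in the following sketch are purely hypothetical:

#include <deal.II/lac/affine_constraints.h>

AffineConstraints<double> constraints;

// constrain x_3 = 0.5 * x_1 + 0.5 * x_2 + 1.0 (hypothetical indices and values)
constraints.add_line(3);
constraints.add_entry(3, 1, 0.5);
constraints.add_entry(3, 2, 0.5);
constraints.set_inhomogeneity(3, 1.0);

constraints.close();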

Eliminating constraints

When building the global system matrix and the right hand sides, one can build them without taking care of the constraints, i.e. by simply looping over cells and adding the local contributions to the global matrix and right hand side objects. In order to do actual calculations, you have to 'condense' the linear system: eliminate constrained degrees of freedom and distribute the appropriate values to the unconstrained dofs. This changes the sparsity pattern of the sparse matrices used in finite element calculations and is thus a quite expensive operation. The general scheme of things is then that you build your system, you eliminate (condense) away constrained nodes using the AffineConstraints::condense() functions, then you solve the remaining system, and finally you compute the values of constrained nodes from the values of the unconstrained ones using the AffineConstraints::distribute() function. Note that the AffineConstraints::condense() function is applied to matrix and right hand side of the linear system, while the AffineConstraints::distribute() function is applied to the solution vector. This is the method used in the first few tutorial programs, see for example step-6.
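In code, this condense/solve/distribute cycle might look like the following sketch (system_matrix, system_rhs, solution, solver, and preconditioner are assumed to have been set up elsewhere, and the sparsity pattern is assumed to have been condensed as described below):

// assemble system_matrix and system_rhs without regard to constraints, then:
constraints.condense(system_matrix, system_rhs);

solver.solve(system_matrix, solution, system_rhs, preconditioner);

// finally compute the values of the constrained degrees of freedom:
constraints.distribute(solution);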

This scheme of first building a linear system and then eliminating constrained degrees of freedom is inefficient, and a bottleneck if there are many constraints and matrices are full, i.e. especially for 3d and/or higher order or hp finite elements. Furthermore, it is impossible to implement for parallel computations where a process may not have access to elements of the matrix. We therefore offer a second way of building linear systems, using the AffineConstraints::add_entries_local_to_global() and AffineConstraints::distribute_local_to_global() functions discussed below. The resulting linear systems are equivalent to those one gets after calling the AffineConstraints::condense() functions.

Note
Both ways of applying constraints set the matrix diagonal entries corresponding to constrained degrees of freedom to a positive value of the same magnitude as the other entries in the matrix. As a consequence, you need to set up your problem such that the weak form describing the main matrix contribution is not negative definite. Otherwise, iterative solvers such as CG will break down or be considerably slower than GMRES.
While these two ways are equivalent, i.e., the solution of linear systems computed via either approach is the same, the linear systems themselves do not necessarily have the same matrix and right hand side vector entries. Specifically, the matrix diagonal and right hand side entries corresponding to constrained degrees of freedom may be different as a result of the way in which we compute them; they are, however, always chosen in such a way that the solution to the linear system is the same.

Condensing matrices and sparsity patterns

As mentioned above, the first way of using constraints is to build linear systems without regard to constraints and then "condensing" them away. Condensation of a matrix is done in four steps: first, one builds the sparsity pattern of the (uncondensed) matrix, for example using DoFTools::make_sparsity_pattern(); second, the sparsity pattern is condensed, i.e., those entries are added that the condensation of the matrix will later write into; third, the global matrix is assembled; and fourth, the matrix (and, where applicable, the right hand side) is condensed using the AffineConstraints::condense() functions.

In the condensation process, we are not actually changing the number of rows or columns of the sparsity pattern, matrix, and vectors. Instead, the condense functions add nonzero entries to the sparsity pattern of the matrix (with constrained nodes in it) where the condensation process of the matrix will create additional nonzero elements. In the condensation process itself, rows and columns subject to constraints are distributed to the rows and columns of unconstrained nodes. The constrained degrees of freedom remain in place. In order not to disturb the solution process, these rows and columns are filled with zeros and an appropriate positive value on the main diagonal (we choose an average of the magnitudes of the other diagonal elements, so as to make sure that the new diagonal entry has the same order of magnitude as the other entries; this preserves the scaling properties of the matrix). The corresponding value in the right hand sides is set to zero. This way, the constrained node will always get the value zero upon solution of the equation system and will not couple to other nodes any more.

Keeping the entries in the matrix has the advantage over creating a new and smaller matrix, that only one matrix and sparsity pattern is needed thus less memory is required. Additionally, the condensation process is less expensive, since not all but only constrained values in the matrix have to be copied. On the other hand, the solution process will take a bit longer, since matrix vector multiplications will incur multiplications with zeroes in the lines subject to constraints. Additionally, the vector size is larger, resulting in more memory consumption for those iterative solution methods using a larger number of auxiliary vectors (e.g. methods using explicit orthogonalization procedures). Nevertheless, this process is more efficient due to its lower memory consumption and is discussed in the first few programs of the Tutorial programs , for example in step-6.

The condensation functions exist for different argument types: SparsityPattern, SparseMatrix and BlockSparseMatrix. Note that there are no versions for arguments of type PETScWrappers::SparseMatrix() or any of the other PETSc or Trilinos matrix wrapper classes. This is due to the fact that it is relatively hard to get a representation of the sparsity structure of PETSc matrices, and to modify them efficiently; this holds in particular, if the matrix is actually distributed across a cluster of computers. If you want to use PETSc/Trilinos matrices, you can either copy an already condensed deal.II matrix, or assemble the PETSc/Trilinos matrix in the already condensed form, see the discussion below.

Condensing vectors

Condensing vectors works exactly as described above for matrices. Note that condensation is an idempotent operation, i.e. doing it more than once on a vector or matrix yields the same result as doing it only once: once an object has been condensed, further condensation operations don't change it any more.

In contrast to the matrix condensation functions, the vector condensation functions exist in variants for PETSc and Trilinos vectors. However, using them is typically expensive, and should be avoided. You should use the same techniques as mentioned above to avoid their use.

Avoiding explicit condensation

Sometimes, one wants to avoid explicit condensation of a linear system after it has been built at all. There are two main reasons for wanting to do so: condensing an already built system is a comparatively expensive operation if there are many constraints, and it is not possible at all if the matrix is distributed across several processes and individual entries cannot be accessed (as discussed above).

In this case, one possibility is to distribute local entries to the final destinations right at the moment of transferring them into the global matrices and vectors, and similarly build a sparsity pattern in the condensed form at the time it is set up originally.

The AffineConstraints class offers support for these operations as well. For example, the AffineConstraints::add_entries_local_to_global() function adds nonzero entries to a sparsity pattern object. It not only adds a given entry, but also all entries that we will have to write to if the current entry corresponds to a constrained degree of freedom that will later be eliminated. Similarly, one can use the AffineConstraints::distribute_local_to_global() functions to directly distribute entries in vectors and matrices when copying local contributions into a global matrix or vector. These calls make a subsequent call to AffineConstraints::condense() unnecessary. For examples of their use see the tutorial programs referenced above.
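A typical assembly loop using this mechanism might look like the following sketch (cell_matrix, cell_rhs, local_dof_indices, and the global matrix and vector are assumed to exist):

for (const auto &cell : dof_handler.active_cell_iterators())
  {
    // ... compute cell_matrix and cell_rhs for the current cell ...

    cell->get_dof_indices(local_dof_indices);

    // copy the local contributions into the global objects, resolving
    // constraints on the fly:
    constraints.distribute_local_to_global(
      cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
  }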

Note that, despite their name which describes what the functions really do, the AffineConstraints::distribute_local_to_global() functions have to be applied to matrices and right hand side vectors, whereas the AffineConstraints::distribute() function discussed below is applied to the solution vector after solving the linear system.

Distributing constraints

After solving the condensed system of equations, the solution vector has to be "distributed": the modification to the original linear system that results from calling AffineConstraints::condense() leads to a linear system that solves correctly for all degrees of freedom that are unconstrained but leaves the values of constrained degrees of freedom undefined. To get the correct values also for these degrees of freedom, you need to "distribute" the unconstrained values also to their constrained colleagues. This is done by the AffineConstraints::distribute() function. The operation of distribution undoes the condensation process in some sense, but it should be noted that it is not the inverse operation. Basically, distribution sets the values of the constrained nodes to the value that is computed from the constraint given the values of the unconstrained nodes plus possible inhomogeneities.

Treatment of inhomogeneous constraints

In case some constraint lines have inhomogeneities (which is typically the case if the constraint comes from the implementation of inhomogeneous boundary conditions), the situation is a bit more complicated than if the only constraints were due to hanging nodes alone. This is because the elimination of the off-diagonal values in the matrix generates contributions in the vector entries of the eliminated rows. This means that inhomogeneities can only be handled with functions that act simultaneously on a matrix and a vector. Consequently, all inhomogeneities are ignored in case the respective condense function is called without any matrix (or if the matrix has already been condensed before).

The use of the AffineConstraints class for implementing Dirichlet boundary conditions is discussed in the step-22 tutorial program. A further example that utilizes AffineConstraints is step-41. The situation there is a little more complicated, because we have some constraints which are not at the boundary. There are two ways to apply inhomogeneous constraints after creating an AffineConstraints object:

First approach: assemble the linear system using AffineConstraints::distribute_local_to_global() with its use_inhomogeneities_for_rhs argument at the default value false, set the constrained entries of the solution vector to zero, solve, and then call AffineConstraints::distribute() so that the constrained degrees of freedom receive their correct (possibly nonzero) values.

Second approach: assemble with use_inhomogeneities_for_rhs = true in AffineConstraints::distribute_local_to_global(), so that the matrix diagonal and right hand side entries of constrained rows are chosen such that the solution of the linear system already carries the correct constrained values; calling AffineConstraints::distribute() afterwards is then not necessary (but does no harm).

Of course, both approaches lead to the same final answer but in different ways. Using the first approach (i.e., when using use_inhomogeneities_for_rhs = false in AffineConstraints::distribute_local_to_global()), the linear system we build has zero entries in the right hand side in all those places where a degree of freedom is constrained, and some positive value on the matrix diagonal of these lines. Consequently, the solution vector of the linear system will have a zero value for inhomogeneously constrained degrees of freedom and we need to call AffineConstraints::distribute() to give these degrees of freedom their correct nonzero values.

On the other hand, in the second approach, the matrix diagonal element and corresponding right hand side entry for inhomogeneously constrained degrees of freedom are so that the solution of the linear system already has the correct value (e.g., if the constraint is that \(x_{13}=42\) then row \(13\) if the matrix is empty with the exception of the diagonal entry, and \(b_{13}/A_{13,13}=42\) so that the solution of \(Ax=b\) must satisfy \(x_{13}=42\) as desired). As a consequence, we do not need to call AffineConstraints::distribute() after solving to fix up inhomogeneously constrained components of the solution, though there is also no harm in doing so.
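A sketch of the second approach: during assembly, pass use_inhomogeneities_for_rhs = true to AffineConstraints::distribute_local_to_global() (the surrounding objects are the same as in the assembly loop sketched earlier):

constraints.distribute_local_to_global(cell_matrix,
                                       cell_rhs,
                                       local_dof_indices,
                                       system_matrix,
                                       system_rhs,
                                       /* use_inhomogeneities_for_rhs = */ true);
// ... solve ...
// calling constraints.distribute(solution) afterwards is optional here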

There remains the question of which of the approaches to take and why we need to set to zero the values of the solution vector in the first approach. The answer to both questions has to do with how iterative solvers solve the linear system. To this end, consider that we typically stop iterations when the residual has dropped below a certain fraction of the norm of the right hand side, or, alternatively, a certain fraction of the norm of the initial residual. Because the two approaches produce different right hand sides and different initial residuals in the constrained rows, the stopping criterion is affected differently in each case.

In addition to these considerations, consider the case where we have inhomogeneous constraints of the kind \(x_{3}=\tfrac 12 x_1 + \tfrac 12\), e.g., from a hanging node constraint of the form \(x_{3}=\tfrac 12 (x_1 + x_2)\) where \(x_2\) is itself constrained by boundary values to \(x_2=1\). In this case, the AffineConstraints container can of course not figure out what the final value of \(x_3\) should be and, consequently, can not set the solution vector's third component correctly. Thus, the second approach will not work and you should take the first.

Dealing with conflicting constraints

There are situations where degrees of freedom are constrained in more than one way, and sometimes in conflicting ways. Consider, for example the following situation:

(Figure: conflicting_constraints.png, showing a degree of freedom that is both a hanging node and located on the boundary.)

Here, degree of freedom \(x_0\) marked in blue is a hanging node. If we used trilinear finite elements, i.e. FE_Q(1), then it would carry the constraint \(x_0=\frac 12 (x_{1}+x_{2})\). On the other hand, it is at the boundary, and if we have imposed boundary conditions \(u|_{\partial\Omega}=g\) then we will have the constraint \(x_0=g_0\) where \(g_0\) is the value of the boundary function \(g(\mathbf x)\) at the location of this degree of freedom.

So, which one will win? Or maybe: which one should win? There is no good answer to this question: both constraints express legitimate requirements, and which of the two is the more appropriate one depends on the application.

That said, if you know which of the two constraints you want to take precedence on such degrees of freedom, you can often achieve this by the order in which you enter constraints into the AffineConstraints object: for example, VectorTools::interpolate_boundary_values() leaves degrees of freedom untouched that are already constrained, so entering the hanging node constraints first lets them win (see also the discussion of conflicting constraints in the documentation of that function).

Either behavior can also be achieved by building two separate AffineConstraints objects and calling the AffineConstraints::merge() function with a particular second argument.
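A minimal sketch of this merge-based approach (assuming dof_handler and a boundary function boundary_values exist; which MergeConflictBehavior value is appropriate depends on which set of constraints you want to win):

AffineConstraints<double> hanging_node_constraints;
AffineConstraints<double> boundary_constraints;

DoFTools::make_hanging_node_constraints(dof_handler, hanging_node_constraints);
VectorTools::interpolate_boundary_values(dof_handler,
                                         0,
                                         boundary_values,
                                         boundary_constraints);

// for example, let the boundary constraints override hanging node constraints
// on degrees of freedom that are constrained in both objects:
hanging_node_constraints.merge(boundary_constraints,
                               AffineConstraints<double>::right_object_wins);
hanging_node_constraints.close();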

Applying constraints indirectly with a LinearOperator

Sometimes it is either not desirable or not possible to directly condense or eliminate constraints from a system of linear equations, in particular if there is no underlying matrix object that could be condensed (or in which the constraints could be taken care of during assembly). This is usually the case if the system is described by a LinearOperator.

In this case we can solve the modified system

\[ (C^T A C + Id_c) \tilde x = C^T (b - A\,k) \]

instead [1] (M. S. Shephard. Linear multipoint constraints applied via transformation as part of a direct stiffness assembly process. International Journal for Numerical Methods in Engineering 20(11):2107-2112, 1985).

Here, \(A\) is a given (unconstrained) system matrix for which we only assume that we can apply it to a vector but can not necessarily access individual matrix entries. \(b\) is the corresponding right hand side of a system of linear equations \(A\,x=b\). The matrix \(C\) describes the homogeneous part of the linear constraints stored in an AffineConstraints object and the vector \(k\) is the vector of corresponding inhomogeneities. More precisely, the AffineConstraints::distribute() operation applied on a vector \(x\) is the operation

\[ x \leftarrow C\,x+k. \]

And finally, \(Id_c\) denotes the identity on the subspace of constrained degrees of freedom.

The corresponding solution of \(A\,x=b\) that obeys these constraints is then recovered by distributing constraints: \(x=C\tilde x+k\).

The whole system can be set up and solved with the following snippet of code:

#include <deal.II/lac/constrained_linear_operator.h>
// ...
// system_matrix - unconstrained and assembled system matrix
// right_hand_side - unconstrained and assembled right hand side
// affine_constraints - an AffineConstraints object
// solver - an appropriate, iterative solver
// preconditioner - a preconditioner
const auto op_a = linear_operator(system_matrix);
const auto op_amod = constrained_linear_operator(affine_constraints, op_a);
Vector<double> rhs_mod =
  constrained_right_hand_side(affine_constraints, op_a, right_hand_side);
solver.solve(op_amod, solution, rhs_mod, preconditioner);
affine_constraints.distribute(solution);

Function Documentation

◆ make_sparsity_pattern() [1/3]

template<typename DoFHandlerType , typename SparsityPatternType , typename number = double>
void DoFTools::make_sparsity_pattern ( const DoFHandlerType &  dof_handler,
SparsityPatternType &  sparsity_pattern,
const AffineConstraints< number > &  constraints = AffineConstraints<number>(),
const bool  keep_constrained_dofs = true,
const types::subdomain_id  subdomain_id = numbers::invalid_subdomain_id 
)

Compute which entries of a matrix built on the given dof_handler may possibly be nonzero, and create a sparsity pattern object that represents these nonzero locations.

This function computes the possible positions of non-zero entries in the global system matrix by simulating which entries one would write to during the actual assembly of a matrix. For this, the function assumes that each finite element basis function is non-zero on a cell only if its degree of freedom is associated with the interior, a face, an edge or a vertex of this cell. As a result, a matrix entry \(A_{ij}\) that is computed from two basis functions \(\varphi_i\) and \(\varphi_j\) with (global) indices \(i\) and \(j\) (for example, using a bilinear form \(A_{ij}=a(\varphi_i,\varphi_j)\)) can be non-zero only if these shape functions correspond to degrees of freedom that are defined on at least one common cell. Therefore, this function just loops over all cells, figures out the global indices of all degrees of freedom, and presumes that all matrix entries that couple any of these indices will result in a nonzero matrix entry. These will then be added to the sparsity pattern. As this process of generating the sparsity pattern does not take into account the equation to be solved later on, the resulting sparsity pattern is symmetric.

This algorithm makes no distinction between shape functions on each cell, i.e., it simply couples all degrees of freedom on a cell with all other degrees of freedom on a cell. This is often the case, and always a safe assumption. However, if you know something about the structure of your operator and that it does not couple certain shape functions with certain test functions, then you can get a sparser sparsity pattern by calling a variant of the current function described below that allows to specify which vector components couple with which other vector components.

The method described above lives on the assumption that coupling between degrees of freedom only happens if shape functions overlap on at least one cell. This is the case with most usual finite element formulations involving conforming elements. However, for formulations such as the Discontinuous Galerkin finite element method, the bilinear form contains terms on interfaces between cells that couple shape functions that live on one cell with shape functions that live on a neighboring cell. The current function would not see these couplings, and would consequently not allocate entries in the sparsity pattern. You would then get into trouble during matrix assembly because you try to write into matrix entries for which no space has been allocated in the sparsity pattern. This can be avoided by calling the DoFTools::make_flux_sparsity_pattern() function instead, which takes into account coupling between degrees of freedom on neighboring cells.

There are other situations where bilinear forms contain non-local terms, for example in treating integral equations. These require different methods for building the sparsity patterns that depend on the exact formulation of the problem. You will have to do this yourself then.

Parameters
[in] dof_handler : The DoFHandler or hp::DoFHandler object that describes which degrees of freedom live on which cells.
[out] sparsity_pattern : The sparsity pattern to be filled with entries.
[in] constraints : The process for generating entries described above is purely local to each cell. Consequently, the sparsity pattern does not provide for matrix entries that will only be written into during the elimination of hanging nodes or other constraints. They have to be taken care of by a subsequent call to AffineConstraints::condense(). Alternatively, the constraints on degrees of freedom can already be taken into account at the time of creating the sparsity pattern. For this, pass the AffineConstraints object as the third argument to the current function. No call to AffineConstraints::condense() is then necessary. This process is explained in step-6, step-27, and other tutorial programs.
[in] keep_constrained_dofs : In case the constraints are already taken care of in this function by passing in an AffineConstraints object, it is possible to abandon some off-diagonal entries in the sparsity pattern if these entries will also not be written into during the actual assembly of the matrix this sparsity pattern later serves. Specifically, when using an assembly method that uses AffineConstraints::distribute_local_to_global(), no entries will ever be written into those matrix rows or columns that correspond to constrained degrees of freedom. In such cases, you can set the argument keep_constrained_dofs to false to avoid allocating these entries in the sparsity pattern.
[in] subdomain_id : If specified, the sparsity pattern is built only on cells that have a subdomain_id equal to the given argument. This is useful in parallel contexts where the matrix and sparsity pattern (for example a TrilinosWrappers::SparsityPattern) may be distributed and not every MPI process needs to build the entire sparsity pattern; in that case, it is sufficient if every process only builds that part of the sparsity pattern that corresponds to the subdomain_id for which it is responsible. This feature is used in step-32. (This argument is not usually needed for objects of type parallel::distributed::Triangulation because the current function only loops over locally owned cells anyway; thus, this argument typically only makes sense if you want to use the subdomain_id for anything other than indicating which processor owns a cell, for example which geometric component of the domain a cell belongs to.)
Note
The actual type of the sparsity pattern may be SparsityPattern, DynamicSparsityPattern, BlockSparsityPattern, BlockDynamicSparsityPattern, or any other class that satisfies similar requirements. It is assumed that the size of the sparsity pattern matches the number of degrees of freedom and that enough unused nonzero entries are left to fill the sparsity pattern if the sparsity pattern is of "static" kind (see Sparsity patterns for more information on what this means). The nonzero entries generated by this function are added to possible previous content of the object, i.e., previously added entries are not removed.
If the sparsity pattern is represented by an object of type SparsityPattern (as opposed to, for example, DynamicSparsityPattern), you need to remember using SparsityPattern::compress() after generating the pattern.
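For example, a typical usage sketch might look as follows (the objects named in the comments are assumed to have been created elsewhere):

// dof_handler      - a DoFHandler with distributed degrees of freedom
// constraints      - a closed AffineConstraints object
// sparsity_pattern - a SparsityPattern object
// system_matrix    - a SparseMatrix<double>
DynamicSparsityPattern dsp(dof_handler.n_dofs());
DoFTools::make_sparsity_pattern(dof_handler,
                                dsp,
                                constraints,
                                /* keep_constrained_dofs = */ false);
sparsity_pattern.copy_from(dsp);
system_matrix.reinit(sparsity_pattern);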

Definition at line 64 of file dof_tools_sparsity.cc.

◆ make_sparsity_pattern() [2/3]

template<typename DoFHandlerType , typename SparsityPatternType , typename number = double>
void DoFTools::make_sparsity_pattern ( const DoFHandlerType &  dof_handler,
const Table< 2, Coupling > &  coupling,
SparsityPatternType &  sparsity_pattern,
const AffineConstraints< number > &  constraints = AffineConstraints<number>(),
const bool  keep_constrained_dofs = true,
const types::subdomain_id  subdomain_id = numbers::invalid_subdomain_id 
)

Compute which entries of a matrix built on the given dof_handler may possibly be nonzero, and create a sparsity pattern object that represents these nonzero locations.

This function is a simple variation on the previous make_sparsity_pattern() function (see there for a description of all of the common arguments), but it provides functionality for vector finite elements that allows to be more specific about which variables couple in which equation.

For example, if you wanted to solve the Stokes equations,

\begin{align*} -\Delta \mathbf u + \nabla p &= 0,\\ \text{div}\ u &= 0 \end{align*}

in two space dimensions, using stable Q2/Q1 mixed elements (using the FESystem class), then you don't want all degrees of freedom to couple in each equation. More specifically, in the first equation, only \(u_x\) and \(p\) appear; in the second equation, only \(u_y\) and \(p\) appear; and in the third equation, only \(u_x\) and \(u_y\) appear. (Note that this discussion only talks about vector components of the solution variable and the different equation, and has nothing to do with degrees of freedom, or in fact with any kind of discretization.) We can describe this by the following pattern of "couplings":

\[ \left[ \begin{array}{ccc} 1 & 0 & 1 \\ 0 & 1 & 1 \\ 1 & 1 & 0 \end{array} \right] \]

where "1" indicates that two variables (i.e., vector components of the FESystem) couple in the respective equation, and a "0" means no coupling. These zeros imply that upon discretization via a standard finite element formulation, we will not write entries into the matrix that, for example, couple pressure test functions with pressure shape functions (and similar for the other zeros above). It is then a waste to allocate memory for these entries in the matrix and the sparsity pattern, and you can avoid this by creating a mask such as the one above that describes this to the (current) function that computes the sparsity pattern. As stated above, the mask shown above refers to components of the composed FESystem, rather than to degrees of freedom or shape functions.

This function is designed to accept a coupling pattern, like the one shown above, through the couplings parameter, which contains values of type Coupling. It builds the matrix structure just like the previous function, but does not create matrix elements if not specified by the coupling pattern. If the couplings are symmetric, then so will be the resulting sparsity pattern.

There is a complication if some or all of the shape functions of the finite element in use are non-zero in more than one component (in deal.II speak: they are non-primitive finite elements). In this case, the coupling element corresponding to the first non-zero component is taken and additional ones for this component are ignored.
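For the 2d Stokes example above, the coupling table could be set up as in the following sketch (dof_handler, constraints, and a DynamicSparsityPattern dsp are assumed to exist; the loop encodes exactly the mask shown above):

const unsigned int dim          = 2;
const unsigned int n_components = dim + 1; // (u_x, u_y, p)

Table<2, DoFTools::Coupling> coupling(n_components, n_components);
for (unsigned int c = 0; c < n_components; ++c)
  for (unsigned int d = 0; d < n_components; ++d)
    {
      // velocity components do not couple with each other, and the
      // pressure does not couple with itself:
      if ((c < dim && d < dim && c != d) || (c == dim && d == dim))
        coupling[c][d] = DoFTools::none;
      else
        coupling[c][d] = DoFTools::always;
    }

DoFTools::make_sparsity_pattern(dof_handler, coupling, dsp, constraints);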

Definition at line 123 of file dof_tools_sparsity.cc.

◆ make_flux_sparsity_pattern() [1/4]

template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_flux_sparsity_pattern ( const DoFHandlerType &  dof_handler,
SparsityPatternType &  sparsity_pattern 
)

Compute which entries of a matrix built on the given dof_handler may possibly be nonzero, and create a sparsity pattern object that represents these nonzero locations. This function is a variation of the make_sparsity_pattern() functions above in that it assumes that the bilinear form you want to use to generate the matrix also contains terms that integrate over the faces between cells (i.e., it contains "fluxes" between cells, explaining the name of the function).

This function is useful for Discontinuous Galerkin methods where the standard make_sparsity_pattern() function would only create nonzero entries for all degrees of freedom on one cell coupling to all other degrees of freedom on the same cell; however, in DG methods, all or some degrees of freedom on each cell also couple to the degrees of freedom on other cells connected to the current one by a common face. The current function also creates the nonzero entries in the matrix resulting from these additional couplings. In other words, this function computes a strict super-set of nonzero entries compared to the work done by make_sparsity_pattern().

Parameters
[in] dof_handler : The DoFHandler or hp::DoFHandler object that describes which degrees of freedom live on which cells.
[out] sparsity_pattern : The sparsity pattern to be filled with entries.
Note
The actual type of the sparsity pattern may be SparsityPattern, DynamicSparsityPattern, BlockSparsityPattern, BlockDynamicSparsityPattern, or any other class that satisfies similar requirements. It is assumed that the size of the sparsity pattern matches the number of degrees of freedom and that enough unused nonzero entries are left to fill the sparsity pattern if the sparsity pattern is of "static" kind (see Sparsity patterns for more information on what this means). The nonzero entries generated by this function are added to possible previous content of the object, i.e., previously added entries are not removed.
If the sparsity pattern is represented by an object of type SparsityPattern (as opposed to, for example, DynamicSparsityPattern), you need to remember using SparsityPattern::compress() after generating the pattern.

Definition at line 693 of file dof_tools_sparsity.cc.

◆ make_flux_sparsity_pattern() [2/4]

template<typename DoFHandlerType , typename SparsityPatternType , typename number >
void DoFTools::make_flux_sparsity_pattern ( const DoFHandlerType &  dof_handler,
SparsityPatternType &  sparsity_pattern,
const AffineConstraints< number > &  constraints,
const bool  keep_constrained_dofs = true,
const types::subdomain_id  subdomain_id = numbers::invalid_subdomain_id 
)

This function does essentially the same as the other make_flux_sparsity_pattern() function but allows the specification of a number of additional arguments. These carry the same meaning as discussed in the first make_sparsity_pattern() function above.

Definition at line 523 of file dof_tools_sparsity.cc.

◆ make_flux_sparsity_pattern() [3/4]

template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_flux_sparsity_pattern ( const DoFHandlerType &  dof,
SparsityPatternType &  sparsity,
const Table< 2, Coupling > &  cell_integrals_mask,
const Table< 2, Coupling > &  face_integrals_mask,
const types::subdomain_id  subdomain_id = numbers::invalid_subdomain_id 
)

This function does essentially the same as the other make_flux_sparsity_pattern() function but allows the specification of coupling matrices that state which components of the solution variable couple in each of the equations you are discretizing. This works in complete analogy as discussed in the second make_sparsity_pattern() function above.

In fact, this function takes two such masks, one describing which variables couple with each other in the cell integrals that make up your bilinear form, and which variables couple with each other in the face integrals. If you passed masks consisting of only 1s to both of these, then you would get the same sparsity pattern as if you had called the first of the make_sparsity_pattern() functions above. By setting some of the entries of these masks to zeros, you can get a sparser sparsity pattern.
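A sketch of how the two masks might be passed (n_components, dof_handler, and a DynamicSparsityPattern dsp are assumed to exist; the tables would be filled with DoFTools::always or DoFTools::none according to the cell and face terms of your bilinear form):

Table<2, DoFTools::Coupling> cell_coupling(n_components, n_components);
Table<2, DoFTools::Coupling> face_coupling(n_components, n_components);

// ... fill cell_coupling with the couplings appearing in the cell integrals
//     and face_coupling with those appearing in the face (flux) integrals ...

DoFTools::make_flux_sparsity_pattern(dof_handler,
                                     dsp,
                                     cell_coupling,
                                     face_coupling);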

Definition at line 1311 of file dof_tools_sparsity.cc.

◆ make_hanging_node_constraints()

template<typename DoFHandlerType , typename number >
void DoFTools::make_hanging_node_constraints ( const DoFHandlerType &  dof_handler,
AffineConstraints< number > &  constraints 
)

Compute the constraints resulting from the presence of hanging nodes. Hanging nodes are best explained using a small picture:

(Figure: hanging_nodes.png, showing a locally refined mesh in which the degrees of freedom on the interface between coarse and fine cells are hanging nodes.)

In order to make a finite element function globally continuous, we have to make sure that the dark red nodes have values that are compatible with the adjacent yellow nodes, so that the function has no jump when coming from the small cells to the large one at the top right. We therefore have to add conditions that constrain those "hanging nodes".

The object into which these are inserted is later used to condense the global system matrix and right hand side, and to extend the solution vectors from the true degrees of freedom also to the constraint nodes. This function is explained in detail in the step-6 tutorial program and is used in almost all following programs as well.

This function does not clear the AffineConstraints object before use, in order to allow adding constraints from different sources to the same object. You therefore need to make sure it contains only constraints you still want; otherwise call the AffineConstraints::clear() function. Since this function does not check if it would add cycles in constraints, it is recommended to call this function prior to other functions that constrain DoFs with respect to others such as make_periodicity_constraints(). This function does not close the object since you may want to enter other constraints later on yourself.

In the hp-case, i.e. when the argument is of type hp::DoFHandler, we consider constraints due to different finite elements used on two sides of a face between cells as hanging nodes as well. In other words, for hp finite elements, this function computes all constraints due to differing mesh sizes (h) or polynomial degrees (p) between adjacent cells.

The template argument (and by consequence the type of the first argument to this function) can be either DoFHandler or hp::DoFHandler.
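A typical sequence, following the recommendation that hanging node constraints be entered before boundary conditions, might look like this sketch (dof_handler is assumed to exist, dim is the space dimension, and Functions::ZeroFunction is used only as a placeholder boundary value):

AffineConstraints<double> constraints;
constraints.clear();

DoFTools::make_hanging_node_constraints(dof_handler, constraints);

// boundary conditions are added afterwards; degrees of freedom that are
// already constrained as hanging nodes are left untouched:
VectorTools::interpolate_boundary_values(dof_handler,
                                         0,
                                         Functions::ZeroFunction<dim>(),
                                         constraints);

constraints.close();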

Definition at line 1772 of file dof_tools_constraints.cc.

◆ make_zero_boundary_constraints() [1/2]

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void DoFTools::make_zero_boundary_constraints ( const DoFHandlerType< dim, spacedim > &  dof,
const types::boundary_id  boundary_id,
AffineConstraints< number > &  zero_boundary_constraints,
const ComponentMask component_mask = ComponentMask() 
)

Add constraints to zero_boundary_constraints corresponding to enforcing a zero boundary condition on the given boundary indicator.

This function constrains all degrees of freedom on the given part of the boundary.

A variant of this function with different arguments is used in step-36.

Parameters
dof : The DoFHandler to work on.
boundary_id : The indicator of that part of the boundary for which constraints should be computed. If this number equals numbers::invalid_boundary_id then all boundaries of the domain will be treated.
zero_boundary_constraints : The constraint object into which the constraints will be written. The new constraints due to zero boundary values will simply be added, preserving any other constraints previously present. However, this will only work if the previous content of that object consists of constraints on degrees of freedom that are not located on the boundary treated here. If there are previously existing constraints for degrees of freedom located on the boundary, then this would constitute a conflict. See the Constraints on degrees of freedom module for handling the case where there are conflicting constraints on individual degrees of freedom.
component_mask : An optional component mask that restricts the functionality of this function to a subset of an FESystem. For non-primitive shape functions, any degree of freedom is affected that belongs to a shape function where at least one of its nonzero components is affected by the component mask (see GlossComponentMask). If this argument is omitted, all components of the finite element with degrees of freedom at the boundary will be considered.
See also
Glossary entry on boundary indicators
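For example, for a vector-valued problem one might constrain only the first dim (velocity) components to zero on the boundary with indicator 0, using a component mask generated from the finite element (a sketch; fe, dof_handler, and constraints are assumed to exist):

const FEValuesExtractors::Vector velocities(0);

DoFTools::make_zero_boundary_constraints(dof_handler,
                                         0,
                                         constraints,
                                         fe.component_mask(velocities));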

Definition at line 3355 of file dof_tools_constraints.cc.

◆ make_zero_boundary_constraints() [2/2]

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void DoFTools::make_zero_boundary_constraints ( const DoFHandlerType< dim, spacedim > &  dof,
AffineConstraints< number > &  zero_boundary_constraints,
const ComponentMask component_mask = ComponentMask() 
)

Do the same as the previous function, except do it for all parts of the boundary, not just those with a particular boundary indicator. This function is then equivalent to calling the previous one with numbers::invalid_boundary_id as second argument.

This function is used in step-36, for example.

Definition at line 3446 of file dof_tools_constraints.cc.

◆ distribute_constraints_linear_operator() [1/2]

template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > distribute_constraints_linear_operator ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  exemplar 
)

This function takes an AffineConstraints object constraints and an operator exemplar (this exemplar is usually a linear operator that describes the system matrix; it is only used to create domain and range vectors of appropriate sizes, its action vmult is never used). A LinearOperator object associated with the "homogeneous action" of the underlying AffineConstraints object is returned:

Applying the LinearOperator object on a vector u results in a vector v that stores the result of calling AffineConstraints::distribute() on u - with one important difference: inhomogeneities are not applied, but always treated as 0 instead.

The LinearOperator object created by this function is primarily used internally in constrained_linear_operator() to build up a modified system of linear equations. How to solve a linear system of equations with this approach is explained in detail in the Constraints on degrees of freedom module.

Author
Mauro Bardelloni, Matthias Maier, 2015
Note
Currently, this function may not work correctly for distributed data structures.

Definition at line 64 of file constrained_linear_operator.h.

◆ project_to_constrained_linear_operator() [1/2]

template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > project_to_constrained_linear_operator ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  exemplar 
)

Given an AffineConstraints object constraints and an operator exemplar, return a LinearOperator that is the projection onto the subspace of constrained degrees of freedom, i.e., all entries of the result vector that correspond to unconstrained degrees of freedom are set to zero.

Author
Mauro Bardelloni, Matthias Maier, 2015

Definition at line 160 of file constrained_linear_operator.h.

◆ constrained_linear_operator() [1/2]

template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > constrained_linear_operator ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  linop 
)

Given an AffineConstraints object constraints and a LinearOperator linop, this function creates a LinearOperator object consisting of the composition of three operations and a regularization:

Ct * linop * C + Id_c;

with C the LinearOperator returned by distribute_constraints_linear_operator() (i.e., the homogeneous action of the constraints stored in the AffineConstraints object), Ct the transpose of C, and Id_c the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[ (C^T A C + Id_c) x = C^T (b - A\,k) \]

with a given (unconstrained) system matrix \(A\), right hand side \(b\), and linear constraints \(C\) with inhomogeneities \(k\).

A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

Author
Mauro Bardelloni, Matthias Maier, 2015
Note
Currently, this function may not work correctly for distributed data structures.

Definition at line 251 of file constrained_linear_operator.h.

◆ constrained_right_hand_side() [1/2]

template<typename Range , typename Domain , typename Payload >
PackagedOperation< Range > constrained_right_hand_side ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  linop,
const Range &  right_hand_side 
)

Given an AffineConstraints object constraints, a LinearOperator linop and a right-hand side right_hand_side, this function creates a PackagedOperation that stores the following computation:

Ct * (right_hand_side - linop * k)

with Ct the transpose of the homogeneous action C of the constraints (see distribute_constraints_linear_operator()) and k the vector of inhomogeneities stored in the AffineConstraints object.

This PackagedOperation object is used together with constrained_linear_operator() to build up the following modified system of linear equations:

\[ (C^T A C + Id_c) x = C^T (b - A\,k) \]

with a given (unconstrained) system matrix \(A\), right hand side \(b\), and linear constraints \(C\) with inhomogeneities \(k\).

A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

Author
Mauro Bardelloni, Matthias Maier, 2015
Note
Currently, this function may not work correctly for distributed data structures.

Definition at line 298 of file constrained_linear_operator.h.

◆ interpolate_boundary_values() [1/4]

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values ( const Mapping< dim, spacedim > &  mapping,
const DoFHandlerType< dim, spacedim > &  dof,
const std::map< types::boundary_id, const Function< spacedim, number > *> &  function_map,
AffineConstraints< number > &  constraints,
const ComponentMask component_mask = ComponentMask() 
)

Insert the (algebraic) constraints due to Dirichlet boundary conditions into a AffineConstraints constraints. This function identifies the degrees of freedom subject to Dirichlet boundary conditions, adds them to the list of constrained DoFs in constraints and sets the respective inhomogeneity to the value interpolated around the boundary. If this routine encounters a DoF that already is constrained (for instance by a hanging node constraint, see below, or any other type of constraint, e.g. from periodic boundary conditions), the old setting of the constraint (dofs the entry is constrained to, inhomogeneities) is kept and nothing happens.

Note
When combining adaptively refined meshes with hanging node constraints and boundary conditions like from the current function within one AffineConstraints object, the hanging node constraints should always be set first, and then the boundary conditions since boundary conditions are not set in the second operation on degrees of freedom that are already constrained. This makes sure that the discretization remains conforming as is needed. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

The parameter boundary_component corresponds to the number boundary_id of the face.

The flags in the last parameter, component_mask, denote which components of the finite element space shall be interpolated. If it is left as specified by the default value (i.e. an empty array), all components are interpolated. If it is different from the default value, it is assumed that the number of entries equals the number of components in the boundary functions and the finite element, and those components in the given boundary function will be used for which the respective flag was set in the component mask. See also GlossComponentMask. As an example, assume that you are solving the Stokes equations in 2d, with variables \((u,v,p)\) and that you only want to interpolate boundary values for the velocity components, then the component mask should correspond to (true,true,false).

Note
Whether a component mask has been specified or not, the number of components of the functions in function_map must match that of the finite element used by dof. In other words, for the example above, you need to provide a Function object that has 3 components (the two velocities and the pressure), even though you are only interested in the first two of them. interpolate_boundary_values() will then call this function to obtain a vector of 3 values at each interpolation point but only take the first two and discard the third. In other words, you are free to return whatever you like in the third component of the vector returned by Function::vector_value, but the Function object must state that it has 3 components.
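A sketch for the Stokes example just discussed, using the overload that takes a single boundary indicator (BoundaryVelocity is a hypothetical Function<2> with three components, of which only the first two are used):

std::vector<bool> velocity_mask = {true, true, false};

VectorTools::interpolate_boundary_values(dof_handler,
                                         0,
                                         BoundaryVelocity(), // hypothetical, 3 components
                                         constraints,
                                         ComponentMask(velocity_mask));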

If the finite element used has shape functions that are non-zero in more than one component (in deal.II speak: they are non-primitive), then these components can presently not be used for interpolating boundary values. Thus, the elements in the component mask corresponding to the components of these non-primitive shape functions must be false.

See the general documentation of this namespace for more information.
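
As an illustration of the component mask and of the ordering note above, here is a minimal sketch for the 2d Stokes case. The names dof_handler and boundary_velocity (a user-provided Function<2> with three components), the boundary id 0, and the use of a \(Q_1\) mapping are assumptions made purely for this example:

#include <deal.II/base/function.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/component_mask.h>
#include <deal.II/fe/mapping_q_generic.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/numerics/vector_tools.h>

#include <map>
#include <vector>

using namespace dealii;

void make_velocity_boundary_constraints(const DoFHandler<2>       &dof_handler,
                                        const Function<2>         &boundary_velocity, // 3 components (u,v,p)
                                        AffineConstraints<double> &constraints)
{
  // Hanging node constraints first, so that boundary values are not set on
  // degrees of freedom that are already constrained (see the note above):
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);

  // Interpolate Dirichlet values for the two velocity components only; the
  // third (pressure) component of boundary_velocity is evaluated but discarded:
  const ComponentMask velocity_mask(std::vector<bool>{true, true, false});

  std::map<types::boundary_id, const Function<2> *> function_map;
  function_map[0] = &boundary_velocity; // boundary id 0 is an assumption

  VectorTools::interpolate_boundary_values(MappingQGeneric<2>(1),
                                           dof_handler,
                                           function_map,
                                           constraints,
                                           velocity_mask);
  constraints.close();
}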

◆ interpolate_boundary_values() [2/4]

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values ( const Mapping< dim, spacedim > &  mapping,
const DoFHandlerType< dim, spacedim > &  dof,
const types::boundary_id  boundary_component,
const Function< spacedim, number > &  boundary_function,
AffineConstraints< number > &  constraints,
const ComponentMask component_mask = ComponentMask() 
)

Same function as above, but taking only one pair of boundary indicator and corresponding boundary function. The same comments apply as for the previous function, in particular about the use of the component mask and the required size of the function object.

See also
Glossary entry on boundary indicators

◆ interpolate_boundary_values() [3/4]

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values ( const DoFHandlerType< dim, spacedim > &  dof,
const types::boundary_id  boundary_component,
const Function< spacedim, number > &  boundary_function,
AffineConstraints< number > &  constraints,
const ComponentMask component_mask = ComponentMask() 
)

Call the other interpolate_boundary_values() function, see above, with mapping=MappingQGeneric<dim,spacedim>(1). The same comments apply as for the previous function, in particular about the use of the component mask and the required size of the function object.

See also
Glossary entry on boundary indicators

◆ interpolate_boundary_values() [4/4]

template<int dim, int spacedim, template< int, int > class DoFHandlerType, typename number >
void VectorTools::interpolate_boundary_values ( const DoFHandlerType< dim, spacedim > &  dof,
const std::map< types::boundary_id, const Function< spacedim, number > *> &  function_map,
AffineConstraints< number > &  constraints,
const ComponentMask component_mask = ComponentMask() 
)

Call the other interpolate_boundary_values() function, see above, with mapping=MappingQGeneric<dim,spacedim>(1). The same comments apply as for the previous function, in particular about the use of the component mask and the required size of the function object.

◆ project_boundary_values() [1/2]

template<int dim, int spacedim, typename number >
void VectorTools::project_boundary_values ( const Mapping< dim, spacedim > &  mapping,
const DoFHandler< dim, spacedim > &  dof,
const std::map< types::boundary_id, const Function< spacedim, number > *> &  boundary_functions,
const Quadrature< dim - 1 > &  q,
AffineConstraints< number > &  constraints,
std::vector< unsigned int >  component_mapping = {} 
)

Project a function to the boundary of the domain, using the given quadrature formula for the faces. This function identifies the degrees of freedom subject to Dirichlet boundary conditions, adds them to the list of constrained DoFs in constraints and sets the respective inhomogeneity to the value resulting from the projection operation. If this routine encounters a DoF that already is constrained (for instance by a hanging node constraint, see below, or any other type of constraint, e.g. from periodic boundary conditions), the old setting of the constraint (dofs the entry is constrained to, inhomogeneities) is kept and nothing happens.

Note
When combining adaptively refined meshes with hanging node constraints and boundary conditions like from the current function within one AffineConstraints object, the hanging node constraints should always be set first, and then the boundary conditions since boundary conditions are not set in the second operation on degrees of freedom that are already constrained. This makes sure that the discretization remains conforming as is needed. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

If component_mapping is empty, it is assumed that the number of components of boundary_function matches that of the finite element used by dof.

In 1d, projection equals interpolation. Therefore, interpolate_boundary_values is called.

  • component_mapping: if the components in boundary_functions and dof do not coincide, this vector allows them to be remapped. If the vector is not empty, it has to have one entry for each component in dof. This entry is the component number in boundary_functions that should be used for this component in dof. By default, no remapping is applied.
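
As a brief sketch of a typical call, assuming a DoFHandler dof_handler whose finite element has polynomial degree fe_degree, a hypothetical boundary function boundary_values with as many components as the finite element, and boundary id 0; the component_mapping argument is left at its empty default:

#include <deal.II/base/function.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/mapping_q_generic.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/numerics/vector_tools.h>

#include <map>

using namespace dealii;

template <int dim>
void project_dirichlet_values(const DoFHandler<dim>     &dof_handler,
                              const Function<dim>       &boundary_values,
                              const unsigned int         fe_degree,
                              AffineConstraints<double> &constraints)
{
  std::map<types::boundary_id, const Function<dim> *> boundary_functions;
  boundary_functions[0] = &boundary_values; // boundary id 0 is an assumption

  // A Gauss formula with one more point than the polynomial degree is usually adequate:
  VectorTools::project_boundary_values(MappingQGeneric<dim>(1),
                                       dof_handler,
                                       boundary_functions,
                                       QGauss<dim - 1>(fe_degree + 1),
                                       constraints);
  constraints.close();
}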

◆ project_boundary_values() [2/2]

template<int dim, int spacedim, typename number >
void VectorTools::project_boundary_values ( const DoFHandler< dim, spacedim > &  dof,
const std::map< types::boundary_id, const Function< spacedim, number > *> &  boundary_function,
const Quadrature< dim - 1 > &  q,
AffineConstraints< number > &  constraints,
std::vector< unsigned int >  component_mapping = {} 
)

Call the project_boundary_values() function, see above, with mapping=MappingQGeneric<dim,spacedim>(1).

◆ project_boundary_values_curl_conforming() [1/2]

template<int dim>
void VectorTools::project_boundary_values_curl_conforming ( const DoFHandler< dim > &  dof_handler,
const unsigned int  first_vector_component,
const Function< dim, double > &  boundary_function,
const types::boundary_id  boundary_component,
AffineConstraints< double > &  constraints,
const Mapping< dim > &  mapping = StaticMappingQ1< dim >::mapping 
)

Compute constraints that correspond to boundary conditions of the form \(\vec{n}\times\vec{u}=\vec{n}\times\vec{f}\), i.e. the tangential components of \(u\) and \(f\) shall coincide.

If the AffineConstraints constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

This function is explicitly written for use with FE_Nedelec elements. Thus it throws an exception if it is called with other finite elements.

The second argument of this function denotes the first vector component in the finite element that corresponds to the vector function that you want to constrain. For example, if we want to solve Maxwell's equations in 3d and the finite element has components \((E_x,E_y,E_z,B_x,B_y,B_z)\) and we want the boundary conditions \(\vec{n}\times\vec{B}=\vec{n}\times\vec{f}\), then first_vector_component would be 3. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. \(x\)-, \(y\)-, and finally \(z\)-component.

The parameter boundary_component corresponds to the number boundary_id of the face. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces.

The last argument is used to compute the normal vector \(\vec{n}\) at the boundary points.

Computing constraints

To compute the constraints we use projection-based interpolation as proposed in Solin, Segeth and Dolezel (Higher order finite elements, Chapman&Hall, 2004) on every face located at the boundary.

First one projects \(\vec{f}\) on the lowest-order edge shape functions. Then the remaining part \((I-P_0)\vec{f}\) of the function is projected on the remaining higher-order edge shape functions. In the last step we project \((I-P_0-P_e)\vec{f}\) on the bubble shape functions defined on the face.

Deprecated:
Use the project_boundary_values_curl_conforming_l2() function instead of this one.
See also
Glossary entry on boundary indicators

◆ project_boundary_values_curl_conforming() [2/2]

template<int dim>
void VectorTools::project_boundary_values_curl_conforming ( const hp::DoFHandler< dim > &  dof_handler,
const unsigned int  first_vector_component,
const Function< dim, double > &  boundary_function,
const types::boundary_id  boundary_component,
AffineConstraints< double > &  constraints,
const hp::MappingCollection< dim, dim > &  mapping_collection = hp::StaticMappingQ1< dim >::mapping_collection 
)

Same as above for the hp-namespace.

Deprecated:
Use the project_boundary_values_curl_conforming_l2() function instead of this one.
See also
Glossary entry on boundary indicators

◆ project_boundary_values_curl_conforming_l2() [1/2]

template<int dim>
void VectorTools::project_boundary_values_curl_conforming_l2 ( const DoFHandler< dim > &  dof_handler,
const unsigned int  first_vector_component,
const Function< dim, double > &  boundary_function,
const types::boundary_id  boundary_component,
AffineConstraints< double > &  constraints,
const Mapping< dim > &  mapping = StaticMappingQ1< dim >::mapping 
)

This function is an updated version of the project_boundary_values_curl_conforming function. The intention is to fix a problem when using the previous function in conjunction with non-rectangular geometries (i.e. elements with non-rectangular faces). The L2-projection method used has been taken from the paper "Electromagnetic scattering simulation using an H(curl) conforming hp finite element method in three dimensions" by PD Ledger, K Morgan and O Hassan (Int. J. Num. Meth. Fluids, Volume 53, Issue 8, pages 1267–1296).

This function will compute constraints that correspond to Dirichlet boundary conditions of the form \(\vec{n}\times\vec{E}=\vec{n}\times\vec{F}\), i.e. the tangential components of \(\vec{E}\) and \(\vec{F}\) shall coincide.

Computing constraints

To compute the constraints we use a projection method based upon the paper mentioned above. In 2D this is done in a single stage for the edge-based shape functions, regardless of the order of the finite element. In 3D this is done in two stages, edges first and then faces.

For each cell, each edge, \(e\), is projected by solving the linear system \(Ax=b\) where \(x\) is the vector of constraints on degrees of freedom on the edge and

\(A_{ij} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{s}_{j}\cdot\vec{t}) dS\)

\(b_{i} = \int_{e} (\vec{s}_{i}\cdot\vec{t})(\vec{F}\cdot\vec{t}) dS\)

with \(\vec{s}_{i}\) the \(i^{th}\) shape function and \(\vec{t}\) the tangent vector.

Once all edge constraints, \(x\), have been computed, we may compute the face constraints in a similar fashion, taking into account the residuals from the edges.

For each face on the cell, \(f\), we solve the linear system \(By=c\) where \(y\) is the vector of constraints on degrees of freedom on the face and

\(B_{ij} = \int_{f} (\vec{n} \times \vec{s}_{i}) \cdot (\vec{n} \times \vec{s}_{j}) dS\)

\(c_{i} = \int_{f} (\vec{n} \times \vec{r}) \cdot (\vec{n} \times \vec{s}_i) dS\)

and \(\vec{r} = \vec{F} - \sum_{e \in f} \sum_{i \in e} x_{i}\vec{s}_i\), the edge residual.

The resulting constraints are then given in the solutions \(x\) and \(y\).

If the AffineConstraints constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

Arguments to this function

This function is explicitly for use with FE_Nedelec elements, or with FESystem elements which contain FE_Nedelec elements. It will throw an exception if called with any other finite element. The user must ensure that FESystem elements are set up correctly when using this function, as this check is not possible in this case.

The second argument of this function denotes the first vector component of the finite element which corresponds to the vector function that you wish to constrain. For example, if we are solving Maxwell's equations in 3D and have components \((E_x,E_y,E_z,B_x,B_y,B_z)\) and we want the boundary conditions \(\vec{n}\times\vec{B}=\vec{n}\times\vec{f}\), then first_vector_component would be 3. The boundary_function must return 6 components in this example, with the first 3 corresponding to \(\vec{E}\) and the second 3 corresponding to \(\vec{B}\). Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. \(x\)-, \(y\)-, and finally \(z\)-component.

The parameter boundary_component corresponds to the number boundary_id of the face. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces.

The last argument is used to compute the normal vector \(\vec{n}\) at the boundary points.
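
As a minimal sketch of the Maxwell example above; the FESystem made of two FE_Nedelec blocks, the name of the six-component boundary function, and the boundary id 0 are assumptions made for illustration:

#include <deal.II/base/function.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/numerics/vector_tools.h>

using namespace dealii;

// The element is assumed to be FESystem<3>(FE_Nedelec<3>(0), 2), i.e.
// components 0..2 carry E and components 3..5 carry B.
void constrain_tangential_B(const DoFHandler<3>       &dof_handler,
                            const Function<3>         &boundary_values, // 6 components
                            AffineConstraints<double> &constraints)
{
  VectorTools::project_boundary_values_curl_conforming_l2(
    dof_handler,
    /*first_vector_component=*/3, // start of the B block
    boundary_values,
    /*boundary_component=*/0,     // boundary id 0 is an assumption
    constraints);
  constraints.close();
}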

See also
Glossary entry on boundary indicators

◆ project_boundary_values_curl_conforming_l2() [2/2]

template<int dim>
void VectorTools::project_boundary_values_curl_conforming_l2 ( const hp::DoFHandler< dim > &  dof_handler,
const unsigned int  first_vector_component,
const Function< dim, double > &  boundary_function,
const types::boundary_id  boundary_component,
AffineConstraints< double > &  constraints,
const hp::MappingCollection< dim, dim > &  mapping_collection = hp::StaticMappingQ1< dim >::mapping_collection 
)

hp-namespace version of project_boundary_values_curl_conforming_l2 (above).

◆ project_boundary_values_div_conforming() [1/2]

template<int dim>
void VectorTools::project_boundary_values_div_conforming ( const DoFHandler< dim > &  dof_handler,
const unsigned int  first_vector_component,
const Function< dim, double > &  boundary_function,
const types::boundary_id  boundary_component,
AffineConstraints< double > &  constraints,
const Mapping< dim > &  mapping = StaticMappingQ1< dim >::mapping 
)

Compute constraints that correspond to boundary conditions of the form \(\vec{n}^T\vec{u}=\vec{n}^T\vec{f}\), i.e. the normal components of the solution \(u\) and a given \(f\) shall coincide. The function \(f\) is given by boundary_function and the resulting constraints are added to constraints for faces with boundary indicator boundary_component.

This function is explicitly written for use with FE_RaviartThomas elements. Thus it throws an exception if it is called with other finite elements.

If the AffineConstraints object constraints contained values or other constraints before, the new ones are added or the old ones overwritten, if a node of the boundary part to be used was already in the list of constraints. This is handled by using inhomogeneous constraints. Please note that when combining adaptive meshes and this kind of constraints, the Dirichlet conditions should be set first, and then completed by hanging node constraints, in order to make sure that the discretization remains consistent. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.

The argument first_vector_component denotes the first vector component in the finite element that corresponds to the vector function \(\vec{u}\) that you want to constrain. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e., \(x\)-, \(y\)-, and finally \(z\)-component.

The parameter boundary_component corresponds to the boundary_id of the faces where the boundary conditions are applied. numbers::internal_face_boundary_id is an illegal value, since it is reserved for interior faces. The mapping is used to compute the normal vector \(\vec{n}\) at the boundary points.

Computing constraints

To compute the constraints we use the interpolation operator proposed in Brezzi, Fortin (Mixed and Hybrid Finite Element Methods, Springer, 1991) on every face located at the boundary.
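
A minimal sketch, assuming a DoFHandler built on an FE_RaviartThomas element (so the vector-valued field starts at component zero), a hypothetical Function with dim components named boundary_values, and boundary id 0:

#include <deal.II/base/function.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/numerics/vector_tools.h>

using namespace dealii;

template <int dim>
void constrain_normal_component(const DoFHandler<dim>     &dof_handler,     // built on FE_RaviartThomas<dim>
                                const Function<dim>       &boundary_values, // dim components
                                AffineConstraints<double> &constraints)
{
  VectorTools::project_boundary_values_div_conforming(
    dof_handler,
    /*first_vector_component=*/0,
    boundary_values,
    /*boundary_component=*/0, // boundary id 0 is an assumption
    constraints);
  constraints.close();
}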

See also
Glossary entry on boundary indicators

◆ project_boundary_values_div_conforming() [2/2]

template<int dim>
void VectorTools::project_boundary_values_div_conforming ( const hp::DoFHandler< dim > &  dof_handler,
const unsigned int  first_vector_component,
const Function< dim, double > &  boundary_function,
const types::boundary_id  boundary_component,
AffineConstraints< double > &  constraints,
const hp::MappingCollection< dim, dim > &  mapping_collection = hp::StaticMappingQ1< dim >::mapping_collection 
)

Same as above for the hp-namespace.

See also
Glossary entry on boundary indicators

◆ compute_nonzero_normal_flux_constraints()

template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_nonzero_normal_flux_constraints ( const DoFHandlerType< dim, spacedim > &  dof_handler,
const unsigned int  first_vector_component,
const std::set< types::boundary_id > &  boundary_ids,
const std::map< types::boundary_id, const Function< spacedim > *> &  function_map,
AffineConstraints< double > &  constraints,
const Mapping< dim, spacedim > &  mapping = StaticMappingQ1< dim >::mapping 
)

This function computes the constraints that correspond to boundary conditions of the form \(\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n\), i.e., normal flux constraints where \(\vec u\) is a vector-valued solution variable and \(\vec u_\Gamma\) is a prescribed vector field whose normal component we want to be equal to the normal component of the solution. These conditions have exactly the form handled by the AffineConstraints class, in that they relate a linear combination of boundary degrees of freedom to a corresponding value (the inhomogeneity of the constraint). Consequently, the current function creates a list of constraints that are written into an AffineConstraints container. This object may already have some content, for example from hanging node constraints, that remains untouched. These constraints have to be applied to the linear system like any other such constraints, i.e., you have to condense the linear system with the constraints before solving, and you have to distribute the solution vector afterwards.

This function treats a more general case than VectorTools::compute_no_normal_flux_constraints() (which can only handle the case where \(\vec u_\Gamma \cdot \vec n = 0\), and is used in step-31 and step-32). However, because everything that would apply to that function also applies as a special case to the current function, the following discussion is relevant to both.

Note
This function doesn't make much sense in 1d, so it throws an exception if dim equals one.

Arguments to this function

The second argument of this function denotes the first vector component in the finite element that corresponds to the vector function that you want to constrain. For example, if we were solving a Stokes equation in 2d and the finite element had components \((u,v,p)\), then first_vector_component needs to be zero if you intend to constrain the vector \((u,v)^T\) via \((u,v)^T \cdot \vec n = \vec u_\Gamma \cdot \vec n\). On the other hand, if we solved the Maxwell equations in 3d and the finite element has components \((E_x,E_y,E_z,B_x,B_y,B_z)\) and we want the boundary condition \(\vec B\cdot \vec n=\vec B_\Gamma\cdot \vec n\), then first_vector_component would be 3. Vectors are implicitly assumed to have exactly dim components that are ordered in the same way as we usually order the coordinate directions, i.e. \(x\)-, \(y\)-, and finally \(z\)-component. The function assumes, but can't check, that the vector components in the range [first_vector_component,first_vector_component+dim) come from the same base finite element. For example, in the Stokes example above, it would not make sense to use a FESystem<dim>(FE_Q<dim>(2), 1, FE_Q<dim>(1), dim) (note that the first velocity vector component is a \(Q_2\) element, whereas all the other ones are \(Q_1\) elements) as there would be points on the boundary where the \(x\)-velocity is defined but no corresponding \(y\)- or \(z\)-velocities.

The third argument denotes the set of boundary indicators on which the boundary condition is to be enforced. Note that, as explained below, this is one of the few functions where it makes a difference whether we call the function multiple times with only one boundary indicator each, or once with the whole set of boundary indicators at once.

Argument four (function_map) describes the boundary function \(\vec u_\Gamma\) for each boundary id. The function function_map[id] is used on the boundary with id id taken from the set boundary_ids. Each function in function_map is expected to have dim components, which are used independently of first_vector_component.

The mapping argument is used to compute the boundary points at which the function needs to request the normal vector \(\vec n\) from the boundary description.

Note
When combining adaptively refined meshes with hanging node constraints and boundary conditions like from the current function within one AffineConstraints object, the hanging node constraints should always be set first, and then the boundary conditions since boundary conditions are not set in the second operation on degrees of freedom that are already constrained. This makes sure that the discretization remains conforming as is needed. See the discussion on conflicting constraints in the module on Constraints on degrees of freedom.
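
To show how the arguments fit together, here is a minimal sketch; the boundary ids {0, 1}, the hypothetical Function prescribed_velocity with dim components, and first_vector_component = 0 (a Stokes-like setup) are assumptions made for illustration:

#include <deal.II/base/function.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/numerics/vector_tools.h>

#include <map>
#include <set>

using namespace dealii;

template <int dim>
void make_normal_flux_constraints(const DoFHandler<dim>     &dof_handler,
                                  const Function<dim>       &prescribed_velocity, // dim components
                                  AffineConstraints<double> &constraints)
{
  // Hanging node constraints first (see the note above):
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);

  // Enforce u.n = u_Gamma.n on the boundary parts with ids 0 and 1, treated
  // in a single call so that normal vectors are averaged across them:
  const std::set<types::boundary_id> boundary_ids = {0, 1};

  std::map<types::boundary_id, const Function<dim> *> function_map;
  for (const types::boundary_id id : boundary_ids)
    function_map[id] = &prescribed_velocity;

  VectorTools::compute_nonzero_normal_flux_constraints(dof_handler,
                                                       /*first_vector_component=*/0,
                                                       boundary_ids,
                                                       function_map,
                                                       constraints);
  constraints.close();
}

If the exact boundary is supposed to have a corner where two of these boundary parts meet, call the function once per boundary indicator instead, as discussed below.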

Computing constraints in 2d

Computing these constraints requires some smarts. The main difficulty lies in determining what the normal vector is. Consider the following situation:

no_normal_flux_1.png

Here, we have two cells that use a bilinear mapping (i.e., MappingQGeneric(1)). Consequently, for each of the cells, the normal vector is perpendicular to the straight edge. If the two edges at the top and right are meant to approximate a curved boundary (as indicated by the dashed line), then neither of the two computed normal vectors is equal to the exact normal vector (though they approximate it as the mesh is refined further). What is worse, if we constrain \(\vec u \cdot \vec n= \vec u_\Gamma \cdot \vec n\) at the common vertex with the normal vector from both cells, then we constrain the vector \(\vec u\) with respect to two linearly independent vectors; consequently, the constraint would be \(\vec u=\vec u_\Gamma\) at this point (i.e. all components of the vector), which is not what we wanted.

To deal with this situation, the algorithm works in the following way: at each point where we want to constrain \(\vec u\), we first collect all normal vectors that adjacent cells might compute at this point. We then do not constrain \(\vec u \cdot \vec n=\vec u_\Gamma \cdot \vec n\) for each of these normal vectors but only for the average of the normal vectors. In the example above, we therefore record only a single constraint \(\vec u \cdot \vec {\bar n}=\vec u_\Gamma \cdot \vec {\bar n}\), where \(\vec {\bar n}\) is the average of the two indicated normal vectors.

Unfortunately, this is not quite enough. Consider the situation here:

no_normal_flux_2.png

If again the top and right edges approximate a curved boundary, and the left boundary is a separate boundary (for example straight) so that the exact boundary indeed has a corner at the top left vertex, then the above construction would not work: here, we indeed want \(\vec u\) to be fully constrained at this point, i.e. \(\vec u=\vec u_\Gamma\) (because the normal components with respect to both the left normal and the top normal vector should match the prescribed values), not merely that the component in the direction of the average normal vector matches.

Consequently, we use the following heuristic to determine whether all normal vectors computed at one point are to be averaged: if two normal vectors for the same point are computed on different cells, then they are to be averaged. This covers the first example above. If they are computed from the same cell, then the fact that they are different is considered indication that they come from different parts of the boundary that might be joined by a real corner, and must not be averaged.

There is one problem with this scheme. If, for example, the same domain we have considered above is discretized with the following mesh, then we get into trouble:

no_normal_flux_3.png

Here, the algorithm assumes that the boundary does not have a corner at the point where faces \(F1\) and \(F2\) join because at that point there are two different normal vectors computed from different cells. If you intend for there to be a corner of the exact boundary at this point, the only way to deal with this is to assign the two parts of the boundary different boundary indicators and call this function twice, once for each boundary indicator; doing so will yield only one normal vector at this point per invocation (because we consider only one boundary part at a time), with the result that the normal vectors will not be averaged. This situation also needs to be taken into account when using this function around reentrant corners on Cartesian meshes. If normal-flux boundary conditions are to be enforced on non-Cartesian meshes around reentrant corners, one may even get cycles in the constraints as one will in general constrain different components from the two sides. In that case, set a no-slip constraint on the reentrant vertex first.

Computing constraints in 3d

The situation is more complicated in 3d. Consider the following case where we want to compute the constraints at the marked vertex:

no_normal_flux_4.png

Here, we get four different normal vectors, one from each of the four faces that meet at the vertex. Even though they may form a complete set of vectors, it is not our intent to constrain all components of the vector field at this point. Rather, we would like to still allow tangential flow, where the term "tangential" has to be suitably defined.

In a case like this, the algorithm proceeds as follows: for each cell that has computed two tangential vectors at this point, we compute the unconstrained direction as the outer product of the two tangential vectors (if necessary multiplied by minus one). We then average these tangential vectors. Finally, we compute constraints for the two directions perpendicular to this averaged tangential direction.

There are cases where one cell contributes two tangential directions and another one only one; for example, this would happen if both top and front faces of the left cell belong to the boundary selected whereas only the top face of the right cell belongs to it, maybe indicating that the entire front part of the domain is a smooth manifold whereas the top really forms two separate manifolds that meet in a ridge, and that normal-flux boundary conditions are only desired on the front manifold and the right one on top. In cases like these, it's difficult to define what should happen. The current implementation simply ignores the one contribution from the cell that only contributes one normal vector. In the example shown, this is acceptable because the normal vector for the front face of the left cell is the same as the normal vector provided by the front face of the right cell (the surface is planar) but it would be a problem if the front manifold would be curved. Regardless, it is unclear how one would proceed in this case and ignoring the single cell is likely the best one can do.

Results

Because it makes for good pictures, here are two images of vector fields on a circle and on a sphere to which the constraints computed by this function have been applied (for illustration purposes, we enforce zero normal flux, which can more easily be computed using VectorTools::compute_no_normal_flux_constraints(), as this must lead to a tangential vector field):

no_normal_flux_5.png
no_normal_flux_6.png

The vector fields are not physically reasonable, but the tangentiality constraint is clearly enforced. The fact that the vector fields are zero at some points on the boundary is an artifact of the way they were created; they are not constrained to be zero at these points.

See also
Glossary entry on boundary indicators

◆ compute_no_normal_flux_constraints()

template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_no_normal_flux_constraints ( const DoFHandlerType< dim, spacedim > &  dof_handler,
const unsigned int  first_vector_component,
const std::set< types::boundary_id > &  boundary_ids,
AffineConstraints< double > &  constraints,
const Mapping< dim, spacedim > &  mapping = StaticMappingQ1< dim >::mapping 
)

This function does the same as the compute_nonzero_normal_flux_constraints() function (see there for more information), but for the simpler case of homogeneous normal-flux constraints, i.e., for imposing the condition \(\vec u \cdot \vec n= 0\). This function is used in step-31 and step-32.

See also
Glossary entry on boundary indicators

◆ compute_nonzero_tangential_flux_constraints()

template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_nonzero_tangential_flux_constraints ( const DoFHandlerType< dim, spacedim > &  dof_handler,
const unsigned int  first_vector_component,
const std::set< types::boundary_id > &  boundary_ids,
const std::map< types::boundary_id, const Function< spacedim > *> &  function_map,
AffineConstraints< double > &  constraints,
const Mapping< dim, spacedim > &  mapping = StaticMappingQ1< dim >::mapping 
)

Compute the constraints that correspond to boundary conditions of the form \(\vec u \times \vec n=\vec u_\Gamma \times \vec n\), i.e., tangential flow constraints where \(\vec u\) is a vector-valued solution variable and \(\vec u_\Gamma\) is a prescribed vector field whose tangential component(s) we want to be equal to the tangential component(s) of the solution. This function constrains exactly those dim-1 vector-valued components that are left unconstrained by VectorTools::compute_no_normal_flux_constraints(), and leaves the one component unconstrained that is constrained by that function.

See also
Glossary entry on boundary indicators

◆ compute_normal_flux_constraints()

template<int dim, int spacedim, template< int, int > class DoFHandlerType>
void VectorTools::compute_normal_flux_constraints ( const DoFHandlerType< dim, spacedim > &  dof_handler,
const unsigned int  first_vector_component,
const std::set< types::boundary_id > &  boundary_ids,
AffineConstraints< double > &  constraints,
const Mapping< dim, spacedim > &  mapping = StaticMappingQ1< dim >::mapping 
)

Same as above for homogeneous tangential-flux constraints.

See also
Glossary entry on boundary indicators

◆ distribute_constraints_linear_operator() [2/2]

template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > distribute_constraints_linear_operator ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  exemplar 
)
related

This function takes an AffineConstraints object constraints and an operator exemplar exemplar (this exemplar is usually a linear operator that describes the system matrix - it is only used to create domain and range vectors of appropriate sizes, its action vmult is never used). A LinearOperator object associated with the "homogeneous action" of the underlying AffineConstraints object is returned:

Applying the LinearOperator object on a vector u results in a vector v that stores the result of calling AffineConstraints::distribute() on u - with one important difference: inhomogeneities are not applied, but always treated as 0 instead.

The LinearOperator object created by this function is primarily used internally in constrained_linear_operator() to build up a modified system of linear equations. How to solve a linear system of equations with this approach is explained in detail in the Constraints on degrees of freedom module.

Author
Mauro Bardelloni, Matthias Maier, 2015
Note
Currently, this function may not work correctly for distributed data structures.

Definition at line 64 of file constrained_linear_operator.h.

◆ project_to_constrained_linear_operator() [2/2]

template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > project_to_constrained_linear_operator ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  exemplar 
)
related

Given an AffineConstraints object constraints and an operator exemplar exemplar, return a LinearOperator that is the projection to the subspace of constrained degrees of freedom, i.e. all entries of the result vector that correspond to unconstrained degrees of freedom are set to zero.

Author
Mauro Bardelloni, Matthias Maier, 2015

Definition at line 160 of file constrained_linear_operator.h.

◆ constrained_linear_operator() [2/2]

template<typename Range , typename Domain , typename Payload >
LinearOperator< Range, Domain, Payload > constrained_linear_operator ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  linop 
)
related

Given an AffineConstraints object constraints and a LinearOperator linop, this function creates a LinearOperator object consisting of the composition of three operations and a regularization:

Ct * linop * C + Id_c;

with

C = distribute_constraints_linear_operator(constraints, linop);
Ct = transpose_operator(C);

and Id_c is the projection to the subspace consisting of all vector entries associated with constrained degrees of freedom.

This LinearOperator object is used together with constrained_right_hand_side() to build up the following modified system of linear equations:

\[ (C^T A C + Id_c) x = C^T (b - A\,k) \]

with a given (unconstrained) system matrix \(A\), right hand side \(b\), and linear constraints \(C\) with inhomogeneities \(k\).

A detailed explanation of this approach is given in the Constraints on degrees of freedom module.
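
Putting the three related functions together, a minimal sketch of assembling and solving the modified system could look as follows; the use of a SparseMatrix, a CG solver, and the identity preconditioner are assumptions made for illustration:

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/constrained_linear_operator.h>
#include <deal.II/lac/linear_operator.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_control.h>
#include <deal.II/lac/sparse_matrix.h>
#include <deal.II/lac/vector.h>

using namespace dealii;

void solve_constrained_system(const SparseMatrix<double>      &A, // unconstrained system matrix
                              const Vector<double>            &b, // unconstrained right hand side
                              const AffineConstraints<double> &constraints,
                              Vector<double>                  &solution)
{
  const auto op_A    = linear_operator(A);
  const auto op_Amod = constrained_linear_operator(constraints, op_A);

  // Evaluate the packaged operation C^T (b - A k) into a plain vector:
  const Vector<double> rhs_mod = constrained_right_hand_side(constraints, op_A, b);

  SolverControl            solver_control(1000, 1e-12 * b.l2_norm());
  SolverCG<Vector<double>> solver(solver_control);
  solver.solve(op_Amod, solution, rhs_mod, PreconditionIdentity());

  // Finally, set constrained entries (including inhomogeneities) in the solution:
  constraints.distribute(solution);
}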

Author
Mauro Bardelloni, Matthias Maier, 2015
Note
Currently, this function may not work correctly for distributed data structures.

Definition at line 251 of file constrained_linear_operator.h.

◆ constrained_right_hand_side() [2/2]

template<typename Range , typename Domain , typename Payload >
PackagedOperation< Range > constrained_right_hand_side ( const AffineConstraints< typename Range::value_type > &  constraints,
const LinearOperator< Range, Domain, Payload > &  linop,
const Range &  right_hand_side 
)
related

Given an AffineConstraints object constraints, a LinearOperator linop and a right-hand side right_hand_side, this function creates a PackagedOperation that stores the following computation:

Ct * (right_hand_side - linop * k)

with

Ct = transpose_operator(distribute_constraints_linear_operator(constraints, linop));

and k the vector of inhomogeneities stored in the constraints object.

This PackagedOperation object is used together with constrained_linear_operator() to build up the following modified system of linear equations:

\[ (C^T A C + Id_c) x = C^T (b - A\,k) \]

with a given (unconstrained) system matrix \(A\), right hand side \(b\), and linear constraints \(C\) with inhomogeneities \(k\).

A detailed explanation of this approach is given in the Constraints on degrees of freedom module.

Author
Mauro Bardelloni, Matthias Maier, 2015
Note
Currently, this function may not work correctly for distributed data structures.

Definition at line 298 of file constrained_linear_operator.h.

◆ make_sparsity_pattern() [3/3]

template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_sparsity_pattern ( const DoFHandlerType &  dof_row,
const DoFHandlerType &  dof_col,
SparsityPatternType &  sparsity 
)

Construct a sparsity pattern that allows coupling degrees of freedom on two different but related meshes.

The idea is that if the two given DoFHandler objects correspond to two different meshes (and potentially to different finite elements used on these cells), but the two triangulations they are based on are derived from the same coarse mesh through hierarchical refinement, then one may set up a problem where one would like to test shape functions from one mesh against the shape functions from another mesh. In particular, this means that shape functions from a cell on the first mesh are tested against those shape functions on the second mesh that are located on the corresponding cell; this correspondence is something that the InterGridMap class can determine.

This function then constructs a sparsity pattern for which the degrees of freedom that represent the rows come from the first given DoFHandler, whereas the ones that correspond to columns come from the second DoFHandler.
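
A brief sketch of a typical use, assuming two DoFHandler objects on hierarchically related triangulations and a SparsityPattern as the final output:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/sparsity_pattern.h>

using namespace dealii;

template <int dim>
void make_intergrid_sparsity_pattern(const DoFHandler<dim> &dof_row,
                                     const DoFHandler<dim> &dof_col,
                                     SparsityPattern       &sparsity)
{
  // Rows correspond to dof_row, columns to dof_col:
  DynamicSparsityPattern dsp(dof_row.n_dofs(), dof_col.n_dofs());
  DoFTools::make_sparsity_pattern(dof_row, dof_col, dsp);
  sparsity.copy_from(dsp);
}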

Definition at line 214 of file dof_tools_sparsity.cc.

◆ make_flux_sparsity_pattern() [4/4]

template<typename DoFHandlerType , typename SparsityPatternType , typename number >
void DoFTools::make_flux_sparsity_pattern ( const DoFHandlerType &  dof,
SparsityPatternType &  sparsity,
const AffineConstraints< number > &  constraints,
const bool  keep_constrained_dofs,
const Table< 2, Coupling > &  couplings,
const Table< 2, Coupling > &  face_couplings,
const types::subdomain_id  subdomain_id 
)

This function does essentially the same as the previous make_flux_sparsity_pattern() function but allows the application of an AffineConstraints object. This is useful in the case where some components of a finite element are continuous and some discontinuous, allowing constraints to be imposed on the continuous part while also building the flux terms needed for the discontinuous part.
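
A sketch of how this might be used for a two-component element whose first component is continuous and whose second component is discontinuous; the component count and the particular coupling pattern are assumptions made for illustration:

#include <deal.II/base/table.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>

using namespace dealii;

template <int dim>
void make_mixed_flux_sparsity_pattern(const DoFHandler<dim>           &dof_handler,
                                      const AffineConstraints<double> &constraints,
                                      DynamicSparsityPattern          &dsp)
{
  Table<2, DoFTools::Coupling> cell_coupling(2, 2);
  Table<2, DoFTools::Coupling> face_coupling(2, 2);
  for (unsigned int c = 0; c < 2; ++c)
    for (unsigned int d = 0; d < 2; ++d)
      {
        cell_coupling[c][d] = DoFTools::always; // cell integrals couple all components
        face_coupling[c][d] = DoFTools::none;   // no face terms by default ...
      }
  face_coupling[1][1] = DoFTools::always; // ... except for the discontinuous component

  dsp.reinit(dof_handler.n_dofs(), dof_handler.n_dofs());
  DoFTools::make_flux_sparsity_pattern(dof_handler,
                                       dsp,
                                       constraints,
                                       /*keep_constrained_dofs=*/false,
                                       cell_coupling,
                                       face_coupling,
                                       numbers::invalid_subdomain_id);
}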

Definition at line 1334 of file dof_tools_sparsity.cc.

◆ make_boundary_sparsity_pattern() [1/2]

template<typename DoFHandlerType , typename SparsityPatternType >
void DoFTools::make_boundary_sparsity_pattern ( const DoFHandlerType &  dof,
const std::vector< types::global_dof_index > &  dof_to_boundary_mapping,
SparsityPatternType &  sparsity_pattern 
)

Create the sparsity pattern for boundary matrices. See the general documentation of this class for more information.

The function does essentially what the other make_sparsity_pattern() functions do, but assumes that the bilinear form that is used to build the matrix does not consist of domain integrals, but only of integrals over the boundary of the domain.

Definition at line 356 of file dof_tools_sparsity.cc.

◆ make_boundary_sparsity_pattern() [2/2]

template<typename DoFHandlerType , typename SparsityPatternType , typename number >
void DoFTools::make_boundary_sparsity_pattern ( const DoFHandlerType &  dof,
const std::map< types::boundary_id, const Function< DoFHandlerType::space_dimension, number > *> &  boundary_ids,
const std::vector< types::global_dof_index > &  dof_to_boundary_mapping,
SparsityPatternType &  sparsity 
)

This function is a variation of the previous make_boundary_sparsity_pattern() function in which we assume that the boundary integrals that will give rise to the matrix extend only over those parts of the boundary whose boundary indicators are listed in the boundary_ids argument to this function.

This function could have been written by passing a set of boundary_id numbers. However, most of the functions throughout deal.II dealing with boundary indicators take a mapping of boundary indicators and the corresponding boundary function, i.e., a std::map<types::boundary_id, const Function<spacedim,number>*> argument. Correspondingly, this function does the same, though the actual boundary function is ignored here. (Consequently, if you don't have any such boundary functions, just create a map with the boundary indicators you want and set the function pointers to null pointers).

Definition at line 427 of file dof_tools_sparsity.cc.

◆ compute_intergrid_constraints()

template<int dim, int spacedim>
void DoFTools::compute_intergrid_constraints ( const DoFHandler< dim, spacedim > &  coarse_grid,
const unsigned int  coarse_component,
const DoFHandler< dim, spacedim > &  fine_grid,
const unsigned int  fine_component,
const InterGridMap< DoFHandler< dim, spacedim >> &  coarse_to_fine_grid_map,
AffineConstraints< double > &  constraints 
)

This function is used when different variables in a problem are discretized on different grids, where one grid is strictly coarser than the other. An example are optimization problems where the control variable is often discretized on a coarser mesh than the state variable.

The function's result can be stated as follows mathematically: Let \({\cal T}_0\) and \({\cal T}_1\) be two meshes where \({\cal T}_1\) results from \({\cal T}_0\) strictly by refining or leaving alone the cells of \({\cal T}_0\). Using the same finite element on both, there are function spaces \({\cal V}_0\) and \({\cal V}_1\) associated with these meshes. Then every function \(v_0 \in {\cal V}_0\) can of course also be represented exactly in \({\cal V}_1\) since by construction \({\cal V}_0 \subset {\cal V}_1\). However, not every function in \({\cal V}_1\) can be expressed as a linear combination of the shape functions of \({\cal V}_0\). The functions that can be represented lie in a homogeneous subspace of \({\cal V}_1\) (namely, \({\cal V}_0\), of course) and this subspace can be represented by a linear constraint of the form \(CV=0\) where \(V\) is the vector of nodal values of functions \(v\in {\cal V}_1\). In other words, every function \(v_h=\sum_j V_j \varphi_j^{(1)} \in {\cal V}_1\) that also satisfies \(v_h\in {\cal V}_0\) automatically satisfies \(CV=0\). This function computes the matrix \(C\) in the form of a AffineConstraints object.

The construction of these constraints is done as follows: for each of the degrees of freedom (i.e. shape functions) on the coarse grid, we compute its representation on the fine grid, i.e. which linear combination of shape functions on the fine grid reproduces the shape function on the coarse grid. From this information, we can then compute the constraints which have to hold if a solution of a linear equation on the fine grid shall be representable on the coarse grid. The exact algorithm by which these constraints can be computed is rather complicated and is best understood by reading the source code, which contains many comments.

The use of this function is as follows: it accepts as parameters two DoF Handlers, the first of which refers to the coarse grid and the second of which is the fine grid. On both, a finite element is represented by the DoF handler objects, which will usually have several vector components, which may belong to different base elements. The second and fourth parameter of this function therefore state which vector component on the coarse grid shall be used to restrict the stated component on the fine grid. The finite element used for the respective components on the two grids needs to be the same. An example may clarify this: consider an optimization problem with controls \(q\) discretized on a coarse mesh and a state variable \(u\) (and corresponding Lagrange multiplier \(\lambda\)) discretized on the fine mesh. These are discretized using piecewise constant discontinuous, continuous linear, and continuous linear elements, respectively. Only the parameter \(q\) is represented on the coarse grid, thus the DoFHandler object on the coarse grid represents only one variable, discretized using piecewise constant discontinuous elements. Then, the parameter denoting the vector component on the coarse grid would be zero (the only possible choice, since the variable on the coarse grid is scalar). If the ordering of variables in the fine mesh FESystem is \(u, q, \lambda\), then the fourth argument of the function corresponding to the vector component would be one (corresponding to the variable \(q\); zero would be \(u\), two would be \(\lambda\)).

The function also requires an object of type InterGridMap representing how to get from the coarse mesh cells to the corresponding cells on the fine mesh. This could in principle be generated by the function itself from the two DoFHandler objects, but since it is probably available anyway in programs that use different meshes, the function simply takes it as an argument.

The computed constraints are entered into a variable of type AffineConstraints; previous contents are not deleted.
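
A brief sketch of the call sequence for the optimization example above; the DoFHandler names and the component indices (the scalar control q is component 0 on the coarse grid and component 1 within (u, q, lambda) on the fine grid) follow that example:

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/grid/intergrid_map.h>
#include <deal.II/lac/affine_constraints.h>

using namespace dealii;

template <int dim>
void make_control_constraints(const DoFHandler<dim>     &coarse_dof_handler, // discretizes q only
                              const DoFHandler<dim>     &fine_dof_handler,   // discretizes (u, q, lambda)
                              AffineConstraints<double> &constraints)
{
  // Map each cell of the coarse grid to its corresponding cell on the fine grid:
  InterGridMap<DoFHandler<dim>> coarse_to_fine;
  coarse_to_fine.make_mapping(coarse_dof_handler, fine_dof_handler);

  DoFTools::compute_intergrid_constraints(coarse_dof_handler,
                                          /*coarse_component=*/0,
                                          fine_dof_handler,
                                          /*fine_component=*/1,
                                          coarse_to_fine,
                                          constraints);
  constraints.close();
}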

Definition at line 3078 of file dof_tools_constraints.cc.

◆ compute_intergrid_transfer_representation()

template<int dim, int spacedim>
void DoFTools::compute_intergrid_transfer_representation ( const DoFHandler< dim, spacedim > &  coarse_grid,
const unsigned int  coarse_component,
const DoFHandler< dim, spacedim > &  fine_grid,
const unsigned int  fine_component,
const InterGridMap< DoFHandler< dim, spacedim >> &  coarse_to_fine_grid_map,
std::vector< std::map< types::global_dof_index, float >> &  transfer_representation 
)

This function generates a matrix such that when a vector of data with as many elements as there are degrees of freedom of this component on the coarse grid is multiplied by this matrix, we obtain a vector with as many elements as there are global degrees of freedom on the fine grid. All the elements of the other vector components of the finite element fields on the fine grid are not touched.

The triangulation of the fine grid can be distributed. When called in parallel, each process has to have a copy of the coarse grid. In this case, the function returns the transfer representation for the set of locally owned cells.

The output of this function is a compressed format that can be used to construct the corresponding sparse transfer matrix.

Definition at line 3256 of file dof_tools_constraints.cc.