@code
template <int dim>
void RightHandSide<dim>::value_list(const std::vector<Point<dim>> &vp,
                                    std::vector<Tensor<1, dim>> &  values) const
{
  for (unsigned int c = 0; c < vp.size(); ++c)
    values[c] = RightHandSide<dim>::value(vp[c]);
}
@endcode
<a name="Linearsolversandpreconditioners"></a>
<h3>Linear solvers and preconditioners</h3>

The linear solvers and preconditioners are discussed extensively in the
introduction. Here, we create the respective objects that will be used.
<a name="ThecodeInverseMatrixcodeclasstemplate"></a>
<h4>The <code>InverseMatrix</code> class template</h4>

The <code>InverseMatrix</code> class represents the data structure for an
inverse matrix. Unlike @ref step_20 "step-20", we implement this with a
class instead of the helper function inverse_linear_operator() since we
will apply this class to different kinds of matrices that will require
different preconditioners (in @ref step_20 "step-20" we only used a
non-identity preconditioner for the mass matrix). The types of matrix and
preconditioner are passed to this class via template parameters, and
matrix and preconditioner objects of these types will then be passed to
the constructor when an <code>InverseMatrix</code> object is created. The
member function <code>vmult</code> is obtained by solving a linear system:
@code
template <class MatrixType, class PreconditionerType>
class InverseMatrix : public Subscriptor
{
public:
  InverseMatrix(const MatrixType &        m,
                const PreconditionerType &preconditioner);

  void vmult(Vector<double> &dst, const Vector<double> &src) const;

private:
  const SmartPointer<const MatrixType>         matrix;
  const SmartPointer<const PreconditionerType> preconditioner;
};
@endcode
@code
template <class MatrixType, class PreconditionerType>
InverseMatrix<MatrixType, PreconditionerType>::InverseMatrix(
  const MatrixType &        m,
  const PreconditionerType &preconditioner)
  : matrix(&m)
  , preconditioner(&preconditioner)
{}
@endcode
This is the implementation of the <code>vmult</code> function.

In this class we use a rather large tolerance for the solver control. The
reason for this is that the function is used very frequently, and hence,
any additional effort to make the residual in the CG solve smaller makes
the solution more expensive. Note that we do not only use this class as a
preconditioner for the Schur complement, but also when forming the
inverse of the Laplace matrix – which is hence directly responsible
for the accuracy of the solution itself, so we cannot choose too large a
tolerance, either.
@code
template <class MatrixType, class PreconditionerType>
void InverseMatrix<MatrixType, PreconditionerType>::vmult(
  Vector<double> &      dst,
  const Vector<double> &src) const
{
  SolverControl solver_control(src.size(), 1e-6 * src.l2_norm());
  SolverCG<Vector<double>> cg(solver_control);

  dst = 0;

  cg.solve(*matrix, dst, src, *preconditioner);
}
@endcode
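
As a usage sketch (hypothetical names; the real call sites appear further
down in <code>solve()</code>), applying @f$A^{-1}@f$ to some vector
<code>rhs</code> would look like this:

@code
// Sketch only: wrap the (0,0) block and its preconditioner, then apply
// the approximate inverse to a vector 'rhs'.
InverseMatrix<SparseMatrix<double>,
              typename InnerPreconditioner<dim>::type>
  A_inverse(system_matrix.block(0, 0), *A_preconditioner);

Vector<double> result(rhs.size());
A_inverse.vmult(result, rhs); // result ~= A^{-1} rhs, up to the CG tolerance
@endcode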
<a name="ThecodeSchurComplementcodeclasstemplate"></a>
<h4>The <code>SchurComplement</code> class template</h4>

This class implements the Schur complement discussed in the introduction.
It works in analogy to @ref step_20 "step-20". However, we now call it
with a template parameter <code>PreconditionerType</code> in order to
access that type when specifying the respective type of the inverse
matrix class. As a consequence of the definition above, the declaration
<code>InverseMatrix</code> now contains the second template parameter for
a preconditioner class as above, which affects the
<code>SmartPointer</code> object <code>m_inverse</code> as well.
@code
template <class PreconditionerType>
class SchurComplement : public Subscriptor
{
public:
  SchurComplement(
    const BlockSparseMatrix<double> &system_matrix,
    const InverseMatrix<SparseMatrix<double>, PreconditionerType> &A_inverse);

  void vmult(Vector<double> &dst, const Vector<double> &src) const;

private:
  const SmartPointer<const BlockSparseMatrix<double>> system_matrix;
  const SmartPointer<
    const InverseMatrix<SparseMatrix<double>, PreconditionerType>>
    A_inverse;

  mutable Vector<double> tmp1, tmp2;
};
@endcode
@code
template <class PreconditionerType>
SchurComplement<PreconditionerType>::SchurComplement(
  const BlockSparseMatrix<double> &system_matrix,
  const InverseMatrix<SparseMatrix<double>, PreconditionerType> &A_inverse)
  : system_matrix(&system_matrix)
  , A_inverse(&A_inverse)
  , tmp1(system_matrix.block(0, 0).m())
  , tmp2(system_matrix.block(0, 0).m())
{}
@endcode
@code
template <class PreconditionerType>
void
SchurComplement<PreconditionerType>::vmult(Vector<double> &      dst,
                                           const Vector<double> &src) const
{
  system_matrix->block(0, 1).vmult(tmp1, src);
  A_inverse->vmult(tmp2, tmp1);
  system_matrix->block(1, 0).vmult(dst, tmp2);
}
@endcode
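
In other words, the three statements successively compute
@f$\textrm{tmp1} = B^T \textrm{src}@f$, @f$\textrm{tmp2} = A^{-1}\textrm{tmp1}@f$,
and finally @f$\textrm{dst} = B\,\textrm{tmp2} = B A^{-1} B^T \textrm{src}@f$,
i.e. one application of the Schur complement @f$S = B A^{-1} B^T@f$.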
<a name="StokesProblemclassimplementation"></a>
<h3>StokesProblem class implementation</h3>

<a name="StokesProblemStokesProblem"></a>
<h4>StokesProblem::StokesProblem</h4>
The constructor of this class looks very similar to the one of
@ref step_20 "step-20". The constructor initializes the variables for the
polynomial degree, triangulation, finite element system and the dof
handler. The underlying polynomial functions are of order
<code>degree+1</code> for the vector-valued velocity components and of
order <code>degree</code> for the pressure. This gives the LBB-stable
element pair @f$Q_{degree+1}^d\times Q_{degree}@f$, often referred to as the
Taylor-Hood element. (For the default <code>degree==1</code> chosen in
<code>main()</code> below, this is the @f$Q_2^d\times Q_1@f$ pair.)

Note that we initialize the triangulation with a MeshSmoothing argument,
which ensures that the refinement of cells is done in a way that the
approximation of the PDE solution remains well-behaved (problems arise if
grids are too unstructured), see the documentation of
<code>Triangulation::MeshSmoothing</code> for details.
@code
template <int dim>
StokesProblem<dim>::StokesProblem(const unsigned int degree)
  : degree(degree)
  , triangulation(Triangulation<dim>::maximum_smoothing)
  , fe(FE_Q<dim>(degree + 1), dim, FE_Q<dim>(degree), 1)
  , dof_handler(triangulation)
{}
@endcode
<a name="StokesProblemsetup_dofs"></a>
<h4>StokesProblem::setup_dofs</h4>

Given a mesh, this function associates the degrees of freedom with it and
creates the corresponding matrices and vectors. At the beginning it also
releases the pointer to the preconditioner object (if the shared pointer
pointed at anything at all at this point) since it will definitely not be
needed any more after this point and will have to be re-computed after
assembling the matrix, and unties the sparse matrices from their sparsity
pattern objects.
We then proceed with distributing degrees of freedom and renumbering
them: In order to make the ILU preconditioner (in 3D) work efficiently,
it is important to enumerate the degrees of freedom in such a way that it
reduces the bandwidth of the matrix, or maybe more importantly: in such a
way that the ILU is as close as possible to a real LU decomposition. On
the other hand, we need to preserve the block structure of velocity and
pressure already seen in @ref step_20 "step-20" and @ref step_21
"step-21". This is done in two steps: First, all dofs are renumbered to
improve the ILU and then we renumber once again by components. Since
<code>DoFRenumbering::component_wise</code> does not touch the
renumbering within the individual blocks, the basic renumbering from the
first step remains. As for how to renumber the degrees of freedom to
improve the ILU: deal.II has a number of algorithms that attempt to find
orderings to improve ILUs, or reduce the bandwidth of matrices, or
optimize some other aspect. The DoFRenumbering namespace shows a
comparison of the results we obtain with several of these algorithms
based on the testcase discussed here in this tutorial program. Here, we
will use the traditional Cuthill-McKee algorithm already used in some of
the previous tutorial programs. In the <a href="#improved-ilu">section
on improved ILU</a> we're going to discuss this issue in more detail.
There is one more change compared to previous tutorial programs: There is
no reason to sort the <code>dim</code> velocity components individually.
In fact, rather than first enumerating all @f$x@f$-velocities, then all
@f$y@f$-velocities, etc, we would like to keep all velocities at the same
location together and only separate between velocities (all components)
and pressures. By default, this is not what the
<code>DoFRenumbering::component_wise</code> function does: it treats each
component separately; what we have to do is group several components into
"blocks" and pass this block structure to that function. Consequently, we
allocate a vector <code>block_component</code> with as many elements as
there are components and describe all velocity components to correspond
to block 0, while the pressure component will form block 1:
@code
template <int dim>
void StokesProblem<dim>::setup_dofs()
{
  A_preconditioner.reset();
  system_matrix.clear();
  preconditioner_matrix.clear();

  dof_handler.distribute_dofs(fe);
  DoFRenumbering::Cuthill_McKee(dof_handler);

  std::vector<unsigned int> block_component(dim + 1, 0);
  block_component[dim] = 1;
  DoFRenumbering::component_wise(dof_handler, block_component);
@endcode
Now comes the implementation of Dirichlet boundary conditions, which
should be evident after the discussion in the introduction. All that
changed is that the function already appears in the setup functions,
whereas we were used to seeing it in some assembly routine. Further down
below where we set up the mesh, we will associate the top boundary where
we impose Dirichlet boundary conditions with boundary indicator 1. We
will have to pass this boundary indicator as the second argument to the
function below that interpolates the boundary values. There is one more
thing, though. The function describing the Dirichlet conditions was
defined for all components, both velocity and pressure. However, the
Dirichlet conditions are to be set for the velocity only. To this end, we
use a ComponentMask that only selects the velocity components. The
component mask is obtained from the finite element by specifying the
particular components we want. Since we use adaptively refined grids, the
affine constraints object needs to be first filled with hanging node
constraints generated from the DoF handler. Note the order of the two
function calls: we first compute the hanging node constraints, and then
insert the boundary values into the constraints object. This makes sure
that we respect H<sup>1</sup> conformity on boundaries with hanging nodes
(in three space dimensions), where the hanging node needs to dominate the
Dirichlet boundary values.
@code
{
  constraints.clear();

  const FEValuesExtractors::Vector velocities(0);
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(dof_handler,
                                           1,
                                           BoundaryValues<dim>(),
                                           constraints,
                                           fe.component_mask(velocities));
}
constraints.close();
@endcode
In analogy to @ref step_20 "step-20", we count the dofs in the individual
components. We could do this in the same way as there, but we want to
operate on the block structure we used already for the renumbering: The
function <code>DoFTools::count_dofs_per_fe_block</code> does the same as
<code>DoFRenumbering::component_wise</code> with respect to the grouping
of the velocity and pressure block via <code>block_component</code>.
@code
const std::vector<types::global_dof_index> dofs_per_block =
  DoFTools::count_dofs_per_fe_block(dof_handler, block_component);
const unsigned int n_u = dofs_per_block[0];
const unsigned int n_p = dofs_per_block[1];

std::cout << "   Number of active cells: " << triangulation.n_active_cells()
          << std::endl
          << "   Number of degrees of freedom: " << dof_handler.n_dofs()
          << " (" << n_u << '+' << n_p << ')' << std::endl;
@endcode
The next task is to allocate a sparsity pattern for the system matrix we
will create and one for the preconditioner matrix. We could do this in
the same way as in @ref step_20 "step-20", i.e. directly build an object
of type BlockSparsityPattern. However, there is a major reason not to do
so: In 3D, the function DoFTools::max_couplings_between_dofs yields a
conservative but rather large number for the coupling between the
individual dofs, so that the memory initially provided for the creation
of the sparsity pattern of the matrix is far too much -- so much actually
that the initial sparsity pattern won't even fit into the physical memory
of most systems already for moderately-sized 3D problems, see also the
discussion in @ref step_18 "step-18". Instead, we first build temporary
objects that use a different data structure that doesn't require
allocating more memory than necessary but isn't suitable for use as a
basis of SparseMatrix or BlockSparseMatrix objects; in a second step we
then copy these objects into objects of type BlockSparsityPattern. This
is entirely analogous to what we already did in @ref step_11 "step-11"
and @ref step_18 "step-18". In particular, we make use of the fact that
we will never write into the @f$(1,1)@f$ block of the system matrix and that
this is the only block to be filled for the preconditioner matrix.

All this is done inside new scopes, which means that the memory of
<code>dsp</code> will be released once the information has been copied to
<code>sparsity_pattern</code>.
@code
{
  BlockDynamicSparsityPattern dsp(dofs_per_block, dofs_per_block);

  Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
  for (unsigned int c = 0; c < dim + 1; ++c)
    for (unsigned int d = 0; d < dim + 1; ++d)
      if (!((c == dim) && (d == dim)))
        coupling[c][d] = DoFTools::always;
      else
        coupling[c][d] = DoFTools::none;

  DoFTools::make_sparsity_pattern(
    dof_handler, coupling, dsp, constraints, false);

  sparsity_pattern.copy_from(dsp);
}
@endcode
@code
{
  BlockDynamicSparsityPattern preconditioner_dsp(dofs_per_block,
                                                 dofs_per_block);

  Table<2, DoFTools::Coupling> preconditioner_coupling(dim + 1, dim + 1);
  for (unsigned int c = 0; c < dim + 1; ++c)
    for (unsigned int d = 0; d < dim + 1; ++d)
      if (((c == dim) && (d == dim)))
        preconditioner_coupling[c][d] = DoFTools::always;
      else
        preconditioner_coupling[c][d] = DoFTools::none;

  DoFTools::make_sparsity_pattern(dof_handler,
                                  preconditioner_coupling,
                                  preconditioner_dsp,
                                  constraints,
                                  false);

  preconditioner_sparsity_pattern.copy_from(preconditioner_dsp);
}
@endcode
Finally, the system matrix, the preconditioner matrix, the solution and
the right hand side vector are created from the block structure similar
to the approach in @ref step_20 "step-20":

@code
system_matrix.reinit(sparsity_pattern);
preconditioner_matrix.reinit(preconditioner_sparsity_pattern);

solution.reinit(dofs_per_block);
system_rhs.reinit(dofs_per_block);
}
@endcode
<a name="StokesProblemassemble_system"></a>
<h4>StokesProblem::assemble_system</h4>

The assembly process follows the discussion in @ref step_20 "step-20" and
in the introduction. We use the well-known abbreviations for the data
structures that hold the local matrices, right hand side, and global
numbering of the degrees of freedom for the present cell.
@code
template <int dim>
void StokesProblem<dim>::assemble_system()
{
  system_matrix         = 0;
  system_rhs            = 0;
  preconditioner_matrix = 0;

  QGauss<dim> quadrature_formula(degree + 2);

  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_quadrature_points |
                            update_JxW_values | update_gradients);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();

  const unsigned int n_q_points = quadrature_formula.size();

  FullMatrix<double> local_matrix(dofs_per_cell, dofs_per_cell);
  FullMatrix<double> local_preconditioner_matrix(dofs_per_cell,
                                                 dofs_per_cell);
  Vector<double>     local_rhs(dofs_per_cell);

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  const RightHandSide<dim>    right_hand_side;
  std::vector<Tensor<1, dim>> rhs_values(n_q_points, Tensor<1, dim>());
@endcode
Next, we need two objects that work as extractors for the FEValues
object. Their use is explained in detail in the report on @ref
vector_valued "vector-valued problems".

@code
const FEValuesExtractors::Vector velocities(0);
const FEValuesExtractors::Scalar pressure(dim);
@endcode
As an extension over @ref step_20 "step-20" and @ref step_21 "step-21",
we include a few optimizations that make assembly much faster for this
particular problem. The improvements are based on the observation that we
do a few calculations too many times when we do as in @ref step_20
"step-20": The symmetric gradient actually has
<code>dofs_per_cell</code> different values per quadrature point, but we
extract it <code>dofs_per_cell*dofs_per_cell</code> times from the
FEValues object - for both the loop over <code>i</code> and the inner
loop over <code>j</code>. In 3d, that means evaluating it @f$89^2=7921@f$
times instead of @f$89@f$ times, a not insignificant difference.
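
(The number 89 comes from the element itself: in 3d, the Taylor-Hood
@f$Q_2^3\times Q_1@f$ element has @f$3\times 27@f$ velocity plus @f$8@f$ pressure
degrees of freedom per hexahedron, i.e. @f$89@f$ in total.)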
So what we're going to do here is to avoid such repeated calculations by
getting a vector of rank-2 tensors (and similarly for the divergence and
the basis function value on pressure) at the quadrature point prior to
starting the loop over the dofs on the cell. First, we create the
respective objects that will hold these values. Then, we start the loop
over all cells and the loop over the quadrature points, where we first
extract these values. There is one more optimization we implement here:
the local matrix (as well as the global one) is going to be symmetric,
since all the operations involved are symmetric with respect to @f$i@f$ and
@f$j@f$. This is implemented by simply running the inner loop not to
<code>dofs_per_cell</code>, but only up to <code>i</code>, the index of
the outer loop.
@code
std::vector<SymmetricTensor<2, dim>> symgrad_phi_u(dofs_per_cell);
std::vector<double>                  div_phi_u(dofs_per_cell);
std::vector<Tensor<1, dim>>          phi_u(dofs_per_cell);
std::vector<double>                  phi_p(dofs_per_cell);
@endcode
@code
for (const auto &cell : dof_handler.active_cell_iterators())
  {
    fe_values.reinit(cell);
    local_matrix                = 0;
    local_preconditioner_matrix = 0;
    local_rhs                   = 0;

    right_hand_side.value_list(fe_values.get_quadrature_points(),
                               rhs_values);

    for (unsigned int q = 0; q < n_q_points; ++q)
      {
        for (unsigned int k = 0; k < dofs_per_cell; ++k)
          {
            symgrad_phi_u[k] =
              fe_values[velocities].symmetric_gradient(k, q);
            div_phi_u[k] = fe_values[velocities].divergence(k, q);
            phi_u[k]     = fe_values[velocities].value(k, q);
            phi_p[k]     = fe_values[pressure].value(k, q);
          }
@endcode
Now finally for the bilinear forms of both the system matrix and the
matrix we use for the preconditioner. Recall that the formulas for these
two are

@f{align*}{
  A_{ij} &= a(\varphi_i,\varphi_j)
  \\ &= \underbrace{2(\varepsilon(\varphi_{i,\textbf{u}}),
                      \varepsilon(\varphi_{j,\textbf{u}}))_{\Omega}}_{(1)}
  \;
    \underbrace{- (\textrm{div}\; \varphi_{i,\textbf{u}},
                   \varphi_{j,p})_{\Omega}}_{(2)}
  \;
    \underbrace{- (\varphi_{i,p},
                   \textrm{div}\; \varphi_{j,\textbf{u}})_{\Omega}}_{(3)}
@f}
and
@f{align*}{
  M_{ij} &= \underbrace{(\varphi_{i,p},
                         \varphi_{j,p})_{\Omega}}_{(4)},
@f}

respectively, where @f$\varphi_{i,\textbf{u}}@f$ and @f$\varphi_{i,p}@f$
are the velocity and pressure components of the @f$i@f$th shape
function. The various terms above are then easily recognized in
the following implementation:
@code
for (unsigned int i = 0; i < dofs_per_cell; ++i)
  {
    for (unsigned int j = 0; j <= i; ++j)
      {
        local_matrix(i, j) +=
          (2 * (symgrad_phi_u[i] * symgrad_phi_u[j]) // (1)
           - div_phi_u[i] * phi_p[j]                 // (2)
           - phi_p[i] * div_phi_u[j])                // (3)
          * fe_values.JxW(q);                        // * dx

        local_preconditioner_matrix(i, j) +=
          (phi_p[i] * phi_p[j]) // (4)
          * fe_values.JxW(q);   // * dx
      }
@endcode
Note that in the implementation of (1) above, `operator*`
is overloaded for symmetric tensors, yielding the scalar
product between the two tensors.

For the right-hand side, we need to multiply the (vector of)
velocity shape functions with the vector of body force
right-hand side components, both evaluated at the current
quadrature point. We have implemented the body forces as a
<code>TensorFunction</code>, so the values stored in
<code>rhs_values</code> are already tensors for which the application of
`operator*` against the velocity components of the shape function
results in the dot product, as intended.

@code
    local_rhs(i) += phi_u[i]            // phi_u_i(x_q)
                    * rhs_values[q]     // * f(x_q)
                    * fe_values.JxW(q); // * dx
  }
@endcode
Before we can write the local data into the global matrix (and
simultaneously use the AffineConstraints object to apply
Dirichlet boundary conditions and eliminate hanging node constraints,
as we discussed in the introduction), we have to be careful about one
thing, though. We have only built half of the local matrices
because of symmetry, but we're going to save the full matrices
in order to use the standard functions for solving. This is done
by flipping the indices in case we are pointing into the empty part
of the local matrices.
@code
for (unsigned int i = 0; i < dofs_per_cell; ++i)
  for (unsigned int j = i + 1; j < dofs_per_cell; ++j)
    {
      local_matrix(i, j) = local_matrix(j, i);
      local_preconditioner_matrix(i, j) =
        local_preconditioner_matrix(j, i);
    }
@endcode
@code
cell->get_dof_indices(local_dof_indices);
constraints.distribute_local_to_global(local_matrix,
                                       local_rhs,
                                       local_dof_indices,
                                       system_matrix,
                                       system_rhs);
constraints.distribute_local_to_global(local_preconditioner_matrix,
                                       local_dof_indices,
                                       preconditioner_matrix);
}
@endcode
Before we're going to solve this linear system, we generate a
preconditioner for the velocity-velocity matrix, i.e.,
<code>block(0,0)</code> in the system matrix. As mentioned above, this
depends on the spatial dimension. Since the two classes described by
the <code>InnerPreconditioner::type</code> alias have the same
interface, we do not have to do anything different whether we want to
use a sparse direct solver or an ILU:
@code
std::cout << "   Computing preconditioner..." << std::endl << std::flush;

A_preconditioner =
  std::make_shared<typename InnerPreconditioner<dim>::type>();
A_preconditioner->initialize(
  system_matrix.block(0, 0),
  typename InnerPreconditioner<dim>::type::AdditionalData());
}
@endcode
<a name="StokesProblemsolve"></a>
<h4>StokesProblem::solve</h4>

After the discussion in the introduction and the definition of the
respective classes above, the implementation of the <code>solve</code>
function is rather straight-forward and done in a similar way as in
@ref step_20 "step-20". To start with, we need an object of the
<code>InverseMatrix</code> class that represents the inverse of the
matrix A. As described in the introduction, the inverse is generated with
the help of an inner preconditioner of type
<code>InnerPreconditioner::type</code>.
@code
template <int dim>
void StokesProblem<dim>::solve()
{
  const InverseMatrix<SparseMatrix<double>,
                      typename InnerPreconditioner<dim>::type>
                 A_inverse(system_matrix.block(0, 0), *A_preconditioner);
  Vector<double> tmp(solution.block(0).size());
@endcode
This is as in @ref step_20 "step-20". We generate the right hand side
@f$B A^{-1} F - G@f$ for the Schur complement and an object that represents
the respective linear operation @f$B A^{-1} B^T@f$, now with a template
parameter indicating the preconditioner - in accordance with the
definition of the class.
@code
{
  Vector<double> schur_rhs(solution.block(1).size());
  A_inverse.vmult(tmp, system_rhs.block(0));
  system_matrix.block(1, 0).vmult(schur_rhs, tmp);
  schur_rhs -= system_rhs.block(1);

  SchurComplement<typename InnerPreconditioner<dim>::type> schur_complement(
    system_matrix, A_inverse);
@endcode
The usual control structures for the solver call are created...

@code
SolverControl            solver_control(solution.block(1).size(),
                                        1e-6 * schur_rhs.l2_norm());
SolverCG<Vector<double>> cg(solver_control);
@endcode
Now to the preconditioner to the Schur complement. As explained in the
introduction, the preconditioning is done by a mass matrix in the
pressure variable.

Actually, the solver needs to have the preconditioner in the form
@f$P^{-1}@f$, so we need to create an inverse operation. Once again, we use
an object of the class <code>InverseMatrix</code>, which implements the
<code>vmult</code> operation that is needed by the solver. In this case,
we have to invert the pressure mass matrix. As it already turned out in
earlier tutorial programs, the inversion of a mass matrix is a rather
cheap and straight-forward operation (compared to, e.g., a Laplace
matrix). The CG method with ILU preconditioning converges in 5-10 steps,
independently of the mesh size. This is precisely what we do here: We
choose another ILU preconditioner and take it along to the InverseMatrix
object via the corresponding template parameter. A CG solver is then
called within the vmult operation of the inverse matrix.
An alternative that is cheaper to build, but needs more iterations
afterwards, would be to choose an SSOR preconditioner with factor 1.2. It
needs about twice the number of iterations, but the costs for its
generation are almost negligible.
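
For illustration, such a variant could look like the following sketch
(a hypothetical drop-in replacement for the ILU object created next; the
relaxation factor 1.2 is the one mentioned above):

@code
// Sketch: SSOR on the pressure mass matrix as a cheaper-to-build
// alternative to the SparseILU used below.
PreconditionSSOR<SparseMatrix<double>> pmass_ssor;
pmass_ssor.initialize(
  preconditioner_matrix.block(1, 1),
  PreconditionSSOR<SparseMatrix<double>>::AdditionalData(1.2));
@endcode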
@code
SparseILU<double> preconditioner;
preconditioner.initialize(preconditioner_matrix.block(1, 1),
                          SparseILU<double>::AdditionalData());

InverseMatrix<SparseMatrix<double>, SparseILU<double>> m_inverse(
  preconditioner_matrix.block(1, 1), preconditioner);
@endcode
With the Schur complement and an efficient preconditioner at hand, we
can solve the respective equation for the pressure (i.e. block 1 in
the solution vector) in the usual way:

@code
cg.solve(schur_complement, solution.block(1), schur_rhs, m_inverse);
@endcode
After this first solution step, the hanging node constraints have to
be distributed to the solution in order to achieve a consistent
pressure field:

@code
constraints.distribute(solution);

std::cout << "  " << solver_control.last_step()
          << " outer CG Schur complement iterations for pressure"
          << std::endl;
}
@endcode
As in @ref step_20 "step-20", we finally need to solve for the velocity
equation where we plug in the solution to the pressure equation. This
involves only objects we already know - so we simply multiply @f$p@f$ by
@f$B^T@f$, subtract the result from the right hand side and multiply by the
inverse of @f$A@f$. At the end, we need to distribute the constraints from
hanging nodes in order to obtain a consistent flow field:
@code
{
  system_matrix.block(0, 1).vmult(tmp, solution.block(1));
  tmp *= -1;
  tmp += system_rhs.block(0);

  A_inverse.vmult(solution.block(0), tmp);

  constraints.distribute(solution);
}
}
@endcode
<a name="StokesProblemoutput_results"></a>
<h4>StokesProblem::output_results</h4>

The next function generates graphical output. In this example, we are
going to use the VTK file format. We attach names to the individual
variables in the problem: <code>velocity</code> to the <code>dim</code>
components of velocity and <code>pressure</code> to the pressure.
Not all visualization programs have the ability to group individual
vector components into a vector to provide vector plots; in particular,
this holds for some VTK-based visualization programs. In this case, the
logical grouping of components into vectors should already be described
in the file containing the data. In other words, what we need to do is
provide our output writers with a way to know which of the components of
the finite element logically form a vector (with @f$d@f$ components in @f$d@f$
space dimensions) rather than letting them assume that we simply have a
bunch of scalar fields. This is achieved using the members of the
<code>DataComponentInterpretation</code> namespace: as with the filename,
we create a vector in which the first <code>dim</code> components refer
to the velocities and are given the tag
DataComponentInterpretation::component_is_part_of_vector; we
finally push one tag
DataComponentInterpretation::component_is_scalar to describe
the grouping of the pressure variable.
The rest of the function is then the same as in @ref step_20 "step-20".

@code
template <int dim>
void
StokesProblem<dim>::output_results(const unsigned int refinement_cycle) const
{
  std::vector<std::string> solution_names(dim, "velocity");
  solution_names.emplace_back("pressure");

  std::vector<DataComponentInterpretation::DataComponentInterpretation>
    data_component_interpretation(
      dim, DataComponentInterpretation::component_is_part_of_vector);
  data_component_interpretation.push_back(
    DataComponentInterpretation::component_is_scalar);

  DataOut<dim> data_out;
  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(solution,
                           solution_names,
                           DataOut<dim>::type_dof_data,
                           data_component_interpretation);
  data_out.build_patches();

  std::ofstream output(
    "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtk");
  data_out.write_vtk(output);
}
@endcode
<a name="StokesProblemrefine_mesh"></a>
<h4>StokesProblem::refine_mesh</h4>

This is the last interesting function of the <code>StokesProblem</code>
class. As indicated by its name, it takes the solution to the problem
and refines the mesh where this is needed. The procedure is the same as
in the respective step in @ref step_6 "step-6", with the exception that
we base the refinement only on the change in pressure, i.e., we call the
Kelly error estimator with a mask object of type ComponentMask that
selects the single scalar component for the pressure that we are
interested in (we get such a mask from the finite element class by
specifying the component we want). Additionally, we do not coarsen the
grid again:
@code
template <int dim>
void StokesProblem<dim>::refine_mesh()
{
  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

  const FEValuesExtractors::Scalar pressure(dim);
  KellyErrorEstimator<dim>::estimate(
    dof_handler,
    QGauss<dim - 1>(degree + 1),
    std::map<types::boundary_id, const Function<dim> *>(),
    solution,
    estimated_error_per_cell,
    fe.component_mask(pressure));

  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                  estimated_error_per_cell,
                                                  0.3,
                                                  0.0);
  triangulation.execute_coarsening_and_refinement();
}
@endcode
<a name="StokesProblemrun"></a>
<h4>StokesProblem::run</h4>

The last step in the Stokes class is, as usual, the function that
generates the initial grid and calls the other functions in the
respective order.

We start off with a rectangle of size @f$4 \times 1@f$ (in 2d) or @f$4 \times 1
\times 1@f$ (in 3d), placed in @f$R^2/R^3@f$ as @f$(-2,2)\times(-1,0)@f$ or
@f$(-2,2)\times(0,1)\times(-1,0)@f$, respectively. It is natural to start
with equal mesh size in each direction, so we subdivide the initial
rectangle four times in the first coordinate direction. To limit the
scope of the variables involved in the creation of the mesh to the range
where we actually need them, we put the entire block between a pair of
braces:
@code
template <int dim>
void StokesProblem<dim>::run()
{
  {
    std::vector<unsigned int> subdivisions(dim, 1);
    subdivisions[0] = 4;

    const Point<dim> bottom_left = (dim == 2 ?
                                      Point<dim>(-2, -1) :    // 2d case
                                      Point<dim>(-2, 0, -1)); // 3d case

    const Point<dim> top_right = (dim == 2 ?
                                    Point<dim>(2, 0) :    // 2d case
                                    Point<dim>(2, 1, 0)); // 3d case

    GridGenerator::subdivided_hyper_rectangle(triangulation,
                                              subdivisions,
                                              bottom_left,
                                              top_right);
  }
@endcode
A boundary indicator of 1 is set on all boundary faces that are subject
to Dirichlet boundary conditions, i.e. on faces that are located at 0 in
the last coordinate direction. See the example description above for
details.

@code
for (const auto &cell : triangulation.active_cell_iterators())
  for (const auto &face : cell->face_iterators())
    if (face->center()[dim - 1] == 0)
      face->set_all_boundary_ids(1);
@endcode
We then apply an initial refinement before solving for the first
time. In 3D, there are going to be more degrees of freedom, so we
refine less there:

@code
triangulation.refine_global(4 - dim);
@endcode
As first seen in @ref step_6 "step-6", we cycle over the different
refinement levels and refine (except for the first cycle), setup the
degrees of freedom and matrices, assemble, solve and create output:

@code
for (unsigned int refinement_cycle = 0; refinement_cycle < 6;
     ++refinement_cycle)
  {
    std::cout << "Refinement cycle " << refinement_cycle << std::endl;

    if (refinement_cycle > 0)
      refine_mesh();

    setup_dofs();

    std::cout << "   Assembling..." << std::endl << std::flush;
    assemble_system();

    std::cout << "   Solving..." << std::flush;
    solve();

    output_results(refinement_cycle);

    std::cout << std::endl;
  }
}
} // namespace Step22
@endcode
<a name="Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>

The main function is the same as in @ref step_20 "step-20". We pass the
element degree as a parameter and choose the space dimension at the
well-known template slot.

@code
int main()
{
  try
    {
      using namespace Step22;

      StokesProblem<2> flow_problem(1);
      flow_problem.run();
    }
@endcode
@code
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
@endcode
<a name="Results"></a><h1>Results</h1>

<a name="Outputoftheprogramandgraphicalvisualization"></a><h3>Output of the program and graphical visualization</h3>

<a name="2Dcalculations"></a><h4>2D calculations</h4>
Running the program with the space dimension set to 2 in the
<code>main</code> function yields the following output in "release mode"
(see also <a href="http://www.math.colostate.edu/~bangerth/videos.676.18.html">video lecture 18</a>):
@code
examples/step-22> make run
Refinement cycle 0
   Number of active cells: 64
   Number of degrees of freedom: 679 (594+85)
   Assembling...
   Computing preconditioner...
   Solving...  11 outer CG Schur complement iterations for pressure

Refinement cycle 1
   Number of active cells: 160
   Number of degrees of freedom: 1683 (1482+201)
   Assembling...
   Computing preconditioner...
   Solving...  11 outer CG Schur complement iterations for pressure

Refinement cycle 2
   Number of active cells: 376
   Number of degrees of freedom: 3813 (3370+443)
   Assembling...
   Computing preconditioner...
   Solving...  11 outer CG Schur complement iterations for pressure

Refinement cycle 3
   Number of active cells: 880
   Number of degrees of freedom: 8723 (7722+1001)
   Assembling...
   Computing preconditioner...
   Solving...  11 outer CG Schur complement iterations for pressure

Refinement cycle 4
   Number of active cells: 2008
   Number of degrees of freedom: 19383 (17186+2197)
   Assembling...
   Computing preconditioner...
   Solving...  11 outer CG Schur complement iterations for pressure

Refinement cycle 5
   Number of active cells: 4288
   Number of degrees of freedom: 40855 (36250+4605)
   Assembling...
   Computing preconditioner...
   Solving...  11 outer CG Schur complement iterations for pressure
@endcode
The entire computation above takes about 2 seconds on a reasonably
quick (by 2015 standards) machine.

What we see immediately from this is that the number of (outer)
iterations does not increase as we refine the mesh. This confirms the
statement in the introduction that preconditioning the Schur
complement with the mass matrix indeed yields a matrix spectrally
equivalent to the identity matrix (i.e. with eigenvalues bounded above
and below independently of the mesh size or the relative sizes of
cells). In other words, the mass matrix and the Schur complement are
spectrally equivalent.
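
Spelled out, spectral equivalence means that there are constants @f$c, C@f$,
independent of the mesh, such that
@f{align*}{
  c\,(M_p q, q) \;\le\; (S q, q) \;\le\; C\,(M_p q, q)
  \qquad \text{for all pressures } q,
@f}
which is exactly the property that makes the number of preconditioned CG
iterations mesh-independent.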
In the images below, we show the grids for the first six refinement
steps in the program. Observe how the grid is refined in regions
where the solution rapidly changes: On the upper boundary, we have
Dirichlet boundary conditions that are -1 in the left half of the line
and 1 in the right one, so there is an abrupt change at @f$x=0@f$. Likewise,
there are changes from Dirichlet to Neumann data in the two upper
corners, so there is need for refinement there as well:
<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-0.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-1.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-2.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-3.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-4.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-5.png" alt="">
    </td>
  </tr>
</table>
Finally, following is a plot of the flow field. It shows fluid
transported along with the moving upper boundary and being replaced by
material coming from below:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.solution.png" alt="">

This plot uses the capability of VTK-based visualization programs (in
this case of VisIt) to show vector data; this is the result of us
declaring the velocity components of the finite element in use to be a
set of vector components, rather than independent scalar components, in
the <code>StokesProblem@<dim@>::%output_results</code> function of this
tutorial program.
<a name="3Dcalculations"></a><h4>3D calculations</h4>

In 3d, the screen output of the program looks like this:
@code
Refinement cycle 0
   Number of active cells: 32
   Number of degrees of freedom: 1356 (1275+81)
   Assembling...
   Computing preconditioner...
   Solving... 13 outer CG Schur complement iterations for pressure.

Refinement cycle 1
   Number of active cells: 144
   Number of degrees of freedom: 5088 (4827+261)
   Assembling...
   Computing preconditioner...
   Solving... 14 outer CG Schur complement iterations for pressure.

Refinement cycle 2
   Number of active cells: 704
   Number of degrees of freedom: 22406 (21351+1055)
   Assembling...
   Computing preconditioner...
   Solving... 14 outer CG Schur complement iterations for pressure.

Refinement cycle 3
   Number of active cells: 3168
   Number of degrees of freedom: 93176 (89043+4133)
   Assembling...
   Computing preconditioner...
   Solving... 15 outer CG Schur complement iterations for pressure.

Refinement cycle 4
   Number of active cells: 11456
   Number of degrees of freedom: 327808 (313659+14149)
   Assembling...
   Computing preconditioner...
   Solving... 15 outer CG Schur complement iterations for pressure.

Refinement cycle 5
   Number of active cells: 45056
   Number of degrees of freedom: 1254464 (1201371+53093)
   Assembling...
   Computing preconditioner...
   Solving... 14 outer CG Schur complement iterations for pressure.
@endcode
Again, we see that the number of outer iterations does not increase as
we refine the mesh. Nevertheless, the compute time increases
significantly: for each of the iterations above separately, it takes about
0.14 seconds, 0.63 seconds, 4.8 seconds, 35 seconds, 2 minutes and 33 seconds,
and 13 minutes and 12 seconds. This overall superlinear (in the number of
unknowns) increase in runtime is due to the fact that our inner solver is not
@f${\cal O}(N)@f$: a simple experiment shows that as we keep refining the mesh, the
average number of ILU-preconditioned CG iterations to invert the
velocity-velocity block @f$A@f$ increases.

We will address the question of how possibly to improve our solver
<a href="#improved-solver">below</a>.
As for the graphical output, the grids generated during the solution
process look as follows:

<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-0.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-1.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-2.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-3.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-4.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-5.png" alt="">
    </td>
  </tr>
</table>
Again, they show essentially the location of singularities introduced
by boundary conditions. The vector field computed makes for an
interesting picture:

<img src="https://www.dealii.org/images/steps/developer/step-22.3d.solution.png" alt="">

The isocontours shown here as well are those of the pressure
variable, showing the singularity at the point of discontinuous
velocity boundary conditions.
<a name="Sparsitypattern"></a><h3>Sparsity pattern</h3>

As explained during the generation of the sparsity pattern, it is
important to have the numbering of degrees of freedom in mind when
using preconditioners like incomplete LU decompositions. This is most
conveniently visualized using the distribution of nonzero elements in
the stiffness matrix.
If we don't do anything special to renumber degrees of freedom (i.e.,
without using DoFRenumbering::Cuthill_McKee, but with using
DoFRenumbering::component_wise to ensure that degrees of freedom are
appropriately sorted into their corresponding blocks of the matrix and
vector), then we get the following image after the first adaptive
refinement in two dimensions:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.sparsity-nor.png" alt="">
In order to generate such a graph, you have to insert a piece of
code like the following at the end of the setup step.

@code
std::ofstream out("sparsity_pattern.gpl");
sparsity_pattern.print_gnuplot(out);
@endcode
It is clearly visible that the nonzero entries are spread over almost the
whole matrix. This makes preconditioning by ILU inefficient: ILU generates
a Gaussian elimination (LU decomposition) without fill-in elements, which
means that more tentative fill-ins left out will result in a worse
approximation of the complete decomposition.
In this program, we have thus chosen a more advanced renumbering of
components. The renumbering with DoFRenumbering::Cuthill_McKee and
grouping the components into velocity and pressure yields the following
output:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.sparsity-ren.png" alt="">
It is apparent that the situation has improved a lot. Most of the
elements are now concentrated around the diagonal in the (0,0) block in
the matrix. Similar effects are also visible for the other blocks. In
this case, the ILU decomposition will be much closer to the full LU
decomposition, which improves the quality of the preconditioner. (It may
be interesting to note that the sparse direct solver UMFPACK does some
%internal renumbering of the equations before actually generating a
sparse LU decomposition; that procedure leads to a very similar pattern
to the one we got from the Cuthill-McKee algorithm.)
Finally, we want to have a closer look at a sparsity pattern in 3D. We
show only the (0,0) block of the matrix, again after one adaptive
refinement. Apart from the fact that the matrix size has increased, it is
also visible that there are many more entries in the matrix. Moreover,
even for the optimized renumbering, there will be a considerable amount
of tentative fill-in elements. This illustrates why UMFPACK is not a good
choice in 3D - a full decomposition needs many new entries that
eventually won't fit into the physical memory (RAM):
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

<a name="improved-solver"></a>
<a name="Improvedlinearsolverin3D"></a><h4>Improved linear solver in 3D</h4>
We have seen in the section of computational results that the number of
outer iterations does not depend on the mesh size, which is optimal in a
sense of scalability. This does, however, not apply to the solver as a
whole, as mentioned above: We did not look at the number of inner
iterations when generating the inverse of the matrix @f$A@f$ and the mass
matrix @f$M_p@f$. Of course, this is unproblematic in the 2D case where we
precondition @f$A@f$ with a direct solver and the <code>vmult</code>
operation of the inverse matrix structure will converge in one single CG
step, but this changes in 3D where we only use an ILU preconditioner.
There, the number of required preconditioned CG steps to invert @f$A@f$
increases as the mesh is refined, and each <code>vmult</code> operation
involves on average approximately 14, 23, 36, 59, 75 and 101 inner CG
iterations in the refinement steps shown above. (On the other hand, the
number of iterations for applying the inverse pressure mass matrix is
always around five, both in two and three dimensions.) To summarize, most
work is spent on solving linear systems with the same matrix @f$A@f$ over and
over again. What makes this look even worse is the fact that we actually
invert a matrix that is about 95 percent the size of the total system
matrix and stands for 85 percent of the non-zero entries in the sparsity
pattern. Hence, the natural question is whether it is reasonable to solve
a linear system with matrix @f$A@f$ about 15 times when calculating the
solution to the block system.

The answer is, of course, that we can do that in a few other (most of the
time better) ways. Nevertheless, it has to be remarked that an indefinite
system as the one at hand puts indeed much higher demands on the linear
algebra than standard elliptic problems as we have seen in the early
tutorial programs. The improvements are still rather unsatisfactory, if
one compares with an elliptic problem of similar size. Either way, we
will introduce below a number of improvements to the linear solver, a
discussion that we will re-consider again with additional options in the
@ref step_31 "step-31" program.
<a name="improved-ilu"></a>
<a name="BetterILUdecompositionbysmartreordering"></a><h5>Better ILU decomposition by smart reordering</h5>
A first attempt to improve the speed of the linear solution process is to
choose a dof reordering that makes the ILU closer to a full LU
decomposition, as already mentioned in the in-code comments. The
DoFRenumbering namespace compares several choices for the renumbering of
dofs for the Stokes equations. The best result regarding the computing
time was found for the King ordering, which is accessible through the
function DoFRenumbering::boost::king_ordering. With that ordering, the
inner solver needs considerably fewer operations, e.g. about 62 inner CG
iterations for the inversion of @f$A@f$ at cycle 4 compared to about 75
iterations with the standard Cuthill-McKee algorithm. Also, the computing
time at cycle 4 decreased from about 17 to 11 minutes for the
<code>solve()</code> call. However, the King ordering (and the orderings
provided by the DoFRenumbering::boost namespace in general) has a serious
drawback: it uses much more memory than the built-in deal.II versions,
since it acts on abstract graphs rather than the geometry provided by the
triangulation. In the present case, the renumbering takes about 5 times
as much memory, which yields an infeasible algorithm for the last cycle
in 3D with 1.2 million unknowns.
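
A sketch of what that change would look like in <code>setup_dofs()</code>
(assuming a deal.II build with the Boost-based renumberings available;
the surrounding names are those of this program):

@code
// Replace the Cuthill-McKee renumbering by the King ordering from
// the DoFRenumbering::boost namespace:
DoFRenumbering::boost::king_ordering(dof_handler);
DoFRenumbering::component_wise(dof_handler, block_component);
@endcode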
<a name="BetterpreconditionerfortheinnerCGsolver"></a><h5>Better preconditioner for the inner CG solver</h5>

Another idea to improve the situation even more would be to choose a
preconditioner that makes CG for the (0,0) matrix @f$A@f$ converge in a
mesh-independent number of iterations, say 10 to 30. We have seen such a
candidate in @ref step_16 "step-16": multigrid.
<a name="BlockSchurcomplementpreconditioner"></a><h5>Block Schur complement preconditioner</h5>
<a name="block-schur"></a>

Even with a good preconditioner for @f$A@f$, we still need to solve the same
linear system repeatedly (with different right hand sides, though) in
order to make the Schur complement solve converge. The approach we are
going to discuss here is how inner iteration and outer iteration can be
combined. If we persist in calculating the Schur complement, there is no
other possibility.
The alternative is to attack the block system at once and use an
approximate Schur complement as an efficient preconditioner. The idea is
as follows: If we find a block preconditioner @f$P@f$ such that the matrix
@f{eqnarray*}
  P^{-1}\left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
@f}
is simple, then an iterative solver with that preconditioner will
converge in a few iterations. Using the Schur complement
@f$S = B A^{-1} B^T@f$, one finds that
@f{eqnarray*}
  P^{-1}
  =
  \left(\begin{array}{cc}
    A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
  \end{array}\right)
@f}
would appear to be a good choice since
@f{eqnarray*}
  P^{-1}\left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
  =
  \left(\begin{array}{cc}
    A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
  \end{array}\right)\cdot \left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
  =
  \left(\begin{array}{cc}
    I & A^{-1} B^T \\ 0 & I
  \end{array}\right).
@f}
This is the approach taken by the paper by Silvester and Wathen
referenced to in the introduction (with the exception that Silvester and
Wathen use right preconditioning). In this case, a Krylov-based iterative
method would converge in one step only if exact inverses of @f$A@f$ and @f$S@f$
were applied, since all the eigenvalues are one (and the number of
iterations in such a method is bounded by the number of distinct
eigenvalues). Below, we will discuss the choice of an adequate solver for
this problem. First, we are going to have a closer look at the
implementation of the preconditioner.
Since @f$P@f$ is aimed to be a preconditioner only, we shall use
approximations to the inverse of the Schur complement @f$S@f$ and the matrix
@f$A@f$. Hence, the Schur complement will be approximated by the pressure
mass matrix @f$M_p@f$, and we use a preconditioner to @f$A@f$ (without an
InverseMatrix class around it) for approximating @f$A^{-1}@f$.
Here comes the class that implements the block Schur complement
preconditioner. The <code>vmult</code> operation for block vectors
according to the derivation above can be specified by three successive
operations:
@code
template <class PreconditionerA, class PreconditionerMp>
class BlockSchurPreconditioner : public Subscriptor
{
public:
  BlockSchurPreconditioner(
    const BlockSparseMatrix<double> &S,
    const InverseMatrix<SparseMatrix<double>, PreconditionerMp> &Mpinv,
    const PreconditionerA &Apreconditioner);

  void vmult(BlockVector<double> &dst, const BlockVector<double> &src) const;

private:
  const SmartPointer<const BlockSparseMatrix<double>> system_matrix;
  const SmartPointer<const InverseMatrix<SparseMatrix<double>,
                                         PreconditionerMp>> m_inverse;
  const PreconditionerA &a_preconditioner;

  mutable Vector<double> tmp;
};

template <class PreconditionerA, class PreconditionerMp>
BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::BlockSchurPreconditioner(
  const BlockSparseMatrix<double> &S,
  const InverseMatrix<SparseMatrix<double>, PreconditionerMp> &Mpinv,
  const PreconditionerA &Apreconditioner)
  : system_matrix(&S),
    m_inverse(&Mpinv),
    a_preconditioner(Apreconditioner),
    tmp(S.block(1, 1).m())
{}

template <class PreconditionerA, class PreconditionerMp>
void BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::vmult(
  BlockVector<double> &      dst,
  const BlockVector<double> &src) const
{
  // Form u_new = A^{-1} u
  a_preconditioner.vmult(dst.block(0), src.block(0));
  // Form tmp = -B u_new + p (SparseMatrix::residual does precisely this)
  system_matrix->block(1, 0).residual(tmp, dst.block(0), src.block(1));
  // Change sign in tmp
  tmp *= -1;
  // Multiply by approximate Schur complement inverse
  // (i.e. a pressure mass matrix)
  m_inverse->vmult(dst.block(1), tmp);
}
@endcode
Since we act on the whole block system now, we have to live with one
disadvantage: we need to perform the solver iterations on the full block
system instead of the smaller pressure space.

Now we turn to the question which solver we should use for the block
system. The first observation is that the resulting preconditioned matrix
cannot be solved with CG since it is neither positive definite nor
symmetric.
The deal.II libraries implement several solvers that are appropriate for
the problem at hand. One choice is the solver @ref SolverBicgstab
"BiCGStab", which was used for the solution of the unsymmetric advection
problem in @ref step_9 "step-9". The second option, the one we are going
to choose, is @ref SolverGMRES "GMRES" (generalized minimum residual).
Both methods have their pros and cons - there are problems where one of
the two candidates clearly outperforms the other, and vice versa.
<a href="http://en.wikipedia.org/wiki/GMRES#Comparison_with_other_solvers">Wikipedia</a>'s
article on the GMRES method gives a comparative presentation.
A more comprehensive and well-founded comparison can be read e.g. in the
book by J.W. Demmel (Applied Numerical Linear Algebra, SIAM, 1997,
section 6.6.6).
For our specific problem with the ILU preconditioner for @f$A@f$, we
certainly need to perform hundreds of iterations on the block system for
large problem sizes (we won't beat CG!). Actually, this disfavors GMRES:
During the GMRES iterations, a basis of Krylov vectors is successively
built up and some operations are performed on these vectors. The more
vectors are in this basis, the more operations and memory will be needed.
The number of operations scales as @f${\cal O}(n + k^2)@f$ and memory as
@f${\cal O}(kn)@f$, where @f$k@f$ is the number of vectors in the Krylov basis
and @f$n@f$ the size of the (block) matrix. To not let these demands grow
excessively, deal.II limits the size @f$k@f$ of the basis to 30 vectors by
default. Then, the basis is rebuilt. This implementation of the GMRES
method is called GMRES(k), with default @f$k=30@f$. What we have gained by
this restriction, namely a bound on operations and memory requirements,
will be compensated by the fact that we use an incomplete basis - this
will increase the number of required iterations.
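
To get a feeling for the memory term @f${\cal O}(kn)@f$: with the default
@f$k=30@f$ and, say, @f$n=10^6@f$ unknowns, the Krylov basis alone occupies
@f$30\times 10^6@f$ doubles, i.e. roughly 240 MB.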
BiCGStab, on the other hand, won't get slower when many iterations are
needed (one iteration uses only results from one preceding step and not
all the steps as GMRES). Besides the fact that BiCGStab is more expensive
per step since two matrix-vector products are needed (compared to one for
CG or GMRES), there is one main reason which makes BiCGStab not
appropriate for this problem: The preconditioner applies the inverse of
the pressure mass matrix by using the InverseMatrix class. Since the
application of the inverse matrix to a vector is done only in an
approximate way (an exact inverse is too expensive), this will also
affect the solver. In the case of BiCGStab, the Krylov vectors will not
be orthogonal due to that perturbation. While this is not critical for a
small number of steps (up to about 50), it ruins the performance of the
solver when these perturbations have grown to a significant magnitude in
the course of the iterations.
We did some experiments with BiCGStab and found it to be faster than
GMRES up to refinement cycle 3 (in 3D), but it became very slow for
cycles 4 and 5 (even slower than the original Schur complement), so the
solver is useless in this situation. Choosing a sharper tolerance for the
inverse matrix class (<code>1e-10*src.l2_norm()</code> instead of
<code>1e-6*src.l2_norm()</code>) made BiCGStab perform well also for
cycle 4, but did not change the failure on the very large problems.
GMRES is of course also affected by the approximate inverses, but it is
not as sensitive to orthogonality and retains a relatively good
performance also for large sizes, see the results below.
With this said, we turn to the realization of the solver call with GMRES
with @f$k=100@f$ temporary vectors:
@code
const SparseMatrix<double> &pressure_mass_matrix
  = preconditioner_matrix.block(1, 1);
SparseILU<double> pmass_preconditioner;
pmass_preconditioner.initialize(pressure_mass_matrix,
                                SparseILU<double>::AdditionalData());

InverseMatrix<SparseMatrix<double>, SparseILU<double>>
  m_inverse(pressure_mass_matrix, pmass_preconditioner);

BlockSchurPreconditioner<typename InnerPreconditioner<dim>::type,
                         SparseILU<double>>
  preconditioner(system_matrix, m_inverse, *A_preconditioner);

SolverControl solver_control(system_matrix.m(),
                             1e-6 * system_rhs.l2_norm());
GrowingVectorMemory<BlockVector<double>> vector_memory;
SolverGMRES<BlockVector<double>>::AdditionalData gmres_data;
gmres_data.max_n_tmp_vectors = 100;

SolverGMRES<BlockVector<double>> gmres(solver_control, vector_memory,
                                       gmres_data);

gmres.solve(system_matrix, solution, system_rhs, preconditioner);

constraints.distribute(solution);

std::cout << " "
          << solver_control.last_step()
          << " block GMRES iterations";
@endcode
Obviously, one needs to add the include file @ref SolverGMRES
"<lac/solver_gmres.h>" in order to make this run.
We call the solver with a BlockVector template in order to enable
GMRES to operate on block vectors and matrices.
Note also that we need to set the (1,1) block in the system
matrix to zero (we saved the pressure mass matrix there which is not part of the
problem) after we copied the information to another matrix.
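In code, that bookkeeping step could look like the following sketch; where
exactly it happens in the modified program is an assumption on our part:

      // Copy the pressure mass matrix, which was assembled into the (1,1)
      // block of the system matrix, over to the preconditioner matrix, then
      // zero out that block since the mass matrix is not part of the Stokes
      // system itself.
      preconditioner_matrix.block(1,1).copy_from (system_matrix.block(1,1));
      system_matrix.block(1,1) = 0;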
Using the Timer class, we collect some statistics that compare the runtime
of the block solver with the one from the problem implementation above.
Besides the solution with the two options we also check if the solutions
of the two variants are close to each other (i.e., whether this solver indeed
gives the same solution as we had before) and calculate the infinity
norm of the vector difference.
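A minimal sketch of this check, with placeholder names
<code>schur_solution</code> and <code>gmres_solution</code> for the two
results (these names are not from the program):

      Timer timer;
      timer.start();
      // ... run one of the two solvers ...
      timer.stop();
      std::cout << "  [" << timer.wall_time() << " s]" << std::endl;

      // Measure how far the two solutions are apart in the maximum norm.
      BlockVector<double> difference (schur_solution);
      difference -= gmres_solution;
      std::cout << "  l_infinity difference between solution vectors: "
                << difference.linfty_norm() << std::endl;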
Let's first see the results in 2D:

Refinement cycle 0
   Number of active cells: 64
   Number of degrees of freedom: 679 (594+85) [0.00162792 s]
   Assembling... [0.00108981 s]
   Computing preconditioner... [0.0025959 s]
   Solving...
      Schur complement: 11 outer CG iterations for p [0.00479603 s]
      Block Schur preconditioner: 12 GMRES iterations [0.00441718 s]
   l_infinity difference between solution vectors: 5.38258e-07

Refinement cycle 1
   Number of active cells: 160
   Number of degrees of freedom: 1683 (1482+201) [0.00345707 s]
   Assembling... [0.00237417 s]
   Computing preconditioner... [0.00605702 s]
   Solving...
      Schur complement: 11 outer CG iterations for p [0.0123992 s]
      Block Schur preconditioner: 12 GMRES iterations [0.011909 s]
   l_infinity difference between solution vectors: 1.74658e-05

Refinement cycle 2
   Number of active cells: 376
   Number of degrees of freedom: 3813 (3370+443) [0.00729299 s]
   Assembling... [0.00529909 s]
   Computing preconditioner... [0.0167508 s]
   Solving...
      Schur complement: 11 outer CG iterations for p [0.031672 s]
      Block Schur preconditioner: 12 GMRES iterations [0.029232 s]
   l_infinity difference between solution vectors: 7.81569e-06

Refinement cycle 3
   Number of active cells: 880
   Number of degrees of freedom: 8723 (7722+1001) [0.017709 s]
   Assembling... [0.0126002 s]
   Computing preconditioner... [0.0435679 s]
   Solving...
      Schur complement: 11 outer CG iterations for p [0.0971651 s]
      Block Schur preconditioner: 12 GMRES iterations [0.0992041 s]
   l_infinity difference between solution vectors: 1.87249e-05

Refinement cycle 4
   Number of active cells: 2008
   Number of degrees of freedom: 19383 (17186+2197) [0.039988 s]
   Assembling... [0.028281 s]
   Computing preconditioner... [0.118314 s]
   Solving...
      Schur complement: 11 outer CG iterations for p [0.252133 s]
      Block Schur preconditioner: 13 GMRES iterations [0.269125 s]
   l_infinity difference between solution vectors: 6.38657e-05

Refinement cycle 5
   Number of active cells: 4288
   Number of degrees of freedom: 40855 (36250+4605) [0.0880702 s]
   Assembling... [0.0603511 s]
   Computing preconditioner... [0.278339 s]
   Solving...
      Schur complement: 11 outer CG iterations for p [0.53846 s]
      Block Schur preconditioner: 13 GMRES iterations [0.578667 s]
   l_infinity difference between solution vectors: 0.000173363
We see that there is no huge difference in the solution time between the
block Schur complement preconditioner solver and the Schur complement
itself. The reason is simple: we used a direct solve as preconditioner for
@f$A@f$ - so we cannot expect any gain by avoiding the inner iterations. We see
that the number of iterations has slightly increased for GMRES, but all in
all the two choices are fairly similar.
The picture of course changes in 3D:

Refinement cycle 0
   Number of active cells: 32
   Number of degrees of freedom: 1356 (1275+81) [0.00845218 s]
   Assembling... [0.019372 s]
   Computing preconditioner... [0.00712395 s]
   Solving...
      Schur complement: 13 outer CG iterations for p [0.0320101 s]
      Block Schur preconditioner: 22 GMRES iterations [0.0048759 s]
   l_infinity difference between solution vectors: 2.15942e-05

Refinement cycle 1
   Number of active cells: 144
   Number of degrees of freedom: 5088 (4827+261) [0.0346942 s]
   Assembling... [0.0857739 s]
   Computing preconditioner... [0.0465031 s]
   Solving...
      Schur complement: 14 outer CG iterations for p [0.349258 s]
      Block Schur preconditioner: 35 GMRES iterations [0.048759 s]
   l_infinity difference between solution vectors: 1.77657e-05

Refinement cycle 2
   Number of active cells: 704
   Number of degrees of freedom: 22406 (21351+1055) [0.175669 s]
   Assembling... [0.437447 s]
   Computing preconditioner... [0.286435 s]
   Solving...
      Schur complement: 14 outer CG iterations for p [3.65519 s]
      Block Schur preconditioner: 63 GMRES iterations [0.497787 s]
   l_infinity difference between solution vectors: 5.08078e-05

Refinement cycle 3
   Number of active cells: 3168
   Number of degrees of freedom: 93176 (89043+4133) [0.790985 s]
   Assembling... [1.97598 s]
   Computing preconditioner... [1.4325 s]
   Solving...
      Schur complement: 15 outer CG iterations for p [29.9666 s]
      Block Schur preconditioner: 128 GMRES iterations [5.02645 s]
   l_infinity difference between solution vectors: 0.000119671

Refinement cycle 4
   Number of active cells: 11456
   Number of degrees of freedom: 327808 (313659+14149) [3.44995 s]
   Assembling... [7.54772 s]
   Computing preconditioner... [5.46306 s]
   Solving...
      Schur complement: 15 outer CG iterations for p [139.987 s]
      Block Schur preconditioner: 255 GMRES iterations [38.0946 s]
   l_infinity difference between solution vectors: 0.00020793

Refinement cycle 5
   Number of active cells: 45056
   Number of degrees of freedom: 1254464 (1201371+53093) [19.6795 s]
   Assembling... [28.6586 s]
   Computing preconditioner... [22.401 s]
   Solving...
      Schur complement: 14 outer CG iterations for p [796.767 s]
      Block Schur preconditioner: 524 GMRES iterations [355.597 s]
   l_infinity difference between solution vectors: 0.000501219
Here, the block preconditioned solver is clearly superior to the Schur
complement, but the advantage diminishes as the mesh is refined. This is
because GMRES(k) scales worse with the problem size than CG, as we discussed
above. Nonetheless, the improvement by a factor of 3-6 for moderate problem
sizes is quite impressive.
<a name="Combiningtheblockpreconditionerandmultigrid"></a>
<h5>Combining the block preconditioner and multigrid</h5>
An ultimate linear solver for this problem could be imagined as a
combination of an optimal preconditioner for @f$A@f$ (e.g. multigrid) and
the block preconditioner described above, which is the approach taken in
the @ref step_31 "step-31" and @ref step_32 "step-32" tutorial programs
(where we use an algebraic multigrid method) and @ref step_56 "step-56"
(where we use a geometric multigrid method).
<a name="Noblockmatricesandvectors"></a>
<h5>No block matrices and vectors</h5>

Another possibility that can be taken into account is to not set up a block
system, but rather solve the system of velocity and pressure all at once. The
options are direct solve with UMFPACK (2D) or GMRES with ILU
preconditioning (3D). It should be straightforward to try that.
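For the 2D direct-solver option, a sketch could look as follows, assuming
the coupled system were assembled into a single, non-block
<code>SparseMatrix</code> called <code>coupled_matrix</code> (a name used
here for illustration only), with <code>solution</code> and
<code>system_rhs</code> as plain vectors and the header
<code>deal.II/lac/sparse_direct.h</code> included:

      // Sketch: factorize the coupled velocity-pressure matrix once, then
      // apply the factorization to the right hand side; no iterative
      // solver is needed.
      SparseDirectUMFPACK direct_solver;
      direct_solver.initialize (coupled_matrix);
      direct_solver.vmult (solution, system_rhs);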
<a name="Moreinterestingtestcases"></a>
<h4>More interesting testcases</h4>

The program can of course also serve as a basis to compute the flow in more
interesting cases. The original motivation to write this program was for it to
be a starting point for some geophysical flow problems, such as the
movement of magma under places where continental plates drift apart (for
example mid-ocean ridges). Of course, in such places, the geometry is more
complicated than the examples shown above, but it is not hard to accommodate
for that.
For example, by using the following modification of the boundary
values function

      template <int dim>
      double
      BoundaryValues<dim>::value (const Point<dim> &p,
                                  const unsigned int component) const
      {
        Assert (component < this->n_components,
                ExcIndexRange (component, 0, this->n_components));

        const double x_offset = std::atan(p[1]*4)/3;

        if (component == 0)
          return (p[0] < x_offset ? -1 : (p[0] > x_offset ? 1 : 0));
        return 0;
      }
and the following way to generate the mesh as the domain
@f$[-2,2]\times[-2,2]\times[-1,0]@f$

      std::vector<unsigned int> subdivisions (dim, 1);
      subdivisions[0] = 4;
      if (dim > 2)
        subdivisions[1] = 4;

      GridGenerator::subdivided_hyper_rectangle (triangulation,
                                                 subdivisions,
                                                 Point<dim>(-2,-2,-1),
                                                 Point<dim>(2,2,0));
then we get images where the fault line is curved:
<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d-extension.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d-grid-extension.png" alt="">
    </td>
  </tr>
</table>
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-22.cc"