step-22.h
1340 *   Tensor<1, dim> RightHandSide<dim>::value(const Point<dim> & /*p*/) const
1341 *   {
1342 *   return Tensor<1, dim>();
1343 *   }
1344 *  
1345 *  
1346 *   template <int dim>
1347 *   void RightHandSide<dim>::value_list(const std::vector<Point<dim>> &vp,
1348 *   std::vector<Tensor<1, dim>> &values) const
1349 *   {
1350 *   for (unsigned int c = 0; c < vp.size(); ++c)
1351 *   {
1352 *   values[c] = RightHandSide<dim>::value(vp[c]);
1353 *   }
1354 *   }
1355 *  
1356 *  
1357 * @endcode
1358 *
1359 *
1360 * <a name="step_22-Linearsolversandpreconditioners"></a>
1361 * <h3>Linear solvers and preconditioners</h3>
1362 *
1363
1364 *
1365 * The linear solvers and preconditioners are discussed extensively in the
1366 * introduction. Here, we create the respective objects that will be used.
1367 *
1368
1369 *
1370 *
1371 * <a name="step_22-ThecodeInverseMatrixcodeclasstemplate"></a>
1372 * <h4>The <code>InverseMatrix</code> class template</h4>
1373 * The <code>InverseMatrix</code> class represents the data structure for an
1374 * inverse matrix. Unlike @ref step_20 "step-20", we implement this with a class instead of
1375 * the helper function inverse_linear_operator() because we will apply this
1376 * class to different kinds of matrices that will require different preconditioners
1377 * (in @ref step_20 "step-20" we only used a non-identity preconditioner for the mass
1378 * matrix). The types of matrix and preconditioner are passed to this class
1379 * via template parameters, and matrix and preconditioner objects of these
1380 * types will then be passed to the constructor when an
1381 * <code>InverseMatrix</code> object is created. The member function
1382 * <code>vmult</code> is obtained by solving a linear system:
1383 *
1384 * @code
1385 *   template <class MatrixType, class PreconditionerType>
1386 *   class InverseMatrix : public EnableObserverPointer
1387 *   {
1388 *   public:
1389 *   InverseMatrix(const MatrixType &m,
1390 *   const PreconditionerType &preconditioner);
1391 *  
1392 *   void vmult(Vector<double> &dst, const Vector<double> &src) const;
1393 *  
1394 *   private:
1395 *   const ObserverPointer<const MatrixType> matrix;
1396 *   const ObserverPointer<const PreconditionerType> preconditioner;
1397 *   };
1398 *  
1399 *  
1400 *   template <class MatrixType, class PreconditionerType>
1401 *   InverseMatrix<MatrixType, PreconditionerType>::InverseMatrix(
1402 *   const MatrixType &m,
1403 *   const PreconditionerType &preconditioner)
1404 *   : matrix(&m)
1405 *   , preconditioner(&preconditioner)
1406 *   {}
1407 *  
1408 *  
1409 * @endcode
1410 *
1411 * This is the implementation of the <code>vmult</code> function.
1412 *
1413
1414 *
1415 * In this class we use a rather large tolerance for the solver control. The
1416 * reason for this is that the function is used very frequently, and hence,
1417 * any additional effort to make the residual in the CG solve smaller makes
1418 * the solution more expensive. Note that we not only use this class as a
1419 * preconditioner for the Schur complement, but also when forming the
1420 * inverse of the Laplace matrix &ndash; which is therefore directly responsible
1421 * for the accuracy of the solution itself, so we cannot choose too large a
1422 * tolerance, either.
1423 *
1424 * @code
1425 *   template <class MatrixType, class PreconditionerType>
1426 *   void InverseMatrix<MatrixType, PreconditionerType>::vmult(
1427 *   Vector<double> &dst,
1428 *   const Vector<double> &src) const
1429 *   {
1430 *   SolverControl solver_control(src.size(), 1e-6 * src.l2_norm());
1431 *   SolverCG<Vector<double>> cg(solver_control);
1432 *  
1433 *   dst = 0;
1434 *  
1435 *   cg.solve(*matrix, dst, src, *preconditioner);
1436 *   }
1437 *  
1438 *  
1439 * @endcode
1440 *
1441 *
1442 * <a name="step_22-ThecodeSchurComplementcodeclasstemplate"></a>
1443 * <h4>The <code>SchurComplement</code> class template</h4>
1444 *
1445
1446 *
1447 * This class implements the Schur complement discussed in the introduction.
1448 * It is analogous to @ref step_20 "step-20", though we now call it with a template
1449 * parameter <code>PreconditionerType</code> in order to access that type when
1450 * specifying the respective type of the inverse matrix class. As a
1451 * consequence of the definition above, the declaration
1452 * <code>InverseMatrix</code> now contains the second template parameter for
1453 * a preconditioner class as above, which affects the
1454 * <code>ObserverPointer</code> object <code>A_inverse</code> as well.
1455 *
1456 * @code
1457 *   template <class PreconditionerType>
1458 *   class SchurComplement : public EnableObserverPointer
1459 *   {
1460 *   public:
1461 *   SchurComplement(
1462 *   const BlockSparseMatrix<double> &system_matrix,
1463 *   const InverseMatrix<SparseMatrix<double>, PreconditionerType> &A_inverse);
1464 *  
1465 *   void vmult(Vector<double> &dst, const Vector<double> &src) const;
1466 *  
1467 *   private:
1468 *   const ObserverPointer<const BlockSparseMatrix<double>> system_matrix;
1469 *   const ObserverPointer<
1470 *   const InverseMatrix<SparseMatrix<double>, PreconditionerType>>
1471 *   A_inverse;
1472 *  
1473 *   mutable Vector<double> tmp1, tmp2;
1474 *   };
1475 *  
1476 *  
1477 *  
1478 *   template <class PreconditionerType>
1479 *   SchurComplement<PreconditionerType>::SchurComplement(
1480 *   const BlockSparseMatrix<double> &system_matrix,
1481 *   const InverseMatrix<SparseMatrix<double>, PreconditionerType> &A_inverse)
1482 *   : system_matrix(&system_matrix)
1483 *   , A_inverse(&A_inverse)
1484 *   , tmp1(system_matrix.block(0, 0).m())
1485 *   , tmp2(system_matrix.block(0, 0).m())
1486 *   {}
1487 *  
1488 *  
1489 *   template <class PreconditionerType>
1490 *   void
1491 *   SchurComplement<PreconditionerType>::vmult(Vector<double> &dst,
1492 *   const Vector<double> &src) const
1493 *   {
1494 *   system_matrix->block(0, 1).vmult(tmp1, src);
1495 *   A_inverse->vmult(tmp2, tmp1);
1496 *   system_matrix->block(1, 0).vmult(dst, tmp2);
1497 *   }
1498 *  
1499 *  
1500 * @endcode
1501 *
1502 *
1503 * <a name="step_22-StokesProblemclassimplementation"></a>
1504 * <h3>StokesProblem class implementation</h3>
1505 *
1506
1507 *
1508 *
1509 * <a name="step_22-StokesProblemStokesProblem"></a>
1510 * <h4>StokesProblem::StokesProblem</h4>
1511 *
1512
1513 *
1514 * The constructor of this class looks very similar to the one of
1515 * @ref step_20 "step-20". The constructor initializes the variables for the polynomial
1516 * degree, triangulation, finite element system and the dof handler. The
1517 * underlying polynomial functions are of order <code>degree+1</code> for
1518 * the vector-valued velocity components and of order <code>degree</code>
1519 * for the pressure. This gives the LBB-stable element pair
1520 * @f$Q_{degree+1}^d\times Q_{degree}@f$, often referred to as the Taylor-Hood
1521 * element.
1522 *
1523
1524 *
1525 * Note that we initialize the triangulation with a MeshSmoothing argument,
1526 * which ensures that the refinement of cells is done in a way that the
1527 * approximation of the PDE solution remains well-behaved (problems arise if
1528 * grids are too unstructured), see the documentation of
1529 * <code>Triangulation::MeshSmoothing</code> for details.
1530 *
1531 * @code
1532 *   template <int dim>
1533 *   StokesProblem<dim>::StokesProblem(const unsigned int degree)
1534 *   : degree(degree)
1535 *   , triangulation(Triangulation<dim>::maximum_smoothing)
1536 *   , fe(FE_Q<dim>(degree + 1) ^ dim, FE_Q<dim>(degree))
1537 *   , dof_handler(triangulation)
1538 *   {}
1539 *  
1540 *  
1541 * @endcode
1542 *
1543 *
1544 * <a name="step_22-StokesProblemsetup_dofs"></a>
1545 * <h4>StokesProblem::setup_dofs</h4>
1546 *
1547
1548 *
1549 * Given a mesh, this function associates the degrees of freedom with it and
1550 * creates the corresponding matrices and vectors. At the beginning it also
1551 * releases the pointer to the preconditioner object (if the shared pointer
1552 * pointed at anything at all at this point) since it will definitely not be
1553 * needed any more after this point and will have to be re-computed after
1554 * assembling the matrix, and unties the sparse matrices from their sparsity
1555 * pattern objects.
1556 *
1557
1558 *
1559 * We then proceed with distributing degrees of freedom and renumbering
1560 * them: In order to make the ILU preconditioner (in 3d) work efficiently,
1561 * it is important to enumerate the degrees of freedom in such a way that it
1562 * reduces the bandwidth of the matrix, or maybe more importantly: in such a
1563 * way that the ILU is as close as possible to a real LU decomposition. On
1564 * the other hand, we need to preserve the block structure of velocity and
1565 * pressure already seen in @ref step_20 "step-20" and @ref step_21 "step-21". This is done in two
1566 * steps: First, all dofs are renumbered to improve the ILU and then we
1567 * renumber once again by components. Since
1568 * <code>DoFRenumbering::component_wise</code> does not touch the
1569 * renumbering within the individual blocks, the basic renumbering from the
1570 * first step remains. As for how to renumber degrees of freedom to improve
1571 * the ILU: deal.II has a number of algorithms that attempt to find
1572 * orderings to improve ILUs, or reduce the bandwidth of matrices, or
1573 * optimize some other aspect. The DoFRenumbering namespace shows a
1574 * comparison of the results we obtain with several of these algorithms
1575 * based on the testcase discussed here in this tutorial program. Here, we
1576 * will use the traditional Cuthill-McKee algorithm already used in some of
1577 * the previous tutorial programs. In the <a href="#improved-ilu">section
1578 * on improved ILU</a> we're going to discuss this issue in more detail.
1579 *
1580
1581 *
1582 * There is one more change compared to previous tutorial programs: There is
1583 * no reason to sort the <code>dim</code> velocity components
1584 * individually. In fact, rather than first enumerating all @f$x@f$-velocities,
1585 * then all @f$y@f$-velocities, etc, we would like to keep all velocities at the
1586 * same location together and only separate between velocities (all
1587 * components) and pressures. By default, this is not what the
1588 * DoFRenumbering::component_wise function does: it treats each vector
1589 * component separately; what we have to do is group several components into
1590 * "blocks" and pass this block structure to that function. Consequently, we
1591 * allocate a vector <code>block_component</code> with as many elements as
1592 * there are components and describe all velocity components to correspond
1593 * to block 0, while the pressure component will form block 1:
1594 *
1595 * @code
1596 *   template <int dim>
1597 *   void StokesProblem<dim>::setup_dofs()
1598 *   {
1599 *   A_preconditioner.reset();
1600 *   system_matrix.clear();
1601 *   preconditioner_matrix.clear();
1602 *  
1603 *   dof_handler.distribute_dofs(fe);
1604 *   DoFRenumbering::Cuthill_McKee(dof_handler);
1605 *  
1606 *   std::vector<unsigned int> block_component(dim + 1, 0);
1607 *   block_component[dim] = 1;
1608 *   DoFRenumbering::component_wise(dof_handler, block_component);
1609 *  
1610 * @endcode
1611 *
1612 * Now comes the implementation of Dirichlet boundary conditions, which
1613 * should be evident after the discussion in the introduction. All that
1614 * changed is that the function already appears in the setup functions,
1615 * whereas we were used to seeing it in some assembly routine. Further down
1616 * below where we set up the mesh, we will associate the top boundary
1617 * where we impose Dirichlet boundary conditions with boundary indicator
1618 * 1. We will have to pass this boundary indicator as the second argument to
1619 * the function below interpolating boundary values. There is one more
1620 * thing, though. The function describing the Dirichlet conditions was
1621 * defined for all components, both velocity and pressure. However, the
1622 * Dirichlet conditions are to be set for the velocity only. To this end,
1623 * we use a ComponentMask that only selects the velocity components. The
1624 * component mask is obtained from the finite element by specifying the
1625 * particular components we want. Since we use adaptively refined grids,
1626 * the affine constraints object needs to be first filled with hanging node
1627 * constraints generated from the DoF handler. Note the order of the two
1628 * functions &mdash; we first compute the hanging node constraints, and
1629 * then insert the boundary values into the constraints object. This makes
1630 * sure that we respect H<sup>1</sup> conformity on boundaries with
1631 * hanging nodes (in three space dimensions), where the hanging node needs
1632 * to dominate the Dirichlet boundary values.
1633 *
1634 * @code
1635 *   {
1636 *   constraints.clear();
1637 *  
1638 *   const FEValuesExtractors::Vector velocities(0);
1639 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
1640 *   VectorTools::interpolate_boundary_values(dof_handler,
1641 *   1,
1642 *   BoundaryValues<dim>(),
1643 *   constraints,
1644 *   fe.component_mask(velocities));
1645 *   }
1646 *  
1647 *   constraints.close();
1648 *  
1649 * @endcode
1650 *
1651 * In analogy to @ref step_20 "step-20", we count the dofs in the individual components.
1652 * We could do this in the same way as there, but we want to operate on
1653 * the block structure we used already for the renumbering: The function
1654 * <code>DoFTools::count_dofs_per_fe_block</code> does the same as
1655 * <code>DoFTools::count_dofs_per_fe_component</code>, but now grouped as
1656 * velocity and pressure block via <code>block_component</code>.
1657 *
1658 * @code
1659 *   const std::vector<types::global_dof_index> dofs_per_block =
1660 *   DoFTools::count_dofs_per_fe_block(dof_handler, block_component);
1661 *   const types::global_dof_index n_u = dofs_per_block[0];
1662 *   const types::global_dof_index n_p = dofs_per_block[1];
1663 *  
1664 *   std::cout << " Number of active cells: " << triangulation.n_active_cells()
1665 *   << std::endl
1666 *   << " Number of degrees of freedom: " << dof_handler.n_dofs()
1667 *   << " (" << n_u << '+' << n_p << ')' << std::endl;
1668 *  
1669 * @endcode
1670 *
1671 * The next task is to allocate a sparsity pattern for the system matrix we
1672 * will create and one for the preconditioner matrix. We could do this in
1673 * the same way as in @ref step_20 "step-20", i.e. directly build an object of type
1674 * SparsityPattern through DoFTools::make_sparsity_pattern. However, there
1675 * is a major reason not to do so:
1676 * In 3d, the function DoFTools::max_couplings_between_dofs yields a
1677 * conservative but rather large number for the coupling between the
1678 * individual dofs, so that the memory initially provided for the creation
1679 * of the sparsity pattern of the matrix is far too much -- so much actually
1680 * that the initial sparsity pattern won't even fit into the physical memory
1681 * of most systems already for moderately-sized 3d problems, see also the
1682 * discussion in @ref step_18 "step-18". Instead, we first build temporary objects that use
1683 * a different data structure that doesn't require allocating more memory
1684 * than necessary but isn't suitable for use as a basis of SparseMatrix or
1685 * BlockSparseMatrix objects; in a second step we then copy these objects
1686 * into objects of type BlockSparsityPattern. This is entirely analogous to
1687 * what we already did in @ref step_11 "step-11" and @ref step_18 "step-18". In particular, we make use of
1688 * the fact that we will never write into the @f$(1,1)@f$ block of the system
1689 * matrix and that this is the only block to be filled for the
1690 * preconditioner matrix.
1691 *
1692
1693 *
1694 * All this is done inside new scopes, which means that the memory of
1695 * <code>dsp</code> will be released once the information has been copied to
1696 * <code>sparsity_pattern</code>.
1697 *
1698 * @code
1699 *   {
1700 *   BlockDynamicSparsityPattern dsp(dofs_per_block, dofs_per_block);
1701 *  
1702 *   Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
1703 *   for (unsigned int c = 0; c < dim + 1; ++c)
1704 *   for (unsigned int d = 0; d < dim + 1; ++d)
1705 *   if (!((c == dim) && (d == dim)))
1706 *   coupling[c][d] = DoFTools::always;
1707 *   else
1708 *   coupling[c][d] = DoFTools::none;
1709 *  
1710 *   DoFTools::make_sparsity_pattern(
1711 *   dof_handler, coupling, dsp, constraints, false);
1712 *  
1713 *   sparsity_pattern.copy_from(dsp);
1714 *   }
1715 *  
1716 *   {
1717 *   BlockDynamicSparsityPattern preconditioner_dsp(dofs_per_block,
1718 *   dofs_per_block);
1719 *  
1720 *   Table<2, DoFTools::Coupling> preconditioner_coupling(dim + 1, dim + 1);
1721 *   for (unsigned int c = 0; c < dim + 1; ++c)
1722 *   for (unsigned int d = 0; d < dim + 1; ++d)
1723 *   if (((c == dim) && (d == dim)))
1724 *   preconditioner_coupling[c][d] = DoFTools::always;
1725 *   else
1726 *   preconditioner_coupling[c][d] = DoFTools::none;
1727 *  
1728 *   DoFTools::make_sparsity_pattern(dof_handler,
1729 *   preconditioner_coupling,
1730 *   preconditioner_dsp,
1731 *   constraints,
1732 *   false);
1733 *  
1734 *   preconditioner_sparsity_pattern.copy_from(preconditioner_dsp);
1735 *   }
1736 *  
1737 * @endcode
1738 *
1739 * Finally, the system matrix, the preconditioner matrix, the solution and
1740 * the right hand side vector are created from the block structure similar
1741 * to the approach in @ref step_20 "step-20":
1742 *
1743 * @code
1744 *   system_matrix.reinit(sparsity_pattern);
1745 *   preconditioner_matrix.reinit(preconditioner_sparsity_pattern);
1746 *  
1747 *   solution.reinit(dofs_per_block);
1748 *   system_rhs.reinit(dofs_per_block);
1749 *   }
1750 *  
1751 *  
1752 * @endcode
1753 *
1754 *
1755 * <a name="step_22-StokesProblemassemble_system"></a>
1756 * <h4>StokesProblem::assemble_system</h4>
1757 *
1758
1759 *
1760 * The assembly process follows the discussion in @ref step_20 "step-20" and in the
1761 * introduction. We use the well-known abbreviations for the data structures
1762 * that hold the local matrices, right hand side, and global numbering of the
1763 * degrees of freedom for the present cell.
1764 *
1765 * @code
1766 *   template <int dim>
1767 *   void StokesProblem<dim>::assemble_system()
1768 *   {
1769 *   system_matrix = 0;
1770 *   system_rhs = 0;
1771 *   preconditioner_matrix = 0;
1772 *  
1773 *   const QGauss<dim> quadrature_formula(degree + 2);
1774 *  
1775 *   FEValues<dim> fe_values(fe,
1776 *   quadrature_formula,
1777 *   update_values | update_quadrature_points |
1778 *   update_JxW_values | update_gradients);
1779 *  
1780 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1781 *  
1782 *   const unsigned int n_q_points = quadrature_formula.size();
1783 *  
1784 *   FullMatrix<double> local_matrix(dofs_per_cell, dofs_per_cell);
1785 *   FullMatrix<double> local_preconditioner_matrix(dofs_per_cell,
1786 *   dofs_per_cell);
1787 *   Vector<double> local_rhs(dofs_per_cell);
1788 *  
1789 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1790 *  
1791 *   const RightHandSide<dim> right_hand_side;
1792 *   std::vector<Tensor<1, dim>> rhs_values(n_q_points, Tensor<1, dim>());
1793 *  
1794 * @endcode
1795 *
1796 * Next, we need two objects that work as extractors for the FEValues
1797 * object. Their use is explained in detail in the report on @ref
1798 * vector_valued :
1799 *
1800 * @code
1801 *   const FEValuesExtractors::Vector velocities(0);
1802 *   const FEValuesExtractors::Scalar pressure(dim);
1803 *  
1804 * @endcode
1805 *
1806 * As an extension over @ref step_20 "step-20" and @ref step_21 "step-21", we include a few optimizations
1807 * that make assembly much faster for this particular problem. The
1808 * improvements are based on the observation that we would repeat some
1809 * calculations unnecessarily often if we proceeded as in @ref step_20 "step-20": The symmetric gradient actually
1810 * has <code>dofs_per_cell</code> different values per quadrature point, but
1811 * we extract it <code>dofs_per_cell*dofs_per_cell</code> times from the
1812 * FEValues object - for both the loop over <code>i</code> and the inner
1813 * loop over <code>j</code>. In 3d, that means evaluating it @f$89^2=7921@f$
1814 * instead of @f$89@f$ times, a not insignificant difference.
1815 *
1816
1817 *
1818 * So what we're going to do here is to avoid such repeated calculations
1819 * by getting a vector of rank-2 tensors (and similarly for the divergence
1820 * and the basis function value on pressure) at the quadrature point prior
1821 * to starting the loop over the dofs on the cell. First, we create the
1822 * respective objects that will hold these values. Then, we start the loop
1823 * over all cells and the loop over the quadrature points, where we first
1824 * extract these values. There is one more optimization we implement here:
1825 * the local matrix (as well as the global one) is going to be symmetric,
1826 * since all the operations involved are symmetric with respect to @f$i@f$ and
1827 * @f$j@f$. This is implemented by simply running the inner loop not to
1828 * <code>dofs_per_cell</code>, but only up to <code>i</code>, the index of
1829 * the outer loop.
1830 *
1831 * @code
1832 *   std::vector<SymmetricTensor<2, dim>> symgrad_phi_u(dofs_per_cell);
1833 *   std::vector<double> div_phi_u(dofs_per_cell);
1834 *   std::vector<Tensor<1, dim>> phi_u(dofs_per_cell);
1835 *   std::vector<double> phi_p(dofs_per_cell);
1836 *  
1837 *   for (const auto &cell : dof_handler.active_cell_iterators())
1838 *   {
1839 *   fe_values.reinit(cell);
1840 *  
1841 *   local_matrix = 0;
1842 *   local_preconditioner_matrix = 0;
1843 *   local_rhs = 0;
1844 *  
1845 *   right_hand_side.value_list(fe_values.get_quadrature_points(),
1846 *   rhs_values);
1847 *  
1848 *   for (unsigned int q = 0; q < n_q_points; ++q)
1849 *   {
1850 *   for (unsigned int k = 0; k < dofs_per_cell; ++k)
1851 *   {
1852 *   symgrad_phi_u[k] =
1853 *   fe_values[velocities].symmetric_gradient(k, q);
1854 *   div_phi_u[k] = fe_values[velocities].divergence(k, q);
1855 *   phi_u[k] = fe_values[velocities].value(k, q);
1856 *   phi_p[k] = fe_values[pressure].value(k, q);
1857 *   }
1858 *  
1859 * @endcode
1860 *
1861 * Now finally for the bilinear forms of both the system matrix and
1862 * the matrix we use for the preconditioner. Recall that the
1863 * formulas for these two are
1864 * @f{align*}{
1865 * A_{ij} &= a(\varphi_i,\varphi_j)
1866 * \\ &= \underbrace{2(\varepsilon(\varphi_{i,\textbf{u}}),
1867 * \varepsilon(\varphi_{j,\textbf{u}}))_{\Omega}}
1868 * _{(1)}
1869 * \;
1870 * \underbrace{- (\textrm{div}\; \varphi_{i,\textbf{u}},
1871 * \varphi_{j,p})_{\Omega}}
1872 * _{(2)}
1873 * \;
1874 * \underbrace{- (\varphi_{i,p},
1875 * \textrm{div}\;
1876 * \varphi_{j,\textbf{u}})_{\Omega}}
1877 * _{(3)}
1878 * @f}
1879 * and
1880 * @f{align*}{
1881 * M_{ij} &= \underbrace{(\varphi_{i,p},
1882 * \varphi_{j,p})_{\Omega}}
1883 * _{(4)},
1884 * @f}
1885 * respectively, where @f$\varphi_{i,\textbf{u}}@f$ and @f$\varphi_{i,p}@f$
1886 * are the velocity and pressure components of the @f$i@f$th shape
1887 * function. The various terms above are then easily recognized in
1888 * the following implementation:
1889 *
1890 * @code
1891 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1892 *   {
1893 *   for (unsigned int j = 0; j <= i; ++j)
1894 *   {
1895 *   local_matrix(i, j) +=
1896 *   (2 * (symgrad_phi_u[i] * symgrad_phi_u[j]) // (1)
1897 *   - div_phi_u[i] * phi_p[j] // (2)
1898 *   - phi_p[i] * div_phi_u[j]) // (3)
1899 *   * fe_values.JxW(q); // * dx
1900 *  
1901 *   local_preconditioner_matrix(i, j) +=
1902 *   (phi_p[i] * phi_p[j]) // (4)
1903 *   * fe_values.JxW(q); // * dx
1904 *   }
1905 * @endcode
1906 *
1907 * Note that in the implementation of (1) above, `operator*`
1908 * is overloaded for symmetric tensors, yielding the scalar
1909 * product between the two tensors.
1910 *
1911
1912 *
1913 * For the right-hand side, we need to multiply the (vector of)
1914 * velocity shape functions with the vector of body force
1915 * right-hand side components, both evaluated at the current
1916 * quadrature point. We have implemented the body forces as a
1917 * `TensorFunction<1,dim>`, so its values at quadrature points
1918 * are already tensors for which the application of `operator*`
1919 * against the velocity components of the shape function results
1920 * in the dot product, as intended.
1921 *
1922 * @code
1923 *   local_rhs(i) += phi_u[i] // phi_u_i(x_q)
1924 *   * rhs_values[q] // * f(x_q)
1925 *   * fe_values.JxW(q); // * dx
1926 *   }
1927 *   }
1928 *  
1929 * @endcode
1930 *
1931 * Before we can write the local data into the global matrix (and
1932 * simultaneously use the AffineConstraints object to apply
1933 * Dirichlet boundary conditions and eliminate hanging node constraints,
1934 * as we discussed in the introduction), we have to be careful about one
1935 * thing, though. We have only built half of the local matrices
1936 * because of symmetry, but we're going to save the full matrices
1937 * in order to use the standard functions for solving. This is done
1938 * by flipping the indices in case we are pointing into the empty part
1939 * of the local matrices.
1940 *
1941 * @code
1942 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1943 *   for (unsigned int j = i + 1; j < dofs_per_cell; ++j)
1944 *   {
1945 *   local_matrix(i, j) = local_matrix(j, i);
1946 *   local_preconditioner_matrix(i, j) =
1947 *   local_preconditioner_matrix(j, i);
1948 *   }
1949 *  
1950 *   cell->get_dof_indices(local_dof_indices);
1951 *   constraints.distribute_local_to_global(local_matrix,
1952 *   local_rhs,
1953 *   local_dof_indices,
1954 *   system_matrix,
1955 *   system_rhs);
1956 *   constraints.distribute_local_to_global(local_preconditioner_matrix,
1957 *   local_dof_indices,
1958 *   preconditioner_matrix);
1959 *   }
1960 *  
1961 * @endcode
1962 *
1963 * Before we're going to solve this linear system, we generate a
1964 * preconditioner for the velocity-velocity matrix, i.e.,
1965 * <code>block(0,0)</code> in the system matrix. As mentioned above, this
1966 * depends on the spatial dimension. Since the two classes described by
1967 * the <code>InnerPreconditioner::type</code> alias have the same
1968 * interface, we do not have to do anything different whether we want to
1969 * use a sparse direct solver or an ILU:
1970 *
1971 * @code
1972 *   std::cout << " Computing preconditioner..." << std::endl << std::flush;
1973 *  
1974 *   A_preconditioner =
1975 *   std::make_shared<typename InnerPreconditioner<dim>::type>();
1976 *   A_preconditioner->initialize(
1977 *   system_matrix.block(0, 0),
1978 *   typename InnerPreconditioner<dim>::type::AdditionalData());
1979 *   }
1980 *  
1981 *  
1982 *  
1983 * @endcode
1984 *
1985 *
1986 * <a name="step_22-StokesProblemsolve"></a>
1987 * <h4>StokesProblem::solve</h4>
1988 *
1989
1990 *
1991 * After the discussion in the introduction and the definition of the
1992 * respective classes above, the implementation of the <code>solve</code>
1993 * function is rather straight-forward and done in a similar way as in
1994 * @ref step_20 "step-20". To start with, we need an object of the
1995 * <code>InverseMatrix</code> class that represents the inverse of the
1996 * matrix A. As described in the introduction, the inverse is generated with
1997 * the help of an inner preconditioner of type
1998 * <code>InnerPreconditioner::type</code>.
1999 *
2000 * @code
2001 *   template <int dim>
2002 *   void StokesProblem<dim>::solve()
2003 *   {
2004 *   const InverseMatrix<SparseMatrix<double>,
2005 *   typename InnerPreconditioner<dim>::type>
2006 *   A_inverse(system_matrix.block(0, 0), *A_preconditioner);
2007 *   Vector<double> tmp(solution.block(0).size());
2008 *  
2009 * @endcode
2010 *
2011 * This is as in @ref step_20 "step-20". We generate the right hand side @f$B A^{-1} F - G@f$
2012 * for the Schur complement and an object that represents the respective
2013 * linear operation @f$B A^{-1} B^T@f$, now with a template parameter
2014 * indicating the preconditioner - in accordance with the definition of
2015 * the class.
2016 *
2017 * @code
2018 *   {
2019 *   Vector<double> schur_rhs(solution.block(1).size());
2020 *   A_inverse.vmult(tmp, system_rhs.block(0));
2021 *   system_matrix.block(1, 0).vmult(schur_rhs, tmp);
2022 *   schur_rhs -= system_rhs.block(1);
2023 *  
2024 *   SchurComplement<typename InnerPreconditioner<dim>::type> schur_complement(
2025 *   system_matrix, A_inverse);
2026 *  
2027 * @endcode
2028 *
2029 * The usual control structures for the solver call are created...
2030 *
2031 * @code
2032 *   SolverControl solver_control(solution.block(1).size(),
2033 *   1e-6 * schur_rhs.l2_norm());
2034 *   SolverCG<Vector<double>> cg(solver_control);
2035 *  
2036 * @endcode
2037 *
2038 * Now to the preconditioner to the Schur complement. As explained in
2039 * the introduction, the preconditioning is done by a @ref GlossMassMatrix "mass matrix" in the
2040 * pressure variable.
2041 *
2042
2043 *
2044 * Actually, the solver needs to have the preconditioner in the form
2045 * @f$P^{-1}@f$, so we need to create an inverse operation. Once again, we
2046 * use an object of the class <code>InverseMatrix</code>, which
2047 * implements the <code>vmult</code> operation that is needed by the
2048 * solver. In this case, we have to invert the pressure mass matrix. As
2049 * it already turned out in earlier tutorial programs, the inversion of
2050 * a mass matrix is a rather cheap and straight-forward operation
2051 * (compared to, e.g., a Laplace matrix). The CG method with ILU
2052 * preconditioning converges in 5-10 steps, independently of the mesh
2053 * size. This is precisely what we do here: We choose another ILU
2054 * preconditioner and take it along to the InverseMatrix object via the
2055 * corresponding template parameter. A CG solver is then called within
2056 * the vmult operation of the inverse matrix.
2057 *
2058
2059 *
2060 * An alternative that is cheaper to build, but needs more iterations
2061 * afterwards, would be to choose a SSOR preconditioner with factor
2062 * 1.2. It needs about twice the number of iterations, but the costs for
2063 * its generation are almost negligible.
2064 *
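 * As a minimal sketch (not part of this program) of what that alternative
 * could look like &ndash; it assumes the same <code>preconditioner_matrix</code>
 * and <code>InverseMatrix</code> class as in the surrounding code; the
 * variable names are illustrative:
 *
 * @code
 *   PreconditionSSOR<SparseMatrix<double>> ssor_preconditioner;
 *   // The relaxation factor 1.2 is passed through the AdditionalData object:
 *   ssor_preconditioner.initialize(
 *     preconditioner_matrix.block(1, 1),
 *     PreconditionSSOR<SparseMatrix<double>>::AdditionalData(1.2));
 *
 *   InverseMatrix<SparseMatrix<double>, PreconditionSSOR<SparseMatrix<double>>>
 *     m_inverse(preconditioner_matrix.block(1, 1), ssor_preconditioner);
 * @endcode
 *
 * In this program, we stick with the ILU:
 *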
2065 * @code
2066 *   SparseILU<double> preconditioner;
2067 *   preconditioner.initialize(preconditioner_matrix.block(1, 1),
2068 *   SparseILU<double>::AdditionalData());
2069 *  
2070 *   InverseMatrix<SparseMatrix<double>, SparseILU<double>> m_inverse(
2071 *   preconditioner_matrix.block(1, 1), preconditioner);
2072 *  
2073 * @endcode
2074 *
2075 * With the Schur complement and an efficient preconditioner at hand, we
2076 * can solve the respective equation for the pressure (i.e. block 1 in
2077 * the solution vector) in the usual way:
2078 *
2079 * @code
2080 *   cg.solve(schur_complement, solution.block(1), schur_rhs, m_inverse);
2081 *  
2082 * @endcode
2083 *
2084 * After this first solution step, the hanging node constraints have to
2085 * be distributed to the solution in order to achieve a consistent
2086 * pressure field.
2087 *
2088 * @code
2089 *   constraints.distribute(solution);
2090 *  
2091 *   std::cout << " " << solver_control.last_step()
2092 *   << " outer CG Schur complement iterations for pressure"
2093 *   << std::endl;
2094 *   }
2095 *  
2096 * @endcode
2097 *
2098 * As in @ref step_20 "step-20", we finally need to solve for the velocity equation where
2099 * we plug in the solution to the pressure equation. This involves only
2100 * objects we already know - so we simply multiply @f$p@f$ by @f$B^T@f$, subtract
2101 * the right hand side and multiply by the inverse of @f$A@f$. At the end, we
2102 * need to distribute the constraints from hanging nodes in order to
2103 * obtain a consistent flow field:
2104 *
2105 * @code
2106 *   {
2107 *   system_matrix.block(0, 1).vmult(tmp, solution.block(1));
2108 *   tmp *= -1;
2109 *   tmp += system_rhs.block(0);
2110 *  
2111 *   A_inverse.vmult(solution.block(0), tmp);
2112 *  
2113 *   constraints.distribute(solution);
2114 *   }
2115 *   }
2116 *  
2117 *  
2118 * @endcode
2119 *
2120 *
2121 * <a name="step_22-StokesProblemoutput_results"></a>
2122 * <h4>StokesProblem::output_results</h4>
2123 *
2124
2125 *
2126 * The next function generates graphical output. In this example, we are
2127 * going to use the VTK file format. We attach names to the individual
2128 * variables in the problem: <code>velocity</code> to the <code>dim</code>
2129 * components of velocity and <code>pressure</code> to the pressure.
2130 *
2131
2132 *
2133 * Not all visualization programs have the ability to group individual
2134 * vector components into a vector to provide vector plots; in particular,
2135 * this holds for some VTK-based visualization programs. In this case, the
2136 * logical grouping of components into vectors should already be described
2137 * in the file containing the data. In other words, what we need to do is
2138 * provide our output writers with a way to know which of the components of
2139 * the finite element logically form a vector (with @f$d@f$ components in @f$d@f$
2140 * space dimensions) rather than letting them assume that we simply have a
2141 * bunch of scalar fields. This is achieved using the members of the
2142 * <code>DataComponentInterpretation</code> namespace: as with the filename,
2143 * we create a vector in which the first <code>dim</code> components refer
2144 * to the velocities and are given the tag
2145 * DataComponentInterpretation::component_is_part_of_vector; we
2146 * finally push one tag
2147 * DataComponentInterpretation::component_is_scalar to describe
2148 * the grouping of the pressure variable.
2149 *
2150
2151 *
2152 * The rest of the function is then the same as in @ref step_20 "step-20".
2153 *
2154 * @code
2155 *   template <int dim>
2156 *   void
2157 *   StokesProblem<dim>::output_results(const unsigned int refinement_cycle) const
2158 *   {
2159 *   std::vector<std::string> solution_names(dim, "velocity");
2160 *   solution_names.emplace_back("pressure");
2161 *  
2162 *   std::vector<DataComponentInterpretation::DataComponentInterpretation>
2163 *   data_component_interpretation(
2164 *   dim, DataComponentInterpretation::component_is_part_of_vector);
2165 *   data_component_interpretation.push_back(
2166 *   DataComponentInterpretation::component_is_scalar);
2167 *  
2168 *   DataOut<dim> data_out;
2169 *   data_out.attach_dof_handler(dof_handler);
2170 *   data_out.add_data_vector(solution,
2171 *   solution_names,
2172 *   DataOut<dim>::type_dof_data,
2173 *   data_component_interpretation);
2174 *   data_out.build_patches();
2175 *  
2176 *   std::ofstream output(
2177 *   "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtk");
2178 *   data_out.write_vtk(output);
2179 *   }
2180 *  
2181 *  
2182 * @endcode
2183 *
2184 *
2185 * <a name="step_22-StokesProblemrefine_mesh"></a>
2186 * <h4>StokesProblem::refine_mesh</h4>
2187 *
2188
2189 *
2190 * This is the last interesting function of the <code>StokesProblem</code>
2191 * class. As indicated by its name, it takes the solution to the problem
2192 * and refines the mesh where this is needed. The procedure is the same as
2193 * in the respective step in @ref step_6 "step-6", with the exception that we base the
2194 * refinement only on the change in pressure, i.e., we call the Kelly error
2195 * estimator with a mask object of type ComponentMask that selects the
2196 * single scalar component for the pressure that we are interested in (we
2197 * get such a mask from the finite element class by specifying the component
2198 * we want). Additionally, we do not coarsen the grid again:
2199 *
2200 * @code
2201 *   template <int dim>
2202 *   void StokesProblem<dim>::refine_mesh()
2203 *   {
2204 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
2205 *  
2206 *   const FEValuesExtractors::Scalar pressure(dim);
2207 *   KellyErrorEstimator<dim>::estimate(
2208 *   dof_handler,
2209 *   QGauss<dim - 1>(degree + 1),
2210 *   std::map<types::boundary_id, const Function<dim> *>(),
2211 *   solution,
2212 *   estimated_error_per_cell,
2213 *   fe.component_mask(pressure));
2214 *  
2215 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
2216 *   estimated_error_per_cell,
2217 *   0.3,
2218 *   0.0);
2219 *   triangulation.execute_coarsening_and_refinement();
2220 *   }
2221 *  
2222 *  
2223 * @endcode
2224 *
2225 *
2226 * <a name="step_22-StokesProblemrun"></a>
2227 * <h4>StokesProblem::run</h4>
2228 *
2229
2230 *
2231 * The last step in the Stokes class is, as usual, the function that
2232 * generates the initial grid and calls the other functions in the
2233 * respective order.
2234 *
2235
2236 *
2237 * We start off with a rectangle of size @f$4 \times 1@f$ (in 2d) or @f$4 \times 1
2238 * \times 1@f$ (in 3d), placed in @f$R^2/R^3@f$ as @f$(-2,2)\times(-1,0)@f$ or
2239 * @f$(-2,2)\times(0,1)\times(-1,0)@f$, respectively. It is natural to start
2240 * with equal mesh size in each direction, so we subdivide the initial
2241 * rectangle four times in the first coordinate direction. To limit the
2242 * scope of the variables involved in the creation of the mesh to the range
2243 * where we actually need them, we put the entire block between a pair of
2244 * braces:
2245 *
2246 * @code
2247 *   template <int dim>
2248 *   void StokesProblem<dim>::run()
2249 *   {
2250 *   {
2251 *   std::vector<unsigned int> subdivisions(dim, 1);
2252 *   subdivisions[0] = 4;
2253 *  
2254 *   const Point<dim> bottom_left = (dim == 2 ?
2255 *   Point<dim>(-2, -1) : // 2d case
2256 *   Point<dim>(-2, 0, -1)); // 3d case
2257 *  
2258 *   const Point<dim> top_right = (dim == 2 ?
2259 *   Point<dim>(2, 0) : // 2d case
2260 *   Point<dim>(2, 1, 0)); // 3d case
2261 *  
2262 *   GridGenerator::subdivided_hyper_rectangle(triangulation,
2263 *   subdivisions,
2264 *   bottom_left,
2265 *   top_right);
2266 *   }
2267 *  
2268 * @endcode
2269 *
2270 * A boundary indicator of 1 is set to all boundaries that are subject to
2271 * Dirichlet boundary conditions, i.e. to faces that are located at 0 in
2272 * the last coordinate direction. See the example description above for
2273 * details.
2274 *
2275 * @code
2276 *   for (const auto &cell : triangulation.active_cell_iterators())
2277 *   for (const auto &face : cell->face_iterators())
2278 *   if (face->center()[dim - 1] == 0)
2279 *   face->set_all_boundary_ids(1);
2280 *  
2281 *  
2282 * @endcode
2283 *
2284 * We then apply an initial refinement before solving for the first
2285 * time. In 3d, there are going to be more degrees of freedom, so we
2286 * refine less there:
2287 *
2288 * @code
2289 *   triangulation.refine_global(4 - dim);
2290 *  
2291 * @endcode
2292 *
2293 * As first seen in @ref step_6 "step-6", we cycle over the different refinement levels
2294 * and refine (except for the first cycle), setup the degrees of freedom
2295 * and matrices, assemble, solve and create output:
2296 *
2297 * @code
2298 *   for (unsigned int refinement_cycle = 0; refinement_cycle < 6;
2299 *   ++refinement_cycle)
2300 *   {
2301 *   std::cout << "Refinement cycle " << refinement_cycle << std::endl;
2302 *  
2303 *   if (refinement_cycle > 0)
2304 *   refine_mesh();
2305 *  
2306 *   setup_dofs();
2307 *  
2308 *   std::cout << " Assembling..." << std::endl << std::flush;
2309 *   assemble_system();
2310 *  
2311 *   std::cout << " Solving..." << std::flush;
2312 *   solve();
2313 *  
2314 *   output_results(refinement_cycle);
2315 *  
2316 *   std::cout << std::endl;
2317 *   }
2318 *   }
2319 *   } // namespace Step22
2320 *  
2321 *  
2322 * @endcode
2323 *
2324 *
2325 * <a name="step_22-Thecodemaincodefunction"></a>
2326 * <h3>The <code>main</code> function</h3>
2327 *
2328
2329 *
2330 * The main function is the same as in @ref step_20 "step-20". We pass the element degree as
2331 * a parameter and choose the space dimension at the well-known template slot.
2332 *
2333 * @code
2334 *   int main()
2335 *   {
2336 *   try
2337 *   {
2338 *   using namespace Step22;
2339 *  
2340 *   StokesProblem<2> flow_problem(1);
2341 *   flow_problem.run();
2342 *   }
2343 *   catch (std::exception &exc)
2344 *   {
2345 *   std::cerr << std::endl
2346 *   << std::endl
2347 *   << "----------------------------------------------------"
2348 *   << std::endl;
2349 *   std::cerr << "Exception on processing: " << std::endl
2350 *   << exc.what() << std::endl
2351 *   << "Aborting!" << std::endl
2352 *   << "----------------------------------------------------"
2353 *   << std::endl;
2354 *  
2355 *   return 1;
2356 *   }
2357 *   catch (...)
2358 *   {
2359 *   std::cerr << std::endl
2360 *   << std::endl
2361 *   << "----------------------------------------------------"
2362 *   << std::endl;
2363 *   std::cerr << "Unknown exception!" << std::endl
2364 *   << "Aborting!" << std::endl
2365 *   << "----------------------------------------------------"
2366 *   << std::endl;
2367 *   return 1;
2368 *   }
2369 *  
2370 *   return 0;
2371 *   }
2372 * @endcode
<a name="step_22-Results"></a><h1>Results</h1>

<a name="step_22-Outputoftheprogramandgraphicalvisualization"></a><h3>Output of the program and graphical visualization</h3>

<a name="step_22-2Dcalculations"></a><h4>2D calculations</h4>

Running the program with the space dimension set to 2 in the <code>main</code>
function yields the following output (in "release mode"; see also the
<a href="https://www.math.colostate.edu/~bangerth/videos.676.18.html">video lecture 18</a>):
@code
examples/step-22> make run
Refinement cycle 0
 Number of active cells: 64
 Number of degrees of freedom: 679 (594+85)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 1
 Number of active cells: 160
 Number of degrees of freedom: 1683 (1482+201)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 2
 Number of active cells: 376
 Number of degrees of freedom: 3813 (3370+443)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 3
 Number of active cells: 880
 Number of degrees of freedom: 8723 (7722+1001)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 4
 Number of active cells: 2008
 Number of degrees of freedom: 19383 (17186+2197)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure

Refinement cycle 5
 Number of active cells: 4288
 Number of degrees of freedom: 40855 (36250+4605)
 Assembling...
 Computing preconditioner...
 Solving... 11 outer CG Schur complement iterations for pressure
@endcode

The entire computation above takes about 2 seconds on a reasonably
quick (by 2015 standards) machine.

What we see immediately from this is that the number of (outer)
iterations does not increase as we refine the mesh. This confirms the
statement in the introduction that preconditioning the Schur
complement with the mass matrix indeed yields a matrix spectrally
equivalent to the identity matrix (i.e. with eigenvalues bounded above
and below independently of the mesh size or the relative sizes of
cells). In other words, the mass matrix and the Schur complement are
spectrally equivalent.
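
Stated as a formula (this restates the claim above; the constants are
determined by the inf-sup constant of the discretization, not by the mesh
size @f$h@f$): there exist @f$c_1, c_2 > 0@f$ independent of @f$h@f$ such that
@f{eqnarray*}{
 c_1 \, (M_p q, q) \;\le\; ((B A^{-1} B^T) q, q) \;\le\; c_2 \, (M_p q, q)
 \qquad \text{for all } q,
@f}
so the eigenvalues of the preconditioned Schur complement are bounded away
from zero and infinity independently of refinement.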

In the images below, we show the grids for the first six refinement
steps in the program. Observe how the grid is refined in regions
where the solution rapidly changes: On the upper boundary, we have
Dirichlet boundary conditions that are -1 in the left half of the line
and 1 in the right one, so there is an abrupt change at @f$x=0@f$. Likewise,
there are changes from Dirichlet to Neumann data in the two upper
corners, so there is need for refinement there as well:

<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-0.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-1.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-2.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-3.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-4.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.2d.mesh-5.png" alt="">
    </td>
  </tr>
</table>

Finally, the following is a plot of the flow field. It shows fluid
transported along with the moving upper boundary and being replaced by
material coming from below:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.solution.png" alt="">

This plot uses the capability of VTK-based visualization programs (in
this case of VisIt) to show vector data; this is the result of us
declaring the velocity components of the finite element in use to be a
set of vector components, rather than independent scalar components, in
the <code>StokesProblem@<dim@>::%output_results</code> function of this
tutorial program.


<a name="step_22-3Dcalculations"></a><h4>3D calculations</h4>

In 3d, the screen output of the program looks like this:

@code
Refinement cycle 0
 Number of active cells: 32
 Number of degrees of freedom: 1356 (1275+81)
 Assembling...
 Computing preconditioner...
 Solving... 13 outer CG Schur complement iterations for pressure.

Refinement cycle 1
 Number of active cells: 144
 Number of degrees of freedom: 5088 (4827+261)
 Assembling...
 Computing preconditioner...
 Solving... 14 outer CG Schur complement iterations for pressure.

Refinement cycle 2
 Number of active cells: 704
 Number of degrees of freedom: 22406 (21351+1055)
 Assembling...
 Computing preconditioner...
 Solving... 14 outer CG Schur complement iterations for pressure.

Refinement cycle 3
 Number of active cells: 3168
 Number of degrees of freedom: 93176 (89043+4133)
 Assembling...
 Computing preconditioner...
 Solving... 15 outer CG Schur complement iterations for pressure.

Refinement cycle 4
 Number of active cells: 11456
 Number of degrees of freedom: 327808 (313659+14149)
 Assembling...
 Computing preconditioner...
 Solving... 15 outer CG Schur complement iterations for pressure.

Refinement cycle 5
 Number of active cells: 45056
 Number of degrees of freedom: 1254464 (1201371+53093)
 Assembling...
 Computing preconditioner...
 Solving... 14 outer CG Schur complement iterations for pressure.
@endcode

Again, we see that the number of outer iterations does not increase as
we refine the mesh. Nevertheless, the compute time increases
significantly: for the six refinement cycles above, the solve takes about
0.14 seconds, 0.63 seconds, 4.8 seconds, 35 seconds, 2 minutes and 33 seconds,
and 13 minutes and 12 seconds, respectively. This overall superlinear (in the
number of unknowns) increase in runtime is due to the fact that our inner
solver is not @f${\cal O}(N)@f$: a simple experiment shows that as we keep
refining the mesh, the average number of ILU-preconditioned CG iterations to
invert the velocity-velocity block @f$A@f$ increases.

We will address the question of how to improve our solver <a
href="#improved-solver">below</a>.

As for the graphical output, the grids generated during the solution
look as follows:

<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-0.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-1.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-2.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-3.png" alt="">
    </td>
  </tr>
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-4.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d.mesh-5.png" alt="">
    </td>
  </tr>
</table>

Again, they show essentially the location of singularities introduced
by boundary conditions. The vector field computed makes for an
interesting graph:

<img src="https://www.dealii.org/images/steps/developer/step-22.3d.solution.png" alt="">

The isocontours shown here are those of the pressure
variable, showing the singularity at the point of discontinuous
velocity boundary conditions.


<a name="step_22-Sparsitypattern"></a><h3>Sparsity pattern</h3>

As explained during the generation of the sparsity pattern, it is
important to have the numbering of degrees of freedom in mind when
using preconditioners like incomplete LU decompositions. This is most
conveniently visualized using the distribution of nonzero elements in
the @ref GlossStiffnessMatrix "stiffness matrix".

If we don't do anything special to renumber degrees of freedom (i.e.,
without using DoFRenumbering::Cuthill_McKee, but still using
DoFRenumbering::component_wise to ensure that degrees of freedom are
appropriately sorted into their corresponding blocks of the matrix and
vector), then we get the following image after the first adaptive
refinement in two dimensions:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.sparsity-nor.png" alt="">

In order to generate such a graph, you have to insert a piece of
code like the following at the end of the setup step.
@code
  {
    std::ofstream out("sparsity_pattern.gpl");
    sparsity_pattern.print_gnuplot(out);
  }
@endcode

It is clearly visible that the nonzero entries are spread over almost the
whole matrix. This makes preconditioning by ILU inefficient: ILU generates a
Gaussian elimination (LU decomposition) without fill-in elements, which means
that the more tentative fill-in entries have to be left out, the worse the
approximation of the complete decomposition becomes.

In this program, we have thus chosen a more advanced renumbering of
components. The renumbering with DoFRenumbering::Cuthill_McKee and grouping
the components into velocity and pressure yields the following output:

<img src="https://www.dealii.org/images/steps/developer/step-22.2d.sparsity-ren.png" alt="">

It is apparent that the situation has improved a lot. Most of the elements are
now concentrated around the diagonal in the (0,0) block in the matrix. Similar
effects are also visible for the other blocks. In this case, the ILU
decomposition will be much closer to the full LU decomposition, which improves
the quality of the preconditioner. (It may be interesting to note that the
sparse direct solver UMFPACK does some %internal renumbering of the equations
before actually generating a sparse LU decomposition; that procedure leads to
a very similar pattern to the one we got from the Cuthill-McKee algorithm.)

Finally, we want to have a closer
look at a sparsity pattern in 3D. We show only the (0,0) block of the
matrix, again after one adaptive refinement. Apart from the fact that the matrix
size has increased, it is also visible that there are many more entries
in the matrix. Moreover, even for the optimized renumbering, there will be a
considerable amount of tentative fill-in elements. This illustrates why UMFPACK
is not a good choice in 3D - a full decomposition needs many new entries that
eventually won't fit into the physical memory (RAM):

<img src="https://www.dealii.org/images/steps/developer/step-22.3d.sparsity_uu-ren.png" alt="">


<a name="step_22-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

<a name="step-22-improved-solver"></a>
<a name="step_22-Improvedlinearsolverin3D"></a><h4>Improved linear solver in 3D</h4>

We have seen in the section on computational results that the number of outer
iterations does not depend on the mesh size, which is optimal in a sense of
scalability. This does, however, not apply to the solver as a whole, as
mentioned above:
We did not look at the number of inner iterations when generating the inverse of
the matrix @f$A@f$ and the mass matrix @f$M_p@f$. Of course, this is unproblematic in
the 2D case where we precondition @f$A@f$ with a direct solver and the
<code>vmult</code> operation of the inverse matrix structure will converge in
one single CG step, but this changes in 3D where we only use an ILU
preconditioner. There, the number of required preconditioned CG steps to
invert @f$A@f$ increases as the mesh is refined, and each <code>vmult</code>
operation involves on average approximately 14, 23, 36, 59, 75 and 101 inner
CG iterations in the refinement steps shown above. (On the other hand,
the number of iterations for applying the inverse pressure mass matrix is
always around five, both in two and three dimensions.) To summarize, most work
is spent on solving linear systems with the same matrix @f$A@f$ over and over again.
What makes this look even worse is the fact that we
actually invert a matrix that is about 95 percent the size of the total system
matrix and stands for 85 percent of the non-zero entries in the sparsity
pattern. Hence, the natural question is whether it is reasonable to solve a
linear system with matrix @f$A@f$ about 15 times when calculating the solution
to the block system.

The answer is, of course, that we can do that in a few other (most of the time
better) ways.
Nevertheless, it has to be remarked that an indefinite system such as the one
at hand puts much higher
demands on the linear algebra than standard elliptic problems as we have seen
in the early tutorial programs. The improvements are still rather
unsatisfactory, if one compares with an elliptic problem of similar
size. Either way, we will introduce below a number of improvements to the
linear solver, a discussion that we will revisit with additional
options in the @ref step_31 "step-31" program.

<a name="step-22-improved-ilu"></a>
<a name="step_22-BetterILUdecompositionbysmartreordering"></a><h5>Better ILU decomposition by smart reordering</h5>

A first attempt to improve the speed of the linear solution process is to choose
a dof reordering that makes the ILU closer to a full LU decomposition, as
already mentioned in the in-code comments. The DoFRenumbering namespace compares
several choices for the renumbering of dofs for the Stokes equations. The best
result regarding the computing time was found for the King ordering, which is
accessed through the call DoFRenumbering::boost::king_ordering. With that
ordering, the inner solver needs considerably fewer operations, e.g. about 62
inner CG iterations for the inversion of @f$A@f$ at cycle 4 compared to about 75
iterations with the standard Cuthill-McKee algorithm. Also, the computing time
at cycle 4 decreased from about 17 to 11 minutes for the <code>solve()</code>
call. However, the King ordering (and the orderings provided by the
DoFRenumbering::boost namespace in general) has a serious drawback - it uses
much more memory than the built-in deal.II versions, since it acts on abstract
graphs rather than the geometry provided by the triangulation. In the present
case, the renumbering takes about 5 times as much memory, which yields an
infeasible algorithm for the last cycle in 3D with 1.2 million
unknowns.
2720
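As a minimal sketch of this change - assuming the <code>setup_dofs()</code>
function of this program, with <code>block_component</code> the
component-to-block map used there - the Cuthill-McKee call is simply
replaced:

@code
#include <deal.II/dofs/dof_renumbering.h>

// In StokesProblem::setup_dofs(): use the Boost King ordering instead
// of the default Cuthill-McKee reordering; the subsequent
// component-wise renumbering that creates the block structure stays.
dof_handler.distribute_dofs (fe);
DoFRenumbering::boost::king_ordering (dof_handler);
DoFRenumbering::component_wise (dof_handler, block_component);
@endcode
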
<a name="step_22-BetterpreconditionerfortheinnerCGsolver"></a><h5>Better preconditioner for the inner CG solver</h5>

Another idea to improve the situation further would be to choose a
preconditioner that makes CG for the (0,0) matrix @f$A@f$ converge in a
mesh-independent number of iterations, say 10 to 30. We have seen such a
candidate in @ref step_16 "step-16": multigrid.

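One concrete route - the one taken in @ref step_31 "step-31" and
@ref step_32 "step-32" - is algebraic multigrid. As a rough sketch, assuming
deal.II was configured with Trilinos and with parameter choices that are
plausible but untuned, the ILU for the @f$A@f$ block could be replaced like
this:

@code
#include <deal.II/lac/trilinos_precondition.h>

// Sketch: an AMG preconditioner for the velocity block A, replacing
// the ILU used in 3D. The flags are plausible settings for a
// vector-valued elliptic operator discretized with Q2 elements.
TrilinosWrappers::PreconditionAMG                 A_amg_preconditioner;
TrilinosWrappers::PreconditionAMG::AdditionalData amg_data;
amg_data.elliptic              = true;
amg_data.higher_order_elements = true;
A_amg_preconditioner.initialize (system_matrix.block(0,0), amg_data);
@endcode
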
<a name="step_22-BlockSchurcomplementpreconditioner"></a><h5>Block Schur complement preconditioner</h5>

<a name="step-22-block-schur"></a>
Even with a good preconditioner for @f$A@f$, we would still need to solve the
same linear system repeatedly (with different right hand sides, though) in
order to make the Schur complement solve converge. The approach we are going
to discuss here is how inner and outer iterations can be combined; as long as
we persist in calculating the Schur complement, there is no way around this.

The alternative is to attack the block system at once and use an approximate
Schur complement as an efficient preconditioner. The idea is as
follows: If we find a block preconditioner @f$P@f$ such that the matrix
@f{eqnarray*}{
  P^{-1}\left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
@f}
is simple, then an iterative solver with that preconditioner will converge in a
few iterations. Using the Schur complement @f$S = B A^{-1} B^T@f$, one finds that
@f{eqnarray*}{
  P^{-1}
  =
  \left(\begin{array}{cc}
    A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
  \end{array}\right)
@f}
would appear to be a good choice since
@f{eqnarray*}{
  P^{-1}\left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
  =
  \left(\begin{array}{cc}
    A^{-1} & 0 \\ S^{-1} B A^{-1} & -S^{-1}
  \end{array}\right)\cdot \left(\begin{array}{cc}
    A & B^T \\ B & 0
  \end{array}\right)
  =
  \left(\begin{array}{cc}
    I & A^{-1} B^T \\ 0 & I
  \end{array}\right).
@f}
This is the approach taken by the paper by Silvester and Wathen referenced
in the introduction (with the exception that Silvester and Wathen use
right preconditioning). In this case, a Krylov-based iterative method would
converge in one step only if exact inverses of @f$A@f$ and @f$S@f$ were applied,
since all the eigenvalues are one (and the number of iterations in such a
method is bounded by the number of distinct eigenvalues). Below, we will
discuss the choice of an adequate solver for this problem. First, we are
going to have a closer look at the implementation of the preconditioner.

Since @f$P@f$ is only intended as a preconditioner, we shall use
approximations to the inverse of the Schur complement @f$S@f$ and the matrix
@f$A@f$. Hence, the Schur complement will be approximated by the pressure
mass matrix @f$M_p@f$, and we use a preconditioner for @f$A@f$ (without an
InverseMatrix class around it) to approximate @f$A^{-1}@f$.

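Written out, the operator we actually apply is thus the following matrix,
where @f$\tilde A^{-1}@f$ denotes the preconditioner for @f$A@f$ (this is
just @f$P^{-1}@f$ from above with the exact inverses replaced by their
approximations):
@f{eqnarray*}{
  \tilde P^{-1}
  =
  \left(\begin{array}{cc}
    \tilde A^{-1} & 0 \\ M_p^{-1} B \tilde A^{-1} & -M_p^{-1}
  \end{array}\right).
@f}
Applying it to a block vector requires one application of
@f$\tilde A^{-1}@f$, one multiplication with @f$B@f$, and one application of
@f$M_p^{-1}@f$.
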
Here comes the class that implements the block Schur
complement preconditioner. The <code>vmult</code> operation for block vectors
according to the derivation above can be specified by three successive
operations:
@code
template <class PreconditionerA, class PreconditionerMp>
class BlockSchurPreconditioner : public EnableObserverPointer
{
  public:
    BlockSchurPreconditioner (const BlockSparseMatrix<double> &S,
          const InverseMatrix<SparseMatrix<double>,PreconditionerMp> &Mpinv,
          const PreconditionerA &Apreconditioner);

    void vmult (BlockVector<double>       &dst,
                const BlockVector<double> &src) const;

  private:
    const ObserverPointer<const BlockSparseMatrix<double>> system_matrix;
    const ObserverPointer<const InverseMatrix<SparseMatrix<double>,
                                              PreconditionerMp>> m_inverse;
    const PreconditionerA &a_preconditioner;

    mutable Vector<double> tmp;
};

template <class PreconditionerA, class PreconditionerMp>
BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::BlockSchurPreconditioner(
  const BlockSparseMatrix<double>                            &S,
  const InverseMatrix<SparseMatrix<double>,PreconditionerMp> &Mpinv,
  const PreconditionerA                                      &Apreconditioner)
  :
  system_matrix (&S),
  m_inverse (&Mpinv),
  a_preconditioner (Apreconditioner),
  tmp (S.block(1,1).m())
{}

// Now the interesting function, the multiplication of
// the preconditioner with a BlockVector.
template <class PreconditionerA, class PreconditionerMp>
void BlockSchurPreconditioner<PreconditionerA, PreconditionerMp>::vmult (
  BlockVector<double>       &dst,
  const BlockVector<double> &src) const
{
  // Form u_new = ~A^{-1} u by applying the preconditioner for A
  a_preconditioner.vmult (dst.block(0), src.block(0));
  // Form tmp = - B u_new + p
  // (<code>SparseMatrix::residual</code> does precisely this)
  system_matrix->block(1,0).residual(tmp, dst.block(0), src.block(1));
  // Change sign in tmp
  tmp *= -1;
  // Apply the inverse of the approximate Schur complement
  // (i.e. the inverse of the pressure mass matrix)
  m_inverse->vmult (dst.block(1), tmp);
}
@endcode

Since we act on the whole block system now, we have to live with one
disadvantage: we need to perform the solver iterations on
the full block system instead of the smaller pressure space.

Now we turn to the question of which solver we should use for the block
system. The first observation is that the resulting preconditioned matrix cannot
be solved with CG since it is neither positive definite nor symmetric.

The deal.II libraries implement several solvers that are appropriate for the
problem at hand. One choice is the solver @ref SolverBicgstab "BiCGStab", which
was used for the solution of the unsymmetric advection problem in @ref step_9 "step-9". The
second option, the one we are going to choose, is @ref SolverGMRES "GMRES"
(generalized minimum residual). Both methods have their pros and cons - there
are problems where one of the two candidates clearly outperforms the other, and
vice versa.
<a href="http://en.wikipedia.org/wiki/GMRES#Comparison_with_other_solvers">Wikipedia</a>'s
article on the GMRES method gives a comparative presentation.
A more comprehensive and well-founded comparison can be found, e.g., in the book by
J.W. Demmel (Applied Numerical Linear Algebra, SIAM, 1997, section 6.6.6).

For our specific problem with the ILU preconditioner for @f$A@f$, we certainly need
to perform hundreds of iterations on the block system for large problem sizes
(we won't beat CG!). Actually, this disfavors GMRES: During the GMRES
iterations, a basis of Krylov vectors is successively built up and some
operations are performed on these vectors. The more vectors are in this basis,
the more operations and memory will be needed. The number of operations scales
as @f${\cal O}(n + k^2)@f$ and memory as @f${\cal O}(kn)@f$, where @f$k@f$ is the number of
vectors in the Krylov basis and @f$n@f$ the size of the (block) matrix.
To keep these demands from growing excessively, deal.II limits the size @f$k@f$ of the
basis to 30 vectors by default, after which the basis is rebuilt from
scratch. This implementation of the GMRES method is called
GMRES(k), with default @f$k=30@f$. What we have gained by this restriction,
namely a bound on operations and memory requirements, comes at a price:
we use an incomplete basis, and this increases the number of
required iterations.

BiCGStab, on the other hand, won't get slower when many iterations are needed
(one iteration uses only results from one preceding step, rather than all
previous steps as in GMRES). Besides the fact that BiCGStab is more expensive per
step since two matrix-vector products are needed (compared to one for
CG or GMRES), there is one main reason which makes BiCGStab not appropriate for
this problem: The preconditioner applies the inverse of the pressure
mass matrix by using the InverseMatrix class. Since the application of the
inverse matrix to a vector is done only in an approximate way (an exact inverse
is too expensive), this will also affect the solver. In the case of BiCGStab,
the Krylov vectors will not be orthogonal due to that perturbation. While
this is uncritical for a small number of steps (up to about 50), it ruins the
performance of the solver when these perturbations have grown to a significant
magnitude in the course of the iterations.

We did some experiments with BiCGStab and found it to
be faster than GMRES up to refinement cycle 3 (in 3D), but it became very slow
for cycles 4 and 5 (even slower than the original Schur complement), so the
solver is useless in this situation. Choosing a sharper tolerance for the
inverse matrix class (<code>1e-10*src.l2_norm()</code> instead of
<code>1e-6*src.l2_norm()</code>) made BiCGStab perform well also for cycle 4,
but did not change the failure on the very large problems.

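For reference, the tolerance in question is set in the <code>vmult</code>
function of the <code>InverseMatrix</code> class shown near the top of this
program; the sharper variant just mentioned amounts to this one-line change:

@code
// Inside InverseMatrix::vmult(): tighten the stopping criterion of the
// inner CG solve from 1e-6 to 1e-10 times the right hand side norm.
SolverControl solver_control (src.size(), 1e-10 * src.l2_norm());
@endcode
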
GMRES is of course also affected by the approximate inverses, but it is not as
sensitive to orthogonality and retains a relatively good performance also for
large sizes, see the results below.

With this said, we turn to the realization of the solver call with GMRES
using @f$k=100@f$ temporary vectors:

@code
const SparseMatrix<double> &pressure_mass_matrix
  = preconditioner_matrix.block(1,1);
SparseILU<double> pmass_preconditioner;
pmass_preconditioner.initialize (pressure_mass_matrix,
                                 SparseILU<double>::AdditionalData());

InverseMatrix<SparseMatrix<double>,SparseILU<double> >
  m_inverse (pressure_mass_matrix, pmass_preconditioner);

BlockSchurPreconditioner<typename InnerPreconditioner<dim>::type,
                         SparseILU<double> >
  preconditioner (system_matrix, m_inverse, *A_preconditioner);

SolverControl solver_control (system_matrix.m(),
                              1e-6*system_rhs.l2_norm());
GrowingVectorMemory<BlockVector<double> > vector_memory;
SolverGMRES<BlockVector<double> >::AdditionalData gmres_data;
gmres_data.max_basis_size = 100;

SolverGMRES<BlockVector<double> > gmres(solver_control, vector_memory,
                                        gmres_data);

gmres.solve(system_matrix, solution, system_rhs,
            preconditioner);

constraints.distribute (solution);

std::cout << " "
          << solver_control.last_step()
          << " block GMRES iterations";
@endcode

Obviously, one needs to add the include file @ref SolverGMRES
"<deal.II/lac/solver_gmres.h>" in order to make this run.
We call the solver with a BlockVector template in order to enable
GMRES to operate on block vectors and matrices.
Note also that we need to set the (1,1) block in the system
matrix to zero (we saved the pressure mass matrix there which is not part of the
problem) after we copied the information to another matrix.

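Emptying that block is a one-line operation; the following sketch assumes
the pressure mass matrix was assembled into the (1,1) block of
<code>system_matrix</code> as just described and has already been copied
into <code>preconditioner_matrix</code>:

@code
// The (1,1) block only served as storage for the pressure mass matrix
// and is not part of the Stokes system, so clear it before solving:
system_matrix.block(1,1) = 0;
@endcode
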
Using the Timer class, we collect some statistics that compare the runtime
of the block solver with the one from the problem implementation above.
Besides the solution with the two options, we also check that the solutions
of the two variants are close to each other (i.e., that this solver indeed
gives the same solution as before) by calculating the infinity norm of the
difference between the solution vectors.

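The timings shown in brackets below were collected along the following
lines; this is only a sketch of the instrumentation, not code that appears
verbatim in the program:

@code
#include <deal.II/base/timer.h>

Timer timer;
timer.restart();
solve();                    // or the block Schur complement variant
timer.stop();
std::cout << " [" << timer.cpu_time() << " s]" << std::endl;
@endcode
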
Let's first see the results in 2D:
@code
Refinement cycle 0
   Number of active cells: 64
   Number of degrees of freedom: 679 (594+85) [0.00162792 s]
   Assembling...  [0.00108981 s]
   Computing preconditioner... [0.0025959 s]
   Solving...
      Schur complement: 11 outer CG iterations for p  [0.00479603 s]
      Block Schur preconditioner: 12 GMRES iterations [0.00441718 s]
   l_infinity difference between solution vectors: 5.38258e-07

Refinement cycle 1
   Number of active cells: 160
   Number of degrees of freedom: 1683 (1482+201) [0.00345707 s]
   Assembling...  [0.00237417 s]
   Computing preconditioner... [0.00605702 s]
   Solving...
      Schur complement: 11 outer CG iterations for p  [0.0123992 s]
      Block Schur preconditioner: 12 GMRES iterations [0.011909 s]
   l_infinity difference between solution vectors: 1.74658e-05

Refinement cycle 2
   Number of active cells: 376
   Number of degrees of freedom: 3813 (3370+443) [0.00729299 s]
   Assembling...  [0.00529909 s]
   Computing preconditioner... [0.0167508 s]
   Solving...
      Schur complement: 11 outer CG iterations for p  [0.031672 s]
      Block Schur preconditioner: 12 GMRES iterations [0.029232 s]
   l_infinity difference between solution vectors: 7.81569e-06

Refinement cycle 3
   Number of active cells: 880
   Number of degrees of freedom: 8723 (7722+1001) [0.017709 s]
   Assembling...  [0.0126002 s]
   Computing preconditioner... [0.0435679 s]
   Solving...
      Schur complement: 11 outer CG iterations for p  [0.0971651 s]
      Block Schur preconditioner: 12 GMRES iterations [0.0992041 s]
   l_infinity difference between solution vectors: 1.87249e-05

Refinement cycle 4
   Number of active cells: 2008
   Number of degrees of freedom: 19383 (17186+2197) [0.039988 s]
   Assembling...  [0.028281 s]
   Computing preconditioner... [0.118314 s]
   Solving...
      Schur complement: 11 outer CG iterations for p  [0.252133 s]
      Block Schur preconditioner: 13 GMRES iterations [0.269125 s]
   l_infinity difference between solution vectors: 6.38657e-05

Refinement cycle 5
   Number of active cells: 4288
   Number of degrees of freedom: 40855 (36250+4605) [0.0880702 s]
   Assembling...  [0.0603511 s]
   Computing preconditioner... [0.278339 s]
   Solving...
      Schur complement: 11 outer CG iterations for p  [0.53846 s]
      Block Schur preconditioner: 13 GMRES iterations [0.578667 s]
   l_infinity difference between solution vectors: 0.000173363
@endcode

We see that there is no huge difference in the solution time between the
block Schur complement preconditioner solver and the Schur complement
itself. The reason is simple: we used a direct solver as preconditioner for
@f$A@f$ - so we cannot expect any gain by avoiding the inner iterations. We see
that the number of iterations has slightly increased for GMRES, but all in
all the two choices are fairly similar.

The picture of course changes in 3D:

@code
Refinement cycle 0
   Number of active cells: 32
   Number of degrees of freedom: 1356 (1275+81) [0.00845218 s]
   Assembling...  [0.019372 s]
   Computing preconditioner... [0.00712395 s]
   Solving...
      Schur complement: 13 outer CG iterations for p  [0.0320101 s]
      Block Schur preconditioner: 22 GMRES iterations [0.0048759 s]
   l_infinity difference between solution vectors: 2.15942e-05

Refinement cycle 1
   Number of active cells: 144
   Number of degrees of freedom: 5088 (4827+261) [0.0346942 s]
   Assembling...  [0.0857739 s]
   Computing preconditioner... [0.0465031 s]
   Solving...
      Schur complement: 14 outer CG iterations for p  [0.349258 s]
      Block Schur preconditioner: 35 GMRES iterations [0.048759 s]
   l_infinity difference between solution vectors: 1.77657e-05

Refinement cycle 2
   Number of active cells: 704
   Number of degrees of freedom: 22406 (21351+1055) [0.175669 s]
   Assembling...  [0.437447 s]
   Computing preconditioner... [0.286435 s]
   Solving...
      Schur complement: 14 outer CG iterations for p  [3.65519 s]
      Block Schur preconditioner: 63 GMRES iterations [0.497787 s]
   l_infinity difference between solution vectors: 5.08078e-05

Refinement cycle 3
   Number of active cells: 3168
   Number of degrees of freedom: 93176 (89043+4133) [0.790985 s]
   Assembling...  [1.97598 s]
   Computing preconditioner... [1.4325 s]
   Solving...
      Schur complement: 15 outer CG iterations for p  [29.9666 s]
      Block Schur preconditioner: 128 GMRES iterations [5.02645 s]
   l_infinity difference between solution vectors: 0.000119671

Refinement cycle 4
   Number of active cells: 11456
   Number of degrees of freedom: 327808 (313659+14149) [3.44995 s]
   Assembling...  [7.54772 s]
   Computing preconditioner... [5.46306 s]
   Solving...
      Schur complement: 15 outer CG iterations for p  [139.987 s]
      Block Schur preconditioner: 255 GMRES iterations [38.0946 s]
   l_infinity difference between solution vectors: 0.00020793

Refinement cycle 5
   Number of active cells: 45056
   Number of degrees of freedom: 1254464 (1201371+53093) [19.6795 s]
   Assembling...  [28.6586 s]
   Computing preconditioner... [22.401 s]
   Solving...
      Schur complement: 14 outer CG iterations for p  [796.767 s]
      Block Schur preconditioner: 524 GMRES iterations [355.597 s]
   l_infinity difference between solution vectors: 0.000501219
@endcode

Here, the block preconditioned solver is clearly superior to the Schur
complement, but the advantage diminishes as the number of mesh points
grows. This is because GMRES(k) scales worse with the problem size than CG,
as we discussed above. Nonetheless, the improvement by a factor of 3-6 for
moderate problem sizes is quite impressive.


<a name="step_22-Combiningtheblockpreconditionerandmultigrid"></a><h5>Combining the block preconditioner and multigrid</h5>

An ultimate linear solver for this problem could be imagined as a
combination of an optimal
preconditioner for @f$A@f$ (e.g. multigrid) and the block preconditioner
described above, which is the approach taken in the @ref step_31 "step-31"
and @ref step_32 "step-32" tutorial programs (where we use an algebraic multigrid
method) and @ref step_56 "step-56" (where we use a geometric multigrid method).


<a name="step_22-Noblockmatricesandvectors"></a><h5>No block matrices and vectors</h5>

Another possibility worth considering is to not set up a block
system, but rather solve the system of velocity and pressure all at once. The
options are a direct solve with UMFPACK (2D) or GMRES with ILU
preconditioning (3D). It should be straightforward to try that.
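
For the 2D case, such a direct solve could look like the following sketch;
SparseDirectUMFPACK can factorize the block matrix of this program and
apply its inverse to block vectors directly:

@code
#include <deal.II/lac/sparse_direct.h>

// Sketch: factorize the coupled velocity-pressure system once and
// apply the inverse to the right hand side (feasible for 2D sizes).
SparseDirectUMFPACK direct_solver;
direct_solver.initialize (system_matrix);
direct_solver.vmult (solution, system_rhs);
constraints.distribute (solution);
@endcode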


<a name="step_22-Moreinterestingtestcases"></a><h4>More interesting testcases</h4>


The program can of course also serve as a basis to compute the flow in more
interesting cases. The original motivation to write this program was for it to
be a starting point for some geophysical flow problems, such as the
movement of magma under places where continental plates drift apart (for
example mid-ocean ridges). Of course, in such places, the geometry is more
complicated than the examples shown above, but it is not hard to accommodate
that.

For example, by using the following modification of the boundary values
function
@code
template <int dim>
double
BoundaryValues<dim>::value (const Point<dim>  &p,
                            const unsigned int component) const
{
  Assert (component < this->n_components,
          ExcIndexRange (component, 0, this->n_components));

  const double x_offset = std::atan(p[1]*4)/3;

  if (component == 0)
    return (p[0] < x_offset ? -1 : (p[0] > x_offset ? 1 : 0));
  return 0;
}
@endcode
and the following way to generate the mesh for the domain
@f$[-2,2]\times[-2,2]\times[-1,0]@f$
@code
std::vector<unsigned int> subdivisions (dim, 1);
subdivisions[0] = 4;
if (dim>2)
  subdivisions[1] = 4;

const Point<dim> bottom_left = (dim == 2 ?
                                Point<dim>(-2,-1) :
                                Point<dim>(-2,-2,-1));
const Point<dim> top_right   = (dim == 2 ?
                                Point<dim>(2,0) :
                                Point<dim>(2,2,0));

GridGenerator::subdivided_hyper_rectangle (triangulation,
                                           subdivisions,
                                           bottom_left,
                                           top_right);
@endcode
then we get images where the fault line is curved:
<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d-extension.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-22.3d-grid-extension.png" alt="">
    </td>
  </tr>
</table>
 *
 *
<a name="step_22-PlainProg"></a>
<h1> The plain program</h1>
@include "step-22.cc"
*/