step-32.h
1514 * constexpr double kappa = 1e-6; /* m^2 / s */
1515 * constexpr double reference_density = 3300; /* kg / m^3 */
1516 * constexpr double reference_temperature = 293; /* K */
1517 * constexpr double expansion_coefficient = 2e-5; /* 1/K */
1518 * constexpr double specific_heat = 1250; /* J / K / kg */
1519 * constexpr double radiogenic_heating = 7.4e-12; /* W / kg */
1520 *
1521 *
1522 * constexpr double R0 = 6371000. - 2890000.; /* m */
1523 * constexpr double R1 = 6371000. - 35000.; /* m */
1524 *
1525 * constexpr double T0 = 4000 + 273; /* K */
1526 * constexpr double T1 = 700 + 273; /* K */
1527 *
1528 *
1529 * @endcode
1530 *
1531 * The next set of definitions are for functions that encode the density
1532 * as a function of temperature, the gravity vector, and the initial
1533 * values for the temperature. Again, all of these (along with the values
1534 * they compute) are discussed in the introduction:
1535 *
1536 * @code
1537 * double density(const double temperature)
1538 * {
1539 * return (
1540 * reference_density *
1541 * (1 - expansion_coefficient * (temperature - reference_temperature)));
1542 * }
1543 *
1544 *
1545 * template <int dim>
1546 * Tensor<1, dim> gravity_vector(const Point<dim> &p)
1547 * {
1548 * const double r = p.norm();
1549 * return -(1.245e-6 * r + 7.714e13 / r / r) * p / r;
1550 * }
1551 *
1552 *
1553 *
1554 * template <int dim>
1555 * class TemperatureInitialValues : public Function<dim>
1556 * {
1557 * public:
1558 * TemperatureInitialValues()
1559 * : Function<dim>(1)
1560 * {}
1561 *
1562 * virtual double value(const Point<dim> & p,
1563 * const unsigned int component = 0) const override;
1564 *
1565 * virtual void vector_value(const Point<dim> &p,
1566 * Vector<double> & value) const override;
1567 * };
1568 *
1569 *
1570 *
1571 * template <int dim>
1572 * double TemperatureInitialValues<dim>::value(const Point<dim> &p,
1573 * const unsigned int) const
1574 * {
1575 * const double r = p.norm();
1576 * const double h = R1 - R0;
1577 *
1578 * const double s = (r - R0) / h;
1579 * const double q =
1580 * (dim == 3) ? std::max(0.0, cos(numbers::PI * abs(p(2) / R1))) : 1.0;
1581 * const double phi = std::atan2(p(0), p(1));
1582 * const double tau = s + 0.2 * s * (1 - s) * std::sin(6 * phi) * q;
1583 *
1584 * return T0 * (1.0 - tau) + T1 * tau;
1585 * }
1586 *
1587 *
1588 * template <int dim>
1589 * void
1590 * TemperatureInitialValues<dim>::vector_value(const Point<dim> &p,
1591 * Vector<double> & values) const
1592 * {
1593 * for (unsigned int c = 0; c < this->n_components; ++c)
1594 * values(c) = TemperatureInitialValues<dim>::value(p, c);
1595 * }
1596 *
1597 *
1598 * @endcode
1599 *
1600 * As mentioned in the introduction we need to rescale the pressure to
1601 * avoid the relative ill-conditioning of the momentum and mass
1602 * conservation equations. The scaling factor is @f$\frac{\eta}{L}@f$ where
1603 * @f$L@f$ is a typical length scale. Experimentation shows that a good
1604 * length scale is the diameter of plumes, which is around 10 km:
1605 *
1606 * @code
1607 * constexpr double pressure_scaling = eta / 10000;
1608 *
1609 * @endcode
1610 *
1611 * The final number in this namespace is a constant that denotes the
1612 * number of seconds per (average, tropical) year. We use this only when
1613 * generating screen output: internally, all computations of this program
1614 * happen in SI units (kilogram, meter, seconds) but writing geological
1615 * times in seconds yields numbers that one can't relate to reality, and
1616 * so we convert to years using the factor defined here:
1617 *
1618 * @code
1619 * const double year_in_seconds = 60 * 60 * 24 * 365.2425;
1620 *
1621 * } // namespace EquationData
1622 *
1623 *
1624 *
1625 * @endcode
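 *
 * As a small illustration of how this constant is meant to be used (the
 * actual output statements appear much later in the program), converting an
 * internal time value, given in seconds, into years for screen output is a
 * simple division. The variable name in the following sketch is only a
 * placeholder:
 *
 * @code
 * // Hypothetical sketch: 'time' holds a simulation time in seconds.
 * std::cout << "t = " << time / EquationData::year_in_seconds << " years"
 *           << std::endl;
 * @endcode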
1626 *
1627 *
1628 * <a name="PreconditioningtheStokessystem"></a>
1629 * <h3>Preconditioning the Stokes system</h3>
1630 *
1631
1632 *
1633 * This namespace implements the preconditioner. As discussed in the
1634 * introduction, this preconditioner differs in a number of key portions
1635 * from the one used in @ref step_31 "step-31". Specifically, it is a right preconditioner,
1636 * implementing the matrix
1637 * @f{align*}
1638 * \left(\begin{array}{cc}A^{-1} & B^T
1639 * \\0 & S^{-1}
1640 * \end{array}\right)
1641 * @f}
1642 * where the two inverse matrix operations
1643 * are approximated by linear solvers or, if the right flag is given to the
1644 * constructor of this class, by a single AMG V-cycle for the velocity
1645 * block. The three code blocks of the <code>vmult</code> function implement
1646 * the multiplications with the three blocks of this preconditioner matrix
1647 * and should be self explanatory if you have read through @ref step_31 "step-31" or the
1648 * discussion of composing solvers in @ref step_20 "step-20".
1649 *
1650 * @code
1651 * namespace LinearSolvers
1652 * {
1653 * template <class PreconditionerTypeA, class PreconditionerTypeMp>
1654 * class BlockSchurPreconditioner : public Subscriptor
1655 * {
1656 * public:
1657 * BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
1658 * const TrilinosWrappers::BlockSparseMatrix &Spre,
1659 * const PreconditionerTypeMp &Mppreconditioner,
1660 * const PreconditionerTypeA & Apreconditioner,
1661 * const bool do_solve_A)
1662 * : stokes_matrix(&S)
1663 * , stokes_preconditioner_matrix(&Spre)
1664 * , mp_preconditioner(Mppreconditioner)
1665 * , a_preconditioner(Apreconditioner)
1666 * , do_solve_A(do_solve_A)
1667 * {}
1668 *
1669 * void vmult(TrilinosWrappers::MPI::BlockVector & dst,
1670 * const TrilinosWrappers::MPI::BlockVector &src) const
1671 * {
1672 * TrilinosWrappers::MPI::Vector utmp(src.block(0));
1673 *
1674 * {
1675 * SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
1676 *
1677 * SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);
1678 *
1679 * solver.solve(stokes_preconditioner_matrix->block(1, 1),
1680 * dst.block(1),
1681 * src.block(1),
1682 * mp_preconditioner);
1683 *
1684 * dst.block(1) *= -1.0;
1685 * }
1686 *
1687 * {
1688 * stokes_matrix->block(0, 1).vmult(utmp, dst.block(1));
1689 * utmp *= -1.0;
1690 * utmp.add(src.block(0));
1691 * }
1692 *
1693 * if (do_solve_A == true)
1694 * {
1695 * SolverControl solver_control(5000, utmp.l2_norm() * 1e-2);
1696 * TrilinosWrappers::SolverCG solver(solver_control);
1697 * solver.solve(stokes_matrix->block(0, 0),
1698 * dst.block(0),
1699 * utmp,
1700 * a_preconditioner);
1701 * }
1702 * else
1703 * a_preconditioner.vmult(dst.block(0), utmp);
1704 * }
1705 *
1706 * private:
1707 * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1708 * stokes_matrix;
1709 * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1710 * stokes_preconditioner_matrix;
1711 * const PreconditionerTypeMp &mp_preconditioner;
1712 * const PreconditionerTypeA & a_preconditioner;
1713 * const bool do_solve_A;
1714 * };
1715 * } // namespace LinearSolvers
1716 *
1717 *
1718 *
1719 * @endcode
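 *
 * Because the two inner solves can make this preconditioner a slightly
 * different operator from one outer iteration to the next, it is typically
 * combined with a flexible Krylov method. The following lines are only a
 * sketch of how an object of this class might be handed to an FGMRES
 * solver; the matrix, vector, and preconditioner names are assumptions for
 * illustration, and the solver call this program actually makes appears in
 * the <code>solve()</code> function further down:
 *
 * @code
 * // Sketch only: all objects used here are assumed to have been set up
 * // elsewhere, as the real program does in its setup and solve functions.
 * const LinearSolvers::BlockSchurPreconditioner<
 *   TrilinosWrappers::PreconditionAMG,
 *   TrilinosWrappers::PreconditionJacobi>
 *   preconditioner(stokes_matrix,
 *                  stokes_preconditioner_matrix,
 *                  *Mp_preconditioner,
 *                  *Amg_preconditioner,
 *                  true);
 *
 * SolverControl solver_control(1000, 1e-8 * stokes_rhs.l2_norm());
 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(solver_control);
 * solver.solve(stokes_matrix,
 *              distributed_stokes_solution,
 *              stokes_rhs,
 *              preconditioner);
 * @endcode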
1720 *
1721 *
1722 * <a name="Definitionofassemblydatastructures"></a>
1723 * <h3>Definition of assembly data structures</h3>
1724 *
1725
1726 *
1727 * As described in the introduction, we will use the WorkStream mechanism
1728 * discussed in the @ref threads module to parallelize operations among the
1729 * processors of a single machine. The WorkStream class requires that data
1730 * is passed around in two kinds of data structures, one for scratch data
1731 * and one to pass data from the assembly function to the function that
1732 * copies local contributions into global objects.
1733 *
1734
1735 *
1736 * The following namespace (and the two sub-namespaces) contains a
1737 * collection of data structures that serve this purpose, one pair for each
1738 * of the four operations discussed in the introduction that we will want to
1739 * parallelize. Each assembly routine gets two sets of data: a Scratch array
1740 * that collects all the classes and arrays that are used for the
1741 * calculation of the cell contribution, and a CopyData array that keeps
1742 * local matrices and vectors which will be written into the global
1743 * matrix. Whereas CopyData is a container for the final data that is
1744 * written into the global matrices and vector (and, thus, absolutely
1745 * necessary), the Scratch arrays are merely there for performance reasons
1746 * &mdash; it would be much more expensive to set up a FEValues object on
1747 * each cell than to create it only once and update some derivative data.
1748 *
1749
1750 *
1751 * @ref step_31 "step-31" had four assembly routines: One for the preconditioner matrix of
1752 * the Stokes system, one for the Stokes matrix and right hand side, one for
1753 * the temperature matrices and one for the right hand side of the
1754 * temperature equation. We here organize the scratch arrays and CopyData
1755 * objects for each of those four assembly components using a
1756 * <code>struct</code> environment (since we consider these as temporary
1757 * objects we pass around, rather than classes that implement functionality
1758 * of their own, though this is a more subjective point of view to
1759 * distinguish between <code>struct</code>s and <code>class</code>es).
1760 *
1761
1762 *
1763 * Regarding the Scratch objects, each struct is equipped with a constructor
1764 * that creates an @ref FEValues object using the @ref FiniteElement,
1765 * Quadrature, @ref Mapping (which describes the interpolation of curved
1766 * boundaries), and @ref UpdateFlags instances. Moreover, we manually
1767 * implement a copy constructor (since the FEValues class is not copyable by
1768 * itself), and provide some additional vector fields that are used to hold
1769 * intermediate data during the computation of local contributions.
1770 *
1771
1772 *
1773 * Let us start with the scratch arrays and, specifically, the one used for
1774 * assembly of the Stokes preconditioner:
1775 *
1776 * @code
1777 * namespace Assembly
1778 * {
1779 * namespace Scratch
1780 * {
1781 * template <int dim>
1782 * struct StokesPreconditioner
1783 * {
1784 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe,
1785 * const Quadrature<dim> & stokes_quadrature,
1786 * const Mapping<dim> & mapping,
1787 * const UpdateFlags update_flags);
1788 *
1789 * StokesPreconditioner(const StokesPreconditioner &data);
1790 *
1791 *
1792 * FEValues<dim> stokes_fe_values;
1793 *
1794 * std::vector<Tensor<2, dim>> grad_phi_u;
1795 * std::vector<double> phi_p;
1796 * };
1797 *
1798 * template <int dim>
1799 * StokesPreconditioner<dim>::StokesPreconditioner(
1800 * const FiniteElement<dim> &stokes_fe,
1801 * const Quadrature<dim> & stokes_quadrature,
1802 * const Mapping<dim> & mapping,
1803 * const UpdateFlags update_flags)
1804 * : stokes_fe_values(mapping, stokes_fe, stokes_quadrature, update_flags)
1805 * , grad_phi_u(stokes_fe.n_dofs_per_cell())
1806 * , phi_p(stokes_fe.n_dofs_per_cell())
1807 * {}
1808 *
1809 *
1810 *
1811 * template <int dim>
1812 * StokesPreconditioner<dim>::StokesPreconditioner(
1813 * const StokesPreconditioner &scratch)
1814 * : stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
1815 * scratch.stokes_fe_values.get_fe(),
1816 * scratch.stokes_fe_values.get_quadrature(),
1817 * scratch.stokes_fe_values.get_update_flags())
1818 * , grad_phi_u(scratch.grad_phi_u)
1819 * , phi_p(scratch.phi_p)
1820 * {}
1821 *
1822 *
1823 *
1824 * @endcode
1825 *
1826 * The next one is the scratch object used for the assembly of the full
1827 * Stokes system. Observe that we derive the StokesSystem scratch class
1828 * from the StokesPreconditioner class above. We do this because all the
1829 * objects that are necessary for the assembly of the preconditioner are
1830 * also needed for the actual matrix system and right hand side, plus
1831 * some extra data. This makes the program more compact. Note also that
1832 * the assembly of the Stokes system and the temperature right hand side
1833 * further down requires data from temperature and velocity,
1834 * respectively, so we actually need two FEValues objects for those two
1835 * cases.
1836 *
1837 * @code
1838 * template <int dim>
1839 * struct StokesSystem : public StokesPreconditioner<dim>
1840 * {
1841 * StokesSystem(const FiniteElement<dim> &stokes_fe,
1842 * const Mapping<dim> & mapping,
1843 * const Quadrature<dim> & stokes_quadrature,
1844 * const UpdateFlags stokes_update_flags,
1845 * const FiniteElement<dim> &temperature_fe,
1846 * const UpdateFlags temperature_update_flags);
1847 *
1848 * StokesSystem(const StokesSystem<dim> &data);
1849 *
1850 *
1851 * FEValues<dim> temperature_fe_values;
1852 *
1853 * std::vector<Tensor<1, dim>> phi_u;
1854 * std::vector<SymmetricTensor<2, dim>> grads_phi_u;
1855 * std::vector<double> div_phi_u;
1856 *
1857 * std::vector<double> old_temperature_values;
1858 * };
1859 *
1860 *
1861 * template <int dim>
1862 * StokesSystem<dim>::StokesSystem(
1863 * const FiniteElement<dim> &stokes_fe,
1864 * const Mapping<dim> & mapping,
1865 * const Quadrature<dim> & stokes_quadrature,
1866 * const UpdateFlags stokes_update_flags,
1867 * const FiniteElement<dim> &temperature_fe,
1868 * const UpdateFlags temperature_update_flags)
1869 * : StokesPreconditioner<dim>(stokes_fe,
1870 * stokes_quadrature,
1871 * mapping,
1872 * stokes_update_flags)
1873 * , temperature_fe_values(mapping,
1874 * temperature_fe,
1875 * stokes_quadrature,
1876 * temperature_update_flags)
1877 * , phi_u(stokes_fe.n_dofs_per_cell())
1878 * , grads_phi_u(stokes_fe.n_dofs_per_cell())
1879 * , div_phi_u(stokes_fe.n_dofs_per_cell())
1880 * , old_temperature_values(stokes_quadrature.size())
1881 * {}
1882 *
1883 *
1884 * template <int dim>
1885 * StokesSystem<dim>::StokesSystem(const StokesSystem<dim> &scratch)
1886 * : StokesPreconditioner<dim>(scratch)
1887 * , temperature_fe_values(
1888 * scratch.temperature_fe_values.get_mapping(),
1889 * scratch.temperature_fe_values.get_fe(),
1890 * scratch.temperature_fe_values.get_quadrature(),
1891 * scratch.temperature_fe_values.get_update_flags())
1892 * , phi_u(scratch.phi_u)
1893 * , grads_phi_u(scratch.grads_phi_u)
1894 * , div_phi_u(scratch.div_phi_u)
1895 * , old_temperature_values(scratch.old_temperature_values)
1896 * {}
1897 *
1898 *
1899 * @endcode
1900 *
1901 * After defining the objects used in the assembly of the Stokes system,
1902 * we do the same for the assembly of the matrices necessary for the
1903 * temperature system. The general structure is very similar:
1904 *
1905 * @code
1906 * template <int dim>
1907 * struct TemperatureMatrix
1908 * {
1909 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe,
1910 * const Mapping<dim> & mapping,
1911 * const Quadrature<dim> & temperature_quadrature);
1912 *
1913 * TemperatureMatrix(const TemperatureMatrix &data);
1914 *
1915 *
1916 * FEValues<dim> temperature_fe_values;
1917 *
1918 * std::vector<double> phi_T;
1919 * std::vector<Tensor<1, dim>> grad_phi_T;
1920 * };
1921 *
1922 *
1923 * template <int dim>
1924 * TemperatureMatrix<dim>::TemperatureMatrix(
1925 * const FiniteElement<dim> &temperature_fe,
1926 * const Mapping<dim> & mapping,
1927 * const Quadrature<dim> & temperature_quadrature)
1928 * : temperature_fe_values(mapping,
1929 * temperature_fe,
1930 * temperature_quadrature,
1931 * update_values | update_gradients |
1932 * update_JxW_values)
1933 * , phi_T(temperature_fe.n_dofs_per_cell())
1934 * , grad_phi_T(temperature_fe.n_dofs_per_cell())
1935 * {}
1936 *
1937 *
1938 * template <int dim>
1939 * TemperatureMatrix<dim>::TemperatureMatrix(
1940 * const TemperatureMatrix &scratch)
1941 * : temperature_fe_values(
1942 * scratch.temperature_fe_values.get_mapping(),
1943 * scratch.temperature_fe_values.get_fe(),
1944 * scratch.temperature_fe_values.get_quadrature(),
1945 * scratch.temperature_fe_values.get_update_flags())
1946 * , phi_T(scratch.phi_T)
1947 * , grad_phi_T(scratch.grad_phi_T)
1948 * {}
1949 *
1950 *
1951 * @endcode
1952 *
1953 * The final scratch object is used in the assembly of the right hand
1954 * side of the temperature system. This object is significantly larger
1955 * than the ones above because a lot more quantities enter the
1956 * computation of the right hand side of the temperature equation. In
1957 * particular, the temperature values and gradients of the previous two
1958 * time steps need to be evaluated at the quadrature points, as well as
1959 * the velocities and the strain rates (i.e. the symmetric gradients of
1960 * the velocity) that enter the right hand side as friction heating
1961 * terms. Despite the number of terms, the following should be rather
1962 * self explanatory:
1963 *
1964 * @code
1965 * template <int dim>
1966 * struct TemperatureRHS
1967 * {
1968 * TemperatureRHS(const FiniteElement<dim> &temperature_fe,
1969 * const FiniteElement<dim> &stokes_fe,
1970 * const Mapping<dim> & mapping,
1971 * const Quadrature<dim> & quadrature);
1972 *
1973 * TemperatureRHS(const TemperatureRHS &data);
1974 *
1975 *
1976 * FEValues<dim> temperature_fe_values;
1977 * FEValues<dim> stokes_fe_values;
1978 *
1979 * std::vector<double> phi_T;
1980 * std::vector<Tensor<1, dim>> grad_phi_T;
1981 *
1982 * std::vector<Tensor<1, dim>> old_velocity_values;
1983 * std::vector<Tensor<1, dim>> old_old_velocity_values;
1984 *
1985 * std::vector<SymmetricTensor<2, dim>> old_strain_rates;
1986 * std::vector<SymmetricTensor<2, dim>> old_old_strain_rates;
1987 *
1988 * std::vector<double> old_temperature_values;
1989 * std::vector<double> old_old_temperature_values;
1990 * std::vector<Tensor<1, dim>> old_temperature_grads;
1991 * std::vector<Tensor<1, dim>> old_old_temperature_grads;
1992 * std::vector<double> old_temperature_laplacians;
1993 * std::vector<double> old_old_temperature_laplacians;
1994 * };
1995 *
1996 *
1997 * template <int dim>
1998 * TemperatureRHS<dim>::TemperatureRHS(
1999 * const FiniteElement<dim> &temperature_fe,
2000 * const FiniteElement<dim> &stokes_fe,
2001 * const Mapping<dim> & mapping,
2002 * const Quadrature<dim> & quadrature)
2003 * : temperature_fe_values(mapping,
2004 * temperature_fe,
2005 * quadrature,
2006 * update_values | update_gradients |
2007 * update_hessians | update_quadrature_points |
2008 * update_JxW_values)
2009 * , stokes_fe_values(mapping,
2010 * stokes_fe,
2011 * quadrature,
2012 * update_values | update_gradients)
2013 * , phi_T(temperature_fe.n_dofs_per_cell())
2014 * , grad_phi_T(temperature_fe.n_dofs_per_cell())
2015 * ,
2016 *
2017 * old_velocity_values(quadrature.size())
2018 * , old_old_velocity_values(quadrature.size())
2019 * , old_strain_rates(quadrature.size())
2020 * , old_old_strain_rates(quadrature.size())
2021 * ,
2022 *
2023 * old_temperature_values(quadrature.size())
2024 * , old_old_temperature_values(quadrature.size())
2025 * , old_temperature_grads(quadrature.size())
2026 * , old_old_temperature_grads(quadrature.size())
2027 * , old_temperature_laplacians(quadrature.size())
2028 * , old_old_temperature_laplacians(quadrature.size())
2029 * {}
2030 *
2031 *
2032 * template <int dim>
2033 * TemperatureRHS<dim>::TemperatureRHS(const TemperatureRHS &scratch)
2034 * : temperature_fe_values(
2035 * scratch.temperature_fe_values.get_mapping(),
2036 * scratch.temperature_fe_values.get_fe(),
2037 * scratch.temperature_fe_values.get_quadrature(),
2038 * scratch.temperature_fe_values.get_update_flags())
2039 * , stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
2040 * scratch.stokes_fe_values.get_fe(),
2041 * scratch.stokes_fe_values.get_quadrature(),
2042 * scratch.stokes_fe_values.get_update_flags())
2043 * , phi_T(scratch.phi_T)
2044 * , grad_phi_T(scratch.grad_phi_T)
2045 * ,
2046 *
2047 * old_velocity_values(scratch.old_velocity_values)
2048 * , old_old_velocity_values(scratch.old_old_velocity_values)
2049 * , old_strain_rates(scratch.old_strain_rates)
2050 * , old_old_strain_rates(scratch.old_old_strain_rates)
2051 * ,
2052 *
2053 * old_temperature_values(scratch.old_temperature_values)
2054 * , old_old_temperature_values(scratch.old_old_temperature_values)
2055 * , old_temperature_grads(scratch.old_temperature_grads)
2056 * , old_old_temperature_grads(scratch.old_old_temperature_grads)
2057 * , old_temperature_laplacians(scratch.old_temperature_laplacians)
2058 * , old_old_temperature_laplacians(scratch.old_old_temperature_laplacians)
2059 * {}
2060 * } // namespace Scratch
2061 *
2062 *
2063 * @endcode
2064 *
2065 * The CopyData objects are even simpler than the Scratch objects as all
2066 * they have to do is to store the results of local computations until
2067 * they can be copied into the global matrix or vector objects. These
2068 * structures therefore only need to provide a constructor, a copy
2069 * operation, and some arrays for local matrix, local vectors and the
2070 * relation between local and global degrees of freedom (a.k.a.
2071 * <code>local_dof_indices</code>). Again, we have one such structure for
2072 * each of the four operations we will parallelize using the WorkStream
2073 * class:
2074 *
2075 * @code
2076 * namespace CopyData
2077 * {
2078 * template <int dim>
2079 * struct StokesPreconditioner
2080 * {
2081 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe);
2082 * StokesPreconditioner(const StokesPreconditioner &data);
2083 * StokesPreconditioner &operator=(const StokesPreconditioner &) = default;
2084 *
2085 * FullMatrix<double> local_matrix;
2086 * std::vector<types::global_dof_index> local_dof_indices;
2087 * };
2088 *
2089 * template <int dim>
2090 * StokesPreconditioner<dim>::StokesPreconditioner(
2091 * const FiniteElement<dim> &stokes_fe)
2092 * : local_matrix(stokes_fe.n_dofs_per_cell(), stokes_fe.n_dofs_per_cell())
2093 * , local_dof_indices(stokes_fe.n_dofs_per_cell())
2094 * {}
2095 *
2096 * template <int dim>
2097 * StokesPreconditioner<dim>::StokesPreconditioner(
2098 * const StokesPreconditioner &data)
2099 * : local_matrix(data.local_matrix)
2100 * , local_dof_indices(data.local_dof_indices)
2101 * {}
2102 *
2103 *
2104 *
2105 * template <int dim>
2106 * struct StokesSystem : public StokesPreconditioner<dim>
2107 * {
2108 * StokesSystem(const FiniteElement<dim> &stokes_fe);
2109 *
2110 * Vector<double> local_rhs;
2111 * };
2112 *
2113 * template <int dim>
2114 * StokesSystem<dim>::StokesSystem(const FiniteElement<dim> &stokes_fe)
2115 * : StokesPreconditioner<dim>(stokes_fe)
2116 * , local_rhs(stokes_fe.n_dofs_per_cell())
2117 * {}
2118 *
2119 *
2120 *
2121 * template <int dim>
2122 * struct TemperatureMatrix
2123 * {
2124 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe);
2125 *
2126 * FullMatrix<double> local_mass_matrix;
2127 * FullMatrix<double> local_stiffness_matrix;
2128 * std::vector<types::global_dof_index> local_dof_indices;
2129 * };
2130 *
2131 * template <int dim>
2132 * TemperatureMatrix<dim>::TemperatureMatrix(
2133 * const FiniteElement<dim> &temperature_fe)
2134 * : local_mass_matrix(temperature_fe.n_dofs_per_cell(),
2135 * temperature_fe.n_dofs_per_cell())
2136 * , local_stiffness_matrix(temperature_fe.n_dofs_per_cell(),
2137 * temperature_fe.n_dofs_per_cell())
2138 * , local_dof_indices(temperature_fe.n_dofs_per_cell())
2139 * {}
2140 *
2141 *
2142 *
2143 * template <int dim>
2144 * struct TemperatureRHS
2145 * {
2146 * TemperatureRHS(const FiniteElement<dim> &temperature_fe);
2147 *
2148 * Vector<double> local_rhs;
2149 * std::vector<types::global_dof_index> local_dof_indices;
2150 * FullMatrix<double> matrix_for_bc;
2151 * };
2152 *
2153 * template <int dim>
2154 * TemperatureRHS<dim>::TemperatureRHS(
2155 * const FiniteElement<dim> &temperature_fe)
2156 * : local_rhs(temperature_fe.n_dofs_per_cell())
2157 * , local_dof_indices(temperature_fe.n_dofs_per_cell())
2158 * , matrix_for_bc(temperature_fe.n_dofs_per_cell(),
2159 * temperature_fe.n_dofs_per_cell())
2160 * {}
2161 * } // namespace CopyData
2162 * } // namespace Assembly
2163 *
2164 *
2165 *
2166 * @endcode
2167 *
2168 *
2169 * <a name="ThecodeBoussinesqFlowProblemcodeclasstemplate"></a>
2170 * <h3>The <code>BoussinesqFlowProblem</code> class template</h3>
2171 *
2172
2173 *
2174 * This is the declaration of the main class. It is very similar to @ref step_31 "step-31"
2175 * but there are a number of differences we will comment on below.
2176 *
2177
2178 *
2179 * The top of the class is essentially the same as in @ref step_31 "step-31", listing the
2180 * public methods and a set of private functions that do the heavy
2181 * lifting. Compared to @ref step_31 "step-31" there are only two additions to this
2182 * section: the function <code>get_cfl_number()</code> that computes the
2183 * maximum CFL number over all cells, from which we then compute the global
2184 * time step, and the function <code>get_entropy_variation()</code> that is
2185 * used in the computation of the entropy stabilization. It is akin to the
2186 * <code>get_extrapolated_temperature_range()</code> we have used in @ref step_31 "step-31"
2187 * for this purpose, but works on the entropy instead of the
2188 * temperature.
2189 *
2190 * @code
2191 * template <int dim>
2192 * class BoussinesqFlowProblem
2193 * {
2194 * public:
2195 * struct Parameters;
2196 * BoussinesqFlowProblem(Parameters &parameters);
2197 * void run();
2198 *
2199 * private:
2200 * void setup_dofs();
2201 * void assemble_stokes_preconditioner();
2202 * void build_stokes_preconditioner();
2203 * void assemble_stokes_system();
2204 * void assemble_temperature_matrix();
2205 * void assemble_temperature_system(const double maximal_velocity);
2206 * double get_maximal_velocity() const;
2207 * double get_cfl_number() const;
2208 * double get_entropy_variation(const double average_temperature) const;
2209 * std::pair<double, double> get_extrapolated_temperature_range() const;
2210 * void solve();
2211 * void output_results();
2212 * void refine_mesh(const unsigned int max_grid_level);
2213 *
2214 * double compute_viscosity(
2215 * const std::vector<double> & old_temperature,
2216 * const std::vector<double> & old_old_temperature,
2217 * const std::vector<Tensor<1, dim>> &old_temperature_grads,
2218 * const std::vector<Tensor<1, dim>> &old_old_temperature_grads,
2219 * const std::vector<double> & old_temperature_laplacians,
2220 * const std::vector<double> & old_old_temperature_laplacians,
2221 * const std::vector<Tensor<1, dim>> &old_velocity_values,
2222 * const std::vector<Tensor<1, dim>> &old_old_velocity_values,
2223 * const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
2224 * const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
2225 * const double global_u_infty,
2226 * const double global_T_variation,
2227 * const double average_temperature,
2228 * const double global_entropy_variation,
2229 * const double cell_diameter) const;
2230 *
2231 * public:
2232 * @endcode
2233 *
2234 * The first significant new component is the definition of a struct for
2235 * the parameters according to the discussion in the introduction. This
2236 * structure is initialized by reading from a parameter file during
2237 * construction of this object.
2238 *
2239 * @code
2240 * struct Parameters
2241 * {
2242 * Parameters(const std::string &parameter_filename);
2243 *
2244 * static void declare_parameters(ParameterHandler &prm);
2245 * void parse_parameters(ParameterHandler &prm);
2246 *
2247 * double end_time;
2248 *
2249 * unsigned int initial_global_refinement;
2250 * unsigned int initial_adaptive_refinement;
2251 *
2252 * bool generate_graphical_output;
2253 * unsigned int graphical_output_interval;
2254 *
2255 * unsigned int adaptive_refinement_interval;
2256 *
2257 * double stabilization_alpha;
2258 * double stabilization_c_R;
2259 * double stabilization_beta;
2260 *
2261 * unsigned int stokes_velocity_degree;
2262 * bool use_locally_conservative_discretization;
2263 *
2264 * unsigned int temperature_degree;
2265 * };
2266 *
2267 * private:
2268 * Parameters &parameters;
2269 *
2270 * @endcode
2271 *
2272 * The <code>pcout</code> (for <i>%parallel <code>std::cout</code></i>)
2273 * object is used to simplify writing output: each MPI process can use
2274 * this to generate output as usual, but since each of these processes
2275 * will (hopefully) produce the same output it will just be replicated
2276 * many times over; with the ConditionalOStream class, only the output
2277 * generated by one MPI process will actually be printed to screen,
2278 * whereas the output by all the other processes will simply be forgotten.
2279 *
2280 * @code
2281 * ConditionalOStream pcout;
2282 *
2283 * @endcode
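 *
 * As a brief illustration (the message text is just an example), one writes
 * to this object exactly as one would write to <code>std::cout</code>:
 *
 * @code
 * pcout << "Number of active cells: "
 *       << triangulation.n_global_active_cells() << std::endl;
 * @endcode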
2284 *
2285 * The following member variables will then again be similar to those in
2286 * @ref step_31 "step-31" (and to other tutorial programs). As mentioned in the
2287 * introduction, we fully distribute computations, so we will have to use
2288 * the parallel::distributed::Triangulation class (see @ref step_40 "step-40") but the
2289 * remainder of these variables is rather standard with two exceptions:
2290 *
2291
2292 *
2293 * - The <code>mapping</code> variable is used to denote a higher-order
2294 * polynomial mapping. As mentioned in the introduction, we use this
2295 * mapping when forming integrals through quadrature for all cells that
2296 * are adjacent to either the inner or outer boundaries of our domain
2297 * where the boundary is curved.
2298 *
2299
2300 *
2301 * - In a bit of naming confusion, you will notice below that some of the
2302 * variables from namespace TrilinosWrappers are taken from namespace
2303 * TrilinosWrappers::MPI (such as the right hand side vectors) whereas
2304 * others are not (such as the various matrices). This is due to legacy
2305 * reasons. We will frequently have to query velocities
2306 * and temperatures at arbitrary quadrature points; consequently, rather
2307 * than importing ghost information of a vector whenever we need access
2308 * to degrees of freedom that are relevant locally but owned by another
2309 * processor, we solve linear systems in %parallel but then immediately
2310 * initialize a vector including ghost entries of the solution for further
2311 * processing. The various <code>*_solution</code> vectors are therefore
2312 * filled immediately after solving their respective linear system in
2313 * %parallel and will always contain values for all
2314 * @ref GlossLocallyRelevantDof "locally relevant degrees of freedom";
2315 * the fully distributed vectors that we obtain from the solution process
2316 * and that only ever contain the
2317 * @ref GlossLocallyOwnedDof "locally owned degrees of freedom" are
2318 * destroyed immediately after the solution process and after we have
2319 * copied the relevant values into the member variable vectors.
2320 *
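 * A minimal sketch of this pattern, with names chosen only for illustration
 * (the code that actually does this is part of <code>setup_dofs()</code>
 * and the solver functions below), looks like this:
 *
 * @code
 * // Sketch only: the two index sets are assumed to have been extracted
 * // from the DoFHandler already.
 * TrilinosWrappers::MPI::Vector distributed_T(locally_owned_dofs,
 *                                             MPI_COMM_WORLD);
 * // ... solve the linear system into 'distributed_T' ...
 *
 * TrilinosWrappers::MPI::Vector ghosted_T(locally_owned_dofs,
 *                                         locally_relevant_dofs,
 *                                         MPI_COMM_WORLD);
 * ghosted_T = distributed_T; // imports the ghost values needed later
 * @endcode
 *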
2321 * @code
2322 * parallel::distributed::Triangulation<dim> triangulation;
2323 * double global_Omega_diameter;
2324 *
2325 * const MappingQ<dim> mapping;
2326 *
2327 * const FESystem<dim> stokes_fe;
2328 * DoFHandler<dim> stokes_dof_handler;
2329 * AffineConstraints<double> stokes_constraints;
2330 *
2331 * TrilinosWrappers::BlockSparseMatrix stokes_matrix;
2332 * TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
2333 *
2334 * TrilinosWrappers::MPI::BlockVector stokes_solution;
2335 * TrilinosWrappers::MPI::BlockVector old_stokes_solution;
2336 * TrilinosWrappers::MPI::BlockVector stokes_rhs;
2337 *
2338 *
2339 * FE_Q<dim> temperature_fe;
2340 * DoFHandler<dim> temperature_dof_handler;
2341 * AffineConstraints<double> temperature_constraints;
2342 *
2343 * TrilinosWrappers::SparseMatrix temperature_mass_matrix;
2344 * TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
2345 * TrilinosWrappers::SparseMatrix temperature_matrix;
2346 *
2347 * TrilinosWrappers::MPI::Vector temperature_solution;
2348 * TrilinosWrappers::MPI::Vector old_temperature_solution;
2349 * TrilinosWrappers::MPI::Vector old_old_temperature_solution;
2350 * TrilinosWrappers::MPI::Vector temperature_rhs;
2351 *
2352 *
2353 * double time_step;
2354 * double old_time_step;
2355 * unsigned int timestep_number;
2356 *
2357 * std::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
2358 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
2359 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;
2360 *
2361 * bool rebuild_stokes_matrix;
2362 * bool rebuild_stokes_preconditioner;
2363 * bool rebuild_temperature_matrices;
2364 * bool rebuild_temperature_preconditioner;
2365 *
2366 * @endcode
2367 *
2368 * The next member variable, <code>computing_timer</code> is used to
2369 * conveniently account for compute time spent in certain "sections" of
2370 * the code that are repeatedly entered. For example, we will enter (and
2371 * leave) sections for Stokes matrix assembly and would like to accumulate
2372 * the run time spent in this section over all time steps. Every so many
2373 * time steps as well as at the end of the program (through the destructor
2374 * of the TimerOutput class) we will then produce a nice summary of the
2375 * times spent in the different sections into which we categorize the
2376 * run-time of this program.
2377 *
2378 * @code
2379 * TimerOutput computing_timer;
2380 *
2381 * @endcode
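 *
 * The usual way to enter and leave such a section is through a scoped
 * object so that the section is closed automatically at the end of the
 * enclosing block. The section name below is only an example of the
 * pattern used throughout the functions that follow:
 *
 * @code
 * // Entered here, left automatically when 'timer_section' goes out of scope.
 * TimerOutput::Scope timer_section(computing_timer, "Assemble Stokes system");
 * @endcode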
2382 *
2383 * After these member variables we have a number of auxiliary functions
2384 * that have been broken out of the ones listed above. Specifically, there
2385 * are first three functions that we call from <code>setup_dofs</code> and
2386 * then the ones that do the assembling of linear systems:
2387 *
2388 * @code
2389 * void setup_stokes_matrix(
2390 * const std::vector<IndexSet> &stokes_partitioning,
2391 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2392 * void setup_stokes_preconditioner(
2393 * const std::vector<IndexSet> &stokes_partitioning,
2394 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2395 * void setup_temperature_matrices(
2396 * const IndexSet &temperature_partitioning,
2397 * const IndexSet &temperature_relevant_partitioning);
2398 *
2399 *
2400 * @endcode
2401 *
2402 * Following the @ref MTWorkStream "task-based parallelization" paradigm,
2403 * we split all the assembly routines into two parts: a first part that
2404 * can do all the calculations on a certain cell without taking care of
2405 * other threads, and a second part (which is writing the local data into
2406 * the global matrices and vectors) which can be entered by only one
2407 * thread at a time. In order to implement that, we provide functions for
2408 * each of those two steps for all the four assembly routines that we use
2409 * in this program. The following eight functions do exactly this:
2410 *
2411 * @code
2412 * void local_assemble_stokes_preconditioner(
2413 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2414 * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
2415 * Assembly::CopyData::StokesPreconditioner<dim> & data);
2416 *
2417 * void copy_local_to_global_stokes_preconditioner(
2418 * const Assembly::CopyData::StokesPreconditioner<dim> &data);
2419 *
2420 *
2421 * void local_assemble_stokes_system(
2422 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2423 * Assembly::Scratch::StokesSystem<dim> & scratch,
2424 * Assembly::CopyData::StokesSystem<dim> & data);
2425 *
2426 * void copy_local_to_global_stokes_system(
2427 * const Assembly::CopyData::StokesSystem<dim> &data);
2428 *
2429 *
2430 * void local_assemble_temperature_matrix(
2431 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2432 * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
2433 * Assembly::CopyData::TemperatureMatrix<dim> & data);
2434 *
2435 * void copy_local_to_global_temperature_matrix(
2436 * const Assembly::CopyData::TemperatureMatrix<dim> &data);
2437 *
2438 *
2439 *
2440 * void local_assemble_temperature_rhs(
2441 * const std::pair<double, double> global_T_range,
2442 * const double global_max_velocity,
2443 * const double global_entropy_variation,
2444 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2445 * Assembly::Scratch::TemperatureRHS<dim> & scratch,
2446 * Assembly::CopyData::TemperatureRHS<dim> & data);
2447 *
2448 * void copy_local_to_global_temperature_rhs(
2449 * const Assembly::CopyData::TemperatureRHS<dim> &data);
2450 *
2451 * @endcode
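 *
 * To indicate how these pairs of functions are eventually used, here is a
 * compressed sketch (not the literal call made by this program, which in
 * addition restricts the loop to locally owned cells) of how a worker and
 * copier, together with the Scratch and CopyData objects from above, might
 * be handed to WorkStream::run from inside one of the assembly driver
 * functions; the quadrature formula and update flags are placeholders:
 *
 * @code
 * // Sketch only, written as if inside a member function of this class.
 * auto worker =
 *   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
 *          Assembly::Scratch::StokesPreconditioner<dim> &        scratch,
 *          Assembly::CopyData::StokesPreconditioner<dim> &       data) {
 *     this->local_assemble_stokes_preconditioner(cell, scratch, data);
 *   };
 * auto copier =
 *   [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
 *     this->copy_local_to_global_stokes_preconditioner(data);
 *   };
 *
 * WorkStream::run(stokes_dof_handler.begin_active(),
 *                 stokes_dof_handler.end(),
 *                 worker,
 *                 copier,
 *                 Assembly::Scratch::StokesPreconditioner<dim>(
 *                   stokes_fe, quadrature_formula, mapping, update_flags),
 *                 Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));
 * @endcode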
2452 *
2453 * Finally, we forward declare a member class that we will define later on
2454 * and that will be used to compute a number of quantities from our
2455 * solution vectors that we'd like to put into the output files for
2456 * visualization.
2457 *
2458 * @code
2459 * class Postprocessor;
2460 * };
2461 *
2462 *
2463 * @endcode
2464 *
2465 *
2466 * <a name="BoussinesqFlowProblemclassimplementation"></a>
2467 * <h3>BoussinesqFlowProblem class implementation</h3>
2468 *
2469
2470 *
2471 *
2472 * <a name="BoussinesqFlowProblemParameters"></a>
2473 * <h4>BoussinesqFlowProblem::Parameters</h4>
2474 *
2475
2476 *
2477 * Here comes the definition of the parameters for the Stokes problem. We
2478 * allow the user to set the end time for the simulation, the levels of
2479 * refinement (both global and adaptive, which taken together specify the
2480 * maximum level the cells are allowed to have), and the interval between
2481 * mesh refinements in the time stepping.
2482 *
2483
2484 *
2485 * Then, we let the user specify constants for the stabilization parameters
2486 * (as discussed in the introduction), the polynomial degree for the Stokes
2487 * velocity space, whether to use the locally conservative discretization
2488 * based on FE_DGP elements for the pressure or not (FE_Q elements for
2489 * pressure), and the polynomial degree for the temperature interpolation.
2490 *
2491
2492 *
2493 * The constructor checks for a valid input file (if there is none, a file
2494 * with default values for all parameters is written), and then parses the
2495 * parameters.
2496 *
2497 * @code
2498 * template <int dim>
2499 * BoussinesqFlowProblem<dim>::Parameters::Parameters(
2500 * const std::string &parameter_filename)
2501 * : end_time(1e8)
2502 * , initial_global_refinement(2)
2503 * , initial_adaptive_refinement(2)
2504 * , adaptive_refinement_interval(10)
2505 * , stabilization_alpha(2)
2506 * , stabilization_c_R(0.11)
2507 * , stabilization_beta(0.078)
2508 * , stokes_velocity_degree(2)
2509 * , use_locally_conservative_discretization(true)
2510 * , temperature_degree(2)
2511 * {
2512 * ParameterHandler prm;
2513 * BoussinesqFlowProblem<dim>::Parameters::declare_parameters(prm);
2514 *
2515 * std::ifstream parameter_file(parameter_filename);
2516 *
2517 * if (!parameter_file)
2518 * {
2519 * parameter_file.close();
2520 *
2521 * std::ofstream parameter_out(parameter_filename);
2522 * prm.print_parameters(parameter_out, ParameterHandler::Text);
2523 *
2524 * AssertThrow(
2525 * false,
2526 * ExcMessage(
2527 * "Input parameter file <" + parameter_filename +
2528 * "> not found. Creating a template file of the same name."));
2529 * }
2530 *
2531 * prm.parse_input(parameter_file);
2532 * parse_parameters(prm);
2533 * }
2534 *
2535 *
2536 *
2537 * @endcode
2538 *
2539 * Next we have a function that declares the parameters that we expect in
2540 * the input file, together with their data types, default values and a
2541 * description:
2542 *
2543 * @code
2544 * template <int dim>
2545 * void BoussinesqFlowProblem<dim>::Parameters::declare_parameters(
2546 * ParameterHandler &prm)
2547 * {
2548 * prm.declare_entry("End time",
2549 * "1e8",
2550 * Patterns::Double(0),
2551 * "The end time of the simulation in years.");
2552 * prm.declare_entry("Initial global refinement",
2553 * "2",
2554 * Patterns::Integer(0),
2555 * "The number of global refinement steps performed on "
2556 * "the initial coarse mesh, before the problem is first "
2557 * "solved there.");
2558 * prm.declare_entry("Initial adaptive refinement",
2559 * "2",
2560 * Patterns::Integer(0),
2561 * "The number of adaptive refinement steps performed after "
2562 * "initial global refinement.");
2563 * prm.declare_entry("Time steps between mesh refinement",
2564 * "10",
2565 * Patterns::Integer(1),
2566 * "The number of time steps after which the mesh is to be "
2567 * "adapted based on computed error indicators.");
2568 * prm.declare_entry("Generate graphical output",
2569 * "false",
2570 * Patterns::Bool(),
2571 * "Whether graphical output is to be generated or not. "
2572 * "You may not want to get graphical output if the number "
2573 * "of processors is large.");
2574 * prm.declare_entry("Time steps between graphical output",
2575 * "50",
2576 * Patterns::Integer(1),
2577 * "The number of time steps between each generation of "
2578 * "graphical output files.");
2579 *
2580 * prm.enter_subsection("Stabilization parameters");
2581 * {
2582 * prm.declare_entry("alpha",
2583 * "2",
2584 * Patterns::Double(1, 2),
2585 * "The exponent in the entropy viscosity stabilization.");
2586 * prm.declare_entry("c_R",
2587 * "0.11",
2588 * Patterns::Double(0),
2589 * "The c_R factor in the entropy viscosity "
2590 * "stabilization.");
2591 * prm.declare_entry("beta",
2592 * "0.078",
2593 * Patterns::Double(0),
2594 * "The beta factor in the artificial viscosity "
2595 * "stabilization. An appropriate value for 2d is 0.052 "
2596 * "and 0.078 for 3d.");
2597 * }
2598 * prm.leave_subsection();
2599 *
2600 * prm.enter_subsection("Discretization");
2601 * {
2602 * prm.declare_entry(
2603 * "Stokes velocity polynomial degree",
2604 * "2",
2605 * Patterns::Integer(1),
2606 * "The polynomial degree to use for the velocity variables "
2607 * "in the Stokes system.");
2608 * prm.declare_entry(
2609 * "Temperature polynomial degree",
2610 * "2",
2611 * Patterns::Integer(1),
2612 * "The polynomial degree to use for the temperature variable.");
2613 * prm.declare_entry(
2614 * "Use locally conservative discretization",
2615 * "true",
2616 * Patterns::Bool(),
2617 * "Whether to use a Stokes discretization that is locally "
2618 * "conservative at the expense of a larger number of degrees "
2619 * "of freedom, or to go with a cheaper discretization "
2620 * "that does not locally conserve mass (although it is "
2621 * "globally conservative).");
2622 * }
2623 * prm.leave_subsection();
2624 * }
2625 *
2626 *
2627 *
2628 * @endcode
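 *
 * For reference, an input file matching these declarations uses the usual
 * ParameterHandler text format. The following excerpt is only an
 * illustration of that syntax, not a complete or recommended input file:
 *
 * @code
 * set End time                  = 1e8
 * set Initial global refinement = 4
 *
 * subsection Stabilization parameters
 *   set beta = 0.078
 * end
 * @endcode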
2629 *
2630 * And then we need a function that reads the contents of the
2631 * ParameterHandler object we get by reading the input file and puts the
2632 * results into variables that store the values of the parameters we have
2633 * previously declared:
2634 *
2635 * @code
2636 * template <int dim>
2637 * void BoussinesqFlowProblem<dim>::Parameters::parse_parameters(
2638 * ParameterHandler &prm)
2639 * {
2640 * end_time = prm.get_double("End time");
2641 * initial_global_refinement = prm.get_integer("Initial global refinement");
2642 * initial_adaptive_refinement =
2643 * prm.get_integer("Initial adaptive refinement");
2644 *
2645 * adaptive_refinement_interval =
2646 * prm.get_integer("Time steps between mesh refinement");
2647 *
2648 * generate_graphical_output = prm.get_bool("Generate graphical output");
2649 * graphical_output_interval =
2650 * prm.get_integer("Time steps between graphical output");
2651 *
2652 * prm.enter_subsection("Stabilization parameters");
2653 * {
2654 * stabilization_alpha = prm.get_double("alpha");
2655 * stabilization_c_R = prm.get_double("c_R");
2656 * stabilization_beta = prm.get_double("beta");
2657 * }
2658 * prm.leave_subsection();
2659 *
2660 * prm.enter_subsection("Discretization");
2661 * {
2662 * stokes_velocity_degree =
2663 * prm.get_integer("Stokes velocity polynomial degree");
2664 * temperature_degree = prm.get_integer("Temperature polynomial degree");
2665 * use_locally_conservative_discretization =
2666 * prm.get_bool("Use locally conservative discretization");
2667 * }
2668 * prm.leave_subsection();
2669 * }
2670 *
2671 *
2672 *
2673 * @endcode
2674 *
2675 *
2676 * <a name="BoussinesqFlowProblemBoussinesqFlowProblem"></a>
2677 * <h4>BoussinesqFlowProblem::BoussinesqFlowProblem</h4>
2678 *
2679
2680 *
2681 * The constructor of the problem is very similar to the constructor in
2682 * @ref step_31 "step-31". What is different is the %parallel communication: Trilinos uses
2683 * a message passing interface (MPI) for data distribution. When entering
2684 * the BoussinesqFlowProblem class, we have to decide how the parallelization
2685 * is to be done. We choose a rather simple strategy and let all processors
2686 * that are running the program work together, specified by the communicator
2687 * <code>MPI_COMM_WORLD</code>. Next, we create the output stream (as we
2688 * already did in @ref step_18 "step-18") that only generates output on the first MPI
2689 * process and is completely forgetful on all others. The implementation of
2690 * this idea is to check the process number: on the first process,
2691 * <code>pcout</code> gets a true argument and uses the <code>std::cout</code>
2692 * stream for output. If we are, say, processor five, then we will give a
2693 * <code>false</code> argument to <code>pcout</code>, which means that the
2694 * output of that processor will not be printed. With the exception of the
2695 * mapping object (for which we use polynomials of degree 4) all but the
2696 * final member variable are exactly the same as in @ref step_31 "step-31".
2697 *
2698
2699 *
2700 * This final object, the TimerOutput object, is then told to restrict
2701 * output to the <code>pcout</code> stream (processor 0), and then we
2702 * specify that we want to get a summary table at the end of the program
2703 * which shows us wallclock times (as opposed to CPU times). We will
2704 * manually also request intermediate summaries every so many time steps in
2705 * the <code>run()</code> function below.
2706 *
2707 * @code
2708 * template <int dim>
2709 * BoussinesqFlowProblem<dim>::BoussinesqFlowProblem(Parameters &parameters_)
2710 * : parameters(parameters_)
2711 * , pcout(std::cout, (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0))
2712 * ,
2713 *
2714 * triangulation(MPI_COMM_WORLD,
2715 * typename Triangulation<dim>::MeshSmoothing(
2716 * Triangulation<dim>::smoothing_on_refinement |
2717 * Triangulation<dim>::smoothing_on_coarsening))
2718 * ,
2719 *
2720 * global_Omega_diameter(0.)
2721 * ,
2722 *
2723 * mapping(4)
2724 * ,
2725 *
2726 * stokes_fe(FE_Q<dim>(parameters.stokes_velocity_degree),
2727 * dim,
2728 * (parameters.use_locally_conservative_discretization ?
2729 * static_cast<const FiniteElement<dim> &>(
2730 * FE_DGP<dim>(parameters.stokes_velocity_degree - 1)) :
2731 * static_cast<const FiniteElement<dim> &>(
2732 * FE_Q<dim>(parameters.stokes_velocity_degree - 1))),
2733 * 1)
2734 * ,
2735 *
2736 * stokes_dof_handler(triangulation)
2737 * ,
2738 *
2739 * temperature_fe(parameters.temperature_degree)
2740 * , temperature_dof_handler(triangulation)
2741 * ,
2742 *
2743 * time_step(0)
2744 * , old_time_step(0)
2745 * , timestep_number(0)
2746 * , rebuild_stokes_matrix(true)
2747 * , rebuild_stokes_preconditioner(true)
2748 * , rebuild_temperature_matrices(true)
2749 * , rebuild_temperature_preconditioner(true)
2750 * ,
2751 *
2752 * computing_timer(MPI_COMM_WORLD,
2753 * pcout,
2754 * TimerOutput::summary,
2755 * TimerOutput::wall_times)
2756 * {}
2757 *
2758 *
2759 *
2760 * @endcode
2761 *
2762 *
2763 * <a name="TheBoussinesqFlowProblemhelperfunctions"></a>
2764 * <h4>The BoussinesqFlowProblem helper functions</h4>
2765 *
2766 * <a name="BoussinesqFlowProblemget_maximal_velocity"></a>
2767 * <h5>BoussinesqFlowProblem::get_maximal_velocity</h5>
2768 *
2769
2770 *
2771 * Except for two small details, the function to compute the global maximum
2772 * of the velocity is the same as in @ref step_31 "step-31". The first detail is actually
2773 * common to all functions that implement loops over all cells in the
2774 * triangulation: When operating in %parallel, each processor can only work
2775 * on a chunk of cells since each processor only has a certain part of the
2776 * entire triangulation. This chunk of cells that we want to work on is
2777 * identified via a so-called <code>subdomain_id</code>, as we also did in
2778 * @ref step_18 "step-18". All we need to change is hence to perform the cell-related
2779 * operations only on cells that are owned by the current process (as
2780 * opposed to ghost or artificial cells), i.e. for which the subdomain id
2781 * equals the number of the process ID. Since this is a commonly used
2782 * operation, there is a shortcut for this operation: we can ask whether the
2783 * cell is owned by the current processor using
2784 * <code>cell-@>is_locally_owned()</code>.
2785 *
2786
2787 *
2788 * The second difference is the way we calculate the maximum value. Before,
2789 * we could simply have a <code>double</code> variable that we checked
2790 * against on each quadrature point for each cell. Now, we have to be a bit
2791 * more careful since each processor only operates on a subset of
2792 * cells. What we do is to first let each processor calculate the maximum
2793 * among its cells, and then do a global communication operation
2794 * <code>Utilities::MPI::max</code> that computes the maximum value among
2795 * all the maximum values of the individual processors. MPI provides such a
2796 * call, but it's even simpler to use the respective function in namespace
2797 * Utilities::MPI using the MPI communicator object since that will do the
2798 * right thing even if we work without MPI and on a single machine only. The
2799 * call to <code>Utilities::MPI::max</code> needs two arguments, namely the
2800 * local maximum (input) and the MPI communicator, which is MPI_COMM_WORLD
2801 * in this example.
2802 *
2803 * @code
2804 * template <int dim>
2805 * double BoussinesqFlowProblem<dim>::get_maximal_velocity() const
2806 * {
2807 * const QIterated<dim> quadrature_formula(QTrapezoid<1>(),
2808 * parameters.stokes_velocity_degree);
2809 * const unsigned int n_q_points = quadrature_formula.size();
2810 *
2811 * FEValues<dim> fe_values(mapping,
2812 * stokes_fe,
2813 * quadrature_formula,
2814 * update_values);
2815 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2816 *
2817 * const FEValuesExtractors::Vector velocities(0);
2818 *
2819 * double max_local_velocity = 0;
2820 *
2821 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2822 * if (cell->is_locally_owned())
2823 * {
2824 * fe_values.reinit(cell);
2825 * fe_values[velocities].get_function_values(stokes_solution,
2826 * velocity_values);
2827 *
2828 * for (unsigned int q = 0; q < n_q_points; ++q)
2829 * max_local_velocity =
2830 * std::max(max_local_velocity, velocity_values[q].norm());
2831 * }
2832 *
2833 * return Utilities::MPI::max(max_local_velocity, MPI_COMM_WORLD);
2834 * }
2835 *
2836 *
2837 * @endcode
2838 *
2839 *
2840 * <a name="BoussinesqFlowProblemget_cfl_number"></a>
2841 * <h5>BoussinesqFlowProblem::get_cfl_number</h5>
2842 *
2843
2844 *
2845 * The next function does something similar, but we now compute the CFL
2846 * number, i.e., maximal velocity on a cell divided by the cell
2847 * diameter. This number is necessary to determine the time step size, as we
2848 * use a semi-explicit time stepping scheme for the temperature equation
2849 * (see @ref step_31 "step-31" for a discussion). We compute it in the same way as above:
2850 * Compute the local maximum over all locally owned cells, then exchange it
2851 * via MPI to find the global maximum.
2852 *
2853 * @code
2854 * template <int dim>
2855 * double BoussinesqFlowProblem<dim>::get_cfl_number() const
2856 * {
2857 * const QIterated<dim> quadrature_formula(QTrapezoid<1>(),
2858 * parameters.stokes_velocity_degree);
2859 * const unsigned int n_q_points = quadrature_formula.size();
2860 *
2861 * FEValues<dim> fe_values(mapping,
2862 * stokes_fe,
2863 * quadrature_formula,
2864 * update_values);
2865 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2866 *
2867 * const FEValuesExtractors::Vector velocities(0);
2868 *
2869 * double max_local_cfl = 0;
2870 *
2871 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2872 * if (cell->is_locally_owned())
2873 * {
2874 * fe_values.reinit(cell);
2875 * fe_values[velocities].get_function_values(stokes_solution,
2876 * velocity_values);
2877 *
2878 * double max_local_velocity = 1e-10;
2879 * for (unsigned int q = 0; q < n_q_points; ++q)
2880 * max_local_velocity =
2881 * std::max(max_local_velocity, velocity_values[q].norm());
2882 * max_local_cfl =
2883 * std::max(max_local_cfl, max_local_velocity / cell->diameter());
2884 * }
2885 *
2886 * return Utilities::MPI::max(max_local_cfl, MPI_COMM_WORLD);
2887 * }
2888 *
2889 *
2890 * @endcode
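 *
 * How this number is then turned into an actual time step size is decided
 * in <code>run()</code> further down. Schematically, and with the constant
 * left unspecified here because it depends on the time stepping scheme and
 * the polynomial degree, the pattern is simply:
 *
 * @code
 * // Schematic only: 'cfl_fraction' stands for the scheme-dependent safety
 * // factor the program actually uses.
 * const double cfl_number = get_cfl_number();
 * time_step               = cfl_fraction / cfl_number;
 * @endcode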
2891 *
2892 *
2893 * <a name="BoussinesqFlowProblemget_entropy_variation"></a>
2894 * <h5>BoussinesqFlowProblem::get_entropy_variation</h5>
2895 *
2896
2897 *
2898 * Next comes the computation of the global entropy variation
2899 * @f$\|E(T)-\bar{E}(T)\|_\infty@f$ where the entropy @f$E@f$ is defined as
2900 * discussed in the introduction. This is needed for the evaluation of the
2901 * stabilization in the temperature equation as explained in the
2902 * introduction. The entropy variation is actually only needed if we use
2903 * @f$\alpha=2@f$ as a power in the residual computation. The infinity norm is
2904 * computed by the maxima over quadrature points, as usual in discrete
2905 * computations.
2906 *
2907
2908 *
2909 * In order to compute this quantity, we first have to find the
2910 * space-average @f$\bar{E}(T)@f$ and then evaluate the maximum. However, that
2911 * means that we would need to perform two loops. We can avoid the overhead
2912 * by noting that @f$\|E(T)-\bar{E}(T)\|_\infty =
2913 * \max\big(E_{\textrm{max}}(T)-\bar{E}(T),
2914 * \bar{E}(T)-E_{\textrm{min}}(T)\big)@f$, i.e., the maximum out of the
2915 * deviation from the average entropy in positive and negative
2916 * directions. The four quantities we need for the latter formula (maximum
2917 * entropy, minimum entropy, average entropy, area) can all be evaluated in
2918 * the same loop over all cells, so we choose this simpler variant.
2919 *
2920 * @code
2921 * template <int dim>
2922 * double BoussinesqFlowProblem<dim>::get_entropy_variation(
2923 * const double average_temperature) const
2924 * {
2925 * if (parameters.stabilization_alpha != 2)
2926 * return 1.;
2927 *
2928 * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 1);
2929 * const unsigned int n_q_points = quadrature_formula.size();
2930 *
2931 * FEValues<dim> fe_values(temperature_fe,
2932 * quadrature_formula,
2933 * update_values | update_JxW_values);
2934 * std::vector<double> old_temperature_values(n_q_points);
2935 * std::vector<double> old_old_temperature_values(n_q_points);
2936 *
2937 * @endcode
2938 *
2939 * In the two functions above we computed the maximum of numbers that were
2940 * all non-negative, so we knew that zero was certainly a lower bound. On
2941 * the other hand, here we need to find the maximum deviation from the
2942 * average value, i.e., we will need to know the maximal and minimal
2943 * values of the entropy for which we don't a priori know the sign.
2944 *
2945
2946 *
2947 * To compute it, we can therefore start with the largest and smallest
2948 * possible values we can store in a double precision number: The minimum
2949 * is initialized with a bigger and the maximum with a smaller number than
2950 * any one that is going to appear. We are then guaranteed that these
2951 * numbers will be overwritten in the loop on the first cell or, if this
2952 * processor does not own any cells, in the communication step at the
2953 * latest. The following loop then computes the minimum and maximum local
2954 * entropy as well as keeps track of the area/volume of the part of the
2955 * domain we locally own and the integral over the entropy on it:
2956 *
2957 * @code
2958 * double min_entropy = std::numeric_limits<double>::max(),
2959 * max_entropy = -std::numeric_limits<double>::max(), area = 0,
2960 * entropy_integrated = 0;
2961 *
2962 * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
2963 * if (cell->is_locally_owned())
2964 * {
2965 * fe_values.reinit(cell);
2966 * fe_values.get_function_values(old_temperature_solution,
2967 * old_temperature_values);
2968 * fe_values.get_function_values(old_old_temperature_solution,
2969 * old_old_temperature_values);
2970 * for (unsigned int q = 0; q < n_q_points; ++q)
2971 * {
2972 * const double T =
2973 * (old_temperature_values[q] + old_old_temperature_values[q]) / 2;
2974 * const double entropy =
2975 * ((T - average_temperature) * (T - average_temperature));
2976 *
2977 * min_entropy = std::min(min_entropy, entropy);
2978 * max_entropy = std::max(max_entropy, entropy);
2979 * area += fe_values.JxW(q);
2980 * entropy_integrated += fe_values.JxW(q) * entropy;
2981 * }
2982 * }
2983 *
2984 * @endcode
2985 *
2986 * Now we only need to exchange data between processors: we need to sum
2987 * the two integrals (<code>area</code>, <code>entropy_integrated</code>),
2988 * and get the extrema for maximum and minimum. We could do this through
2990 * four different data exchanges, but we can do it with two:
2990 * Utilities::MPI::sum also exists in a variant that takes an array of
2991 * values that are all to be summed up. And we can also utilize the
2992 * Utilities::MPI::max function by realizing that forming the minimum over
2993 * the minimal entropies equals forming the negative of the maximum over
2994 * the negative of the minimal entropies; this maximum can then be
2995 * combined with forming the maximum over the maximal entropies.
2996 *
2997 * @code
2998 * const double local_sums[2] = {entropy_integrated, area},
2999 * local_maxima[2] = {-min_entropy, max_entropy};
3000 * double global_sums[2], global_maxima[2];
3001 *
3002 * Utilities::MPI::sum(local_sums, MPI_COMM_WORLD, global_sums);
3003 * Utilities::MPI::max(local_maxima, MPI_COMM_WORLD, global_maxima);
3004 *
3005 * @endcode
3006 *
3007 * Having computed everything this way, we can then compute the average
3008 * entropy and find the @f$L^\infty@f$ norm by taking the larger of the
3009 * deviation of the maximum or minimum from the average:
3010 *
3011 * @code
3012 * const double average_entropy = global_sums[0] / global_sums[1];
3013 * const double entropy_diff = std::max(global_maxima[1] - average_entropy,
3014 * average_entropy - (-global_maxima[0]));
3015 * return entropy_diff;
3016 * }
3017 *
3018 *
3019 *
3020 * @endcode
3021 *
3022 *
3023 * <a name="BoussinesqFlowProblemget_extrapolated_temperature_range"></a>
3024 * <h5>BoussinesqFlowProblem::get_extrapolated_temperature_range</h5>
3025 *
3026
3027 *
3028 * The next function computes the minimal and maximal value of the
3029 * extrapolated temperature over the entire domain. Again, this is only a
3030 * slightly modified version of the respective function in @ref step_31 "step-31". As in
3031 * the function above, we collect local minima and maxima and then compute
3032 * the global extrema using the same trick as above.
3033 *
3034
3035 *
3036 * As already discussed in @ref step_31 "step-31", the function needs to distinguish
3037 * between the first and all following time steps because it uses a higher
3038 * order temperature extrapolation scheme when at least two previous time
3039 * steps are available.
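 *
 * In formulas (this is simply a restatement of what the loop below computes
 * at each quadrature point): denoting by @f$k_n@f$ and @f$k_{n-1}@f$ the
 * current and previous time step sizes, the extrapolated temperature is
 * @f$T^{\text{ext}} = \left(1+\frac{k_n}{k_{n-1}}\right) T^{n-1}
 * - \frac{k_n}{k_{n-1}} T^{n-2}@f$, which degenerates to
 * @f$T^{\text{ext}} = T^{n-1}@f$ in the very first time step.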
3040 *
3041 * @code
3042 * template <int dim>
3043 * std::pair<double, double>
3044 * BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range() const
3045 * {
3046 * const QIterated<dim> quadrature_formula(QTrapezoid<1>(),
3047 * parameters.temperature_degree);
3048 * const unsigned int n_q_points = quadrature_formula.size();
3049 *
3050 * FEValues<dim> fe_values(mapping,
3051 * temperature_fe,
3052 * quadrature_formula,
3053 * update_values);
3054 * std::vector<double> old_temperature_values(n_q_points);
3055 * std::vector<double> old_old_temperature_values(n_q_points);
3056 *
3057 * double min_local_temperature = std::numeric_limits<double>::max(),
3058 * max_local_temperature = -std::numeric_limits<double>::max();
3059 *
3060 * if (timestep_number != 0)
3061 * {
3062 * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3063 * if (cell->is_locally_owned())
3064 * {
3065 * fe_values.reinit(cell);
3066 * fe_values.get_function_values(old_temperature_solution,
3067 * old_temperature_values);
3068 * fe_values.get_function_values(old_old_temperature_solution,
3069 * old_old_temperature_values);
3070 *
3071 * for (unsigned int q = 0; q < n_q_points; ++q)
3072 * {
3073 * const double temperature =
3074 * (1. + time_step / old_time_step) *
3075 * old_temperature_values[q] -
3076 * time_step / old_time_step * old_old_temperature_values[q];
3077 *
3078 * min_local_temperature =
3079 * std::min(min_local_temperature, temperature);
3080 * max_local_temperature =
3081 * std::max(max_local_temperature, temperature);
3082 * }
3083 * }
3084 * }
3085 * else
3086 * {
3087 * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3088 * if (cell->is_locally_owned())
3089 * {
3090 * fe_values.reinit(cell);
3091 * fe_values.get_function_values(old_temperature_solution,
3092 * old_temperature_values);
3093 *
3094 * for (unsigned int q = 0; q < n_q_points; ++q)
3095 * {
3096 * const double temperature = old_temperature_values[q];
3097 *
3098 * min_local_temperature =
3099 * std::min(min_local_temperature, temperature);
3100 * max_local_temperature =
3101 * std::max(max_local_temperature, temperature);
3102 * }
3103 * }
3104 * }
3105 *
3106 * double local_extrema[2] = {-min_local_temperature, max_local_temperature};
3107 * double global_extrema[2];
3108 * Utilities::MPI::max(local_extrema, MPI_COMM_WORLD, global_extrema);
3109 *
3110 * return std::make_pair(-global_extrema[0], global_extrema[1]);
3111 * }
3112 *
3113 *
3114 * @endcode
3115 *
3116 *
3117 * <a name="BoussinesqFlowProblemcompute_viscosity"></a>
3118 * <h5>BoussinesqFlowProblem::compute_viscosity</h5>
3119 *
3120
3121 *
3122 * The function that calculates the viscosity is purely local and so needs
3123 * no communication at all. It is mostly the same as in @ref step_31 "step-31" but with an
3124 * updated formulation of the viscosity if @f$\alpha=2@f$ is chosen.
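 *
 * Summarizing what the code below computes for @f$\alpha=2@f$: on each cell
 * @f$K@f$ the artificial viscosity is
 * @f$\nu_h|_K=\min\left(\beta\,\|\mathbf u\|_{\infty,K}\,h_K,\;
 * c_R\,h_K^2\,\frac{\|R_2(T)\|_{\infty,K}}{\|E(T)-\bar E(T)\|_{\infty,\Omega}}\right)@f$,
 * where the residual @f$R_2(T)@f$ is the strong residual of the temperature
 * equation additionally weighted by @f$|T-T_{\mathrm m}|@f$, and the
 * denominator is exactly the entropy variation computed by
 * <code>get_entropy_variation()</code> above. In the very first time step
 * only the first-order term @f$\beta\,\|\mathbf u\|_{\infty,K}\,h_K@f$ is
 * used: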
3125 *
3126 * @code
3127 * template <int dim>
3128 * double BoussinesqFlowProblem<dim>::compute_viscosity(
3129 * const std::vector<double> & old_temperature,
3130 * const std::vector<double> & old_old_temperature,
3131 * const std::vector<Tensor<1, dim>> & old_temperature_grads,
3132 * const std::vector<Tensor<1, dim>> & old_old_temperature_grads,
3133 * const std::vector<double> & old_temperature_laplacians,
3134 * const std::vector<double> & old_old_temperature_laplacians,
3135 * const std::vector<Tensor<1, dim>> & old_velocity_values,
3136 * const std::vector<Tensor<1, dim>> & old_old_velocity_values,
3137 * const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
3138 * const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
3139 * const double global_u_infty,
3140 * const double global_T_variation,
3141 * const double average_temperature,
3142 * const double global_entropy_variation,
3143 * const double cell_diameter) const
3144 * {
3145 * if (global_u_infty == 0)
3146 * return 5e-3 * cell_diameter;
3147 *
3148 * const unsigned int n_q_points = old_temperature.size();
3149 *
3150 * double max_residual = 0;
3151 * double max_velocity = 0;
3152 *
3153 * for (unsigned int q = 0; q < n_q_points; ++q)
3154 * {
3155 * const Tensor<1, dim> u =
3156 * (old_velocity_values[q] + old_old_velocity_values[q]) / 2;
3157 *
3158 * const SymmetricTensor<2, dim> strain_rate =
3159 * (old_strain_rates[q] + old_old_strain_rates[q]) / 2;
3160 *
3161 * const double T = (old_temperature[q] + old_old_temperature[q]) / 2;
3162 * const double dT_dt =
3163 * (old_temperature[q] - old_old_temperature[q]) / old_time_step;
3164 * const double u_grad_T =
3165 * u * (old_temperature_grads[q] + old_old_temperature_grads[q]) / 2;
3166 *
3167 * const double kappa_Delta_T =
3168 * EquationData::kappa *
3169 * (old_temperature_laplacians[q] + old_old_temperature_laplacians[q]) /
3170 * 2;
3171 * const double gamma =
3172 * ((EquationData::radiogenic_heating * EquationData::density(T) +
3173 * 2 * EquationData::eta * strain_rate * strain_rate) /
3174 * (EquationData::density(T) * EquationData::specific_heat));
3175 *
3176 * double residual = std::abs(dT_dt + u_grad_T - kappa_Delta_T - gamma);
3177 * if (parameters.stabilization_alpha == 2)
3178 * residual *= std::abs(T - average_temperature);
3179 *
3180 * max_residual = std::max(residual, max_residual);
3181 * max_velocity = std::max(std::sqrt(u * u), max_velocity);
3182 * }
3183 *
3184 * const double max_viscosity =
3185 * (parameters.stabilization_beta * max_velocity * cell_diameter);
3186 * if (timestep_number == 0)
3187 * return max_viscosity;
3188 * else
3189 * {
3190 * Assert(old_time_step > 0, ExcInternalError());
3191 *
3192 * double entropy_viscosity;
3193 * if (parameters.stabilization_alpha == 2)
3194 * entropy_viscosity =
3195 * (parameters.stabilization_c_R * cell_diameter * cell_diameter *
3196 * max_residual / global_entropy_variation);
3197 * else
3198 * entropy_viscosity =
3199 * (parameters.stabilization_c_R * cell_diameter *
3200 * global_Omega_diameter * max_velocity * max_residual /
3201 * (global_u_infty * global_T_variation));
3202 *
3203 * return std::min(max_viscosity, entropy_viscosity);
3204 * }
3205 * }
3206 *
3207 *
3208 *
3209 * @endcode
3210 *
3211 *
3212 * <a name="TheBoussinesqFlowProblemsetupfunctions"></a>
3213 * <h4>The BoussinesqFlowProblem setup functions</h4>
3214 *
3215
3216 *
3217 * The following three functions set up the Stokes matrix, the matrix used
3218 * for the Stokes preconditioner, and the temperature matrix. The code is
3219 * mostly the same as in @ref step_31 "step-31", but it has been broken out into three
3220 * functions of their own for simplicity.
3221 *
3222
3223 *
3224 * The main functional difference between the code here and that in @ref step_31 "step-31"
3225 * is that the matrices we want to set up are distributed across multiple
3226 * processors. Since we still want to build up the sparsity pattern first
3227 * for efficiency reasons, we could continue to build the <i>entire</i>
3228 * sparsity pattern as a BlockDynamicSparsityPattern, as we did in
3229 * @ref step_31 "step-31". However, that would be inefficient: every processor would build
3230 * the same sparsity pattern, but only initialize a small part of the matrix
3231 * using it. It also violates the principle that every processor should only
3232 * work on those cells it owns (and, if necessary, the layer of ghost cells
3233 * around it).
3234 *
3235
3236 *
3237 * Rather, we use an object of type TrilinosWrappers::BlockSparsityPattern,
3238 * which is (obviously) a wrapper around a sparsity pattern object provided
3239 * by Trilinos. The advantage is that the Trilinos sparsity pattern class
3240 * can communicate across multiple processors: if this processor fills in
3241 * all the nonzero entries that result from the cells it owns, and every
3242 * other processor does so as well, then at the end after some MPI
3243 * communication initiated by the <code>compress()</code> call, we will have
3244 * the globally assembled sparsity pattern available with which the global
3245 * matrix can be initialized.
3246 *
3247
3248 *
3249 * There is one important aspect when initializing Trilinos sparsity
3250 * patterns in parallel: In addition to specifying the locally owned rows
3251 * and columns of the matrices via the @p stokes_partitioning index set, we
3252 * also supply information about all the rows we are possibly going to write
3253 * into when assembling on a certain processor. The set of locally relevant
3254 * rows contains all such rows (possibly also a few unnecessary ones, but it
3255 * is difficult to find the exact row indices before actually getting
3256 * indices on all cells and resolving constraints). This additional
3257 * information allows us to exactly determine the structure for the
3258 * off-processor data found during assembly. While Trilinos matrices are
3259 * able to collect this information on the fly as well (when initializing
3260 * them from some other reinit method), it is less efficient and leads to
3261 * problems when assembling matrices with multiple threads. In this program,
3262 * we pessimistically assume that only one processor at a time can write
3263 * into the matrix during assembly (whereas the computation is parallel),
3264 * which is fine for Trilinos matrices. In practice, one can do better by
3265 * hinting WorkStream at cells that do not share vertices, allowing for
3266 * parallelism among those cells (see the graph coloring algorithms and
3267 * WorkStream with colored iterators argument). However, that only works
3268 * when only one MPI processor is present because Trilinos' internal data
3269 * structures for accumulating off-processor data on the fly are not thread
3270 * safe. With the initialization presented here, there is no such problem
3271 * and one could safely introduce graph coloring for this algorithm.
3272 *
3273
3274 *
3275 * The only other change we need to make is to tell the
3276 * DoFTools::make_sparsity_pattern() function that it is only supposed to
3277 * work on a subset of cells, namely the ones whose
3278 * <code>subdomain_id</code> equals the number of the current processor, and
3279 * to ignore all other cells.
3280 *
3281
3282 *
3283 * This strategy is replicated across all three of the following functions.
3284 *
3285
3286 *
3287 * Note that Trilinos matrices store the information contained in the
3288 * sparsity patterns, so we can safely release the <code>sp</code> variable
3289 * once the matrix has been given the sparsity structure.
3290 *
3291 * @code
3292 * template <int dim>
3293 * void BoussinesqFlowProblem<dim>::setup_stokes_matrix(
3294 * const std::vector<IndexSet> &stokes_partitioning,
3295 * const std::vector<IndexSet> &stokes_relevant_partitioning)
3296 * {
3297 * stokes_matrix.clear();
3298 *
3299 * TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
3300 * stokes_partitioning,
3301 * stokes_relevant_partitioning,
3302 * MPI_COMM_WORLD);
3303 *
3304 * Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
3305 * for (unsigned int c = 0; c < dim + 1; ++c)
3306 * for (unsigned int d = 0; d < dim + 1; ++d)
3307 * if (!((c == dim) && (d == dim)))
3308 * coupling[c][d] = DoFTools::always;
3309 * else
3310 * coupling[c][d] = DoFTools::none;
3311 *
3312 * DoFTools::make_sparsity_pattern(stokes_dof_handler,
3313 * coupling,
3314 * sp,
3315 * stokes_constraints,
3316 * false,
3317 * Utilities::MPI::this_mpi_process(
3318 * MPI_COMM_WORLD));
3319 * sp.compress();
3320 *
3321 * stokes_matrix.reinit(sp);
3322 * }
3323 *
3324 *
3325 *
3326 * template <int dim>
3327 * void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner(
3328 * const std::vector<IndexSet> &stokes_partitioning,
3329 * const std::vector<IndexSet> &stokes_relevant_partitioning)
3330 * {
3331 * Amg_preconditioner.reset();
3332 * Mp_preconditioner.reset();
3333 *
3334 * stokes_preconditioner_matrix.clear();
3335 *
3336 * TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
3337 * stokes_partitioning,
3338 * stokes_relevant_partitioning,
3339 * MPI_COMM_WORLD);
3340 *
3341 * Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
3342 * for (unsigned int c = 0; c < dim + 1; ++c)
3343 * for (unsigned int d = 0; d < dim + 1; ++d)
3344 * if (c == d)
3345 * coupling[c][d] = DoFTools::always;
3346 * else
3347 * coupling[c][d] = DoFTools::none;
3348 *
3349 * DoFTools::make_sparsity_pattern(stokes_dof_handler,
3350 * coupling,
3351 * sp,
3352 * stokes_constraints,
3353 * false,
3354 * Utilities::MPI::this_mpi_process(
3355 * MPI_COMM_WORLD));
3356 * sp.compress();
3357 *
3358 * stokes_preconditioner_matrix.reinit(sp);
3359 * }
3360 *
3361 *
3362 * template <int dim>
3363 * void BoussinesqFlowProblem<dim>::setup_temperature_matrices(
3364 * const IndexSet &temperature_partitioner,
3365 * const IndexSet &temperature_relevant_partitioner)
3366 * {
3367 * T_preconditioner.reset();
3368 * temperature_mass_matrix.clear();
3369 * temperature_stiffness_matrix.clear();
3370 * temperature_matrix.clear();
3371 *
3372 * TrilinosWrappers::SparsityPattern sp(temperature_partitioner,
3373 * temperature_partitioner,
3374 * temperature_relevant_partitioner,
3375 * MPI_COMM_WORLD);
3376 * DoFTools::make_sparsity_pattern(temperature_dof_handler,
3377 * sp,
3378 * temperature_constraints,
3379 * false,
3380 * Utilities::MPI::this_mpi_process(
3381 * MPI_COMM_WORLD));
3382 * sp.compress();
3383 *
3384 * temperature_matrix.reinit(sp);
3385 * temperature_mass_matrix.reinit(sp);
3386 * temperature_stiffness_matrix.reinit(sp);
3387 * }
3388 *
3389 *
3390 *
3391 * @endcode
3392 *
3393 * The remainder of the setup function (after splitting out the three
3394 * functions above) mostly has to deal with the things we need to do for
3395 * parallelization across processors. Because setting all of this up is a
3396 * significant compute time expense of the program, we put everything we do
3397 * here into a timer group so that we can get summary information about the
3398 * fraction of time spent in this part of the program at its end.
3399 *
3400
3401 *
3402 * At the top as usual we enumerate degrees of freedom and sort them by
3403 * component/block, followed by writing their numbers to the screen from
3404 * processor zero. The DoFHandler::distribute_dofs() function, when applied
3405 * to a parallel::distributed::Triangulation object, sorts degrees of
3406 * freedom in such a way that all degrees of freedom associated with
3407 * subdomain zero come before all those associated with subdomain one,
3408 * etc. For the Stokes part, this entails, however, that velocities and
3409 * pressures become intermixed, but this is trivially solved by sorting
3410 * again by blocks; it is worth noting that this latter operation leaves the
3411 * relative ordering of all velocities and pressures alone, i.e. within the
3412 * velocity block we will still have all those associated with subdomain
3413 * zero before all velocities associated with subdomain one, etc. This is
3414 * important since we store each of the blocks of this matrix distributed
3415 * across all processors and want this to be done in such a way that each
3416 * processor stores that part of the matrix that is roughly equal to the
3417 * degrees of freedom located on those cells that it will actually work on.
3418 *
3419
3420 *
3421 * When printing the numbers of degrees of freedom, note that these numbers
3422 * are going to be large if we use many processors. Consequently, we let the
3423 * stream put a comma separator in between every three digits. The state of
3424 * the stream, using the locale, is saved from before to after this
3425 * operation. While slightly opaque, the code works because the default
3426 * locale (which we get using the constructor call
3427 * <code>std::locale("")</code>) implies printing numbers with a comma
3428 * separator for every third digit (i.e., thousands, millions, billions).
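 *
 * As a minimal standalone illustration of this idiom (independent of this
 * program; note that the exact digit grouping produced by
 * <code>std::locale("")</code> depends on the environment the program runs
 * in), consider:
 *
 * @code
 * #include <iostream>
 * #include <locale>
 *
 * int main()
 * {
 *   const std::locale previous = std::cout.getloc(); // save current state
 *
 *   std::cout.imbue(std::locale("")); // the user's preferred locale
 *   std::cout << 1234567 << std::endl; // e.g. "1,234,567" in an English locale
 *
 *   std::cout.imbue(previous); // restore the previous state
 *   std::cout << 1234567 << std::endl; // "1234567" again
 * }
 * @endcode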
3429 *
3430
3431 *
3432 * In this function as well as many below, we measure how much time
3433 * we spend here and collect that in a section called "Setup dof
3434 * systems" across function invocations. This is done using an
3435 * TimerOutput::Scope object that gets a timer going in the section
3436 * with above name of the `computing_timer` object upon construction
3437 * of the local variable; the timer is stopped again when the
3438 * destructor of the `timing_section` variable is called. This, of
3439 * course, happens either at the end of the function, or if we leave
3440 * the function through a `return` statement or when an exception is
3441 * thrown somewhere -- in other words, whenever we leave this
3442 * function in any way. The use of such "scope" objects therefore
3443 * makes sure that we do not have to manually add code that tells
3444 * the timer to stop at every location where this function may be
3445 * left.
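 *
 * A minimal, hypothetical sketch of this pattern (not part of this program,
 * and using only the TimerOutput and TimerOutput::Scope classes) might look
 * as follows; it shows that the section is closed both on an early return
 * and at the regular end of the function:
 *
 * @code
 * #include <deal.II/base/timer.h>
 * #include <iostream>
 *
 * void do_some_work(dealii::TimerOutput &computing_timer, const bool bail_out)
 * {
 *   dealii::TimerOutput::Scope timing_section(computing_timer, "Do some work");
 *
 *   if (bail_out)
 *     return; // the "Do some work" section is stopped here as well
 *
 *   // ...actual work...
 * } // ...or here, when the destructor of timing_section runs
 *
 * int main()
 * {
 *   dealii::TimerOutput computing_timer(std::cout,
 *                                       dealii::TimerOutput::summary,
 *                                       dealii::TimerOutput::wall_times);
 *   do_some_work(computing_timer, true);
 *   do_some_work(computing_timer, false);
 * } // the summary table is printed when computing_timer goes out of scope
 * @endcode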
3446 *
3447 * @code
3448 * template <int dim>
3449 * void BoussinesqFlowProblem<dim>::setup_dofs()
3450 * {
3451 * TimerOutput::Scope timing_section(computing_timer, "Setup dof systems");
3452 *
3453 * stokes_dof_handler.distribute_dofs(stokes_fe);
3454 *
3455 * std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
3456 * stokes_sub_blocks[dim] = 1;
3457 * DoFRenumbering::component_wise(stokes_dof_handler, stokes_sub_blocks);
3458 *
3459 * temperature_dof_handler.distribute_dofs(temperature_fe);
3460 *
3461 * const std::vector<types::global_dof_index> stokes_dofs_per_block =
3462 * DoFTools::count_dofs_per_fe_block(stokes_dof_handler, stokes_sub_blocks);
3463 *
3464 * const unsigned int n_u = stokes_dofs_per_block[0],
3465 * n_p = stokes_dofs_per_block[1],
3466 * n_T = temperature_dof_handler.n_dofs();
3467 *
3468 * std::locale s = pcout.get_stream().getloc();
3469 * pcout.get_stream().imbue(std::locale(""));
3470 * pcout << "Number of active cells: " << triangulation.n_global_active_cells()
3471 * << " (on " << triangulation.n_levels() << " levels)" << std::endl
3472 * << "Number of degrees of freedom: " << n_u + n_p + n_T << " (" << n_u
3473 * << '+' << n_p << '+' << n_T << ')' << std::endl
3474 * << std::endl;
3475 * pcout.get_stream().imbue(s);
3476 *
3477 *
3478 * @endcode
3479 *
3480 * After this, we have to set up the various partitioners (of type
3481 * <code>IndexSet</code>, see the introduction) that describe which parts
3482 * of each matrix or vector will be stored where, then call the functions
3483 * that actually set up the matrices, and at the end also resize the
3484 * various vectors we keep around in this program.
3485 *
3486 * @code
3487 * std::vector<IndexSet> stokes_partitioning, stokes_relevant_partitioning;
3488 * IndexSet temperature_partitioning(n_T),
3489 * temperature_relevant_partitioning(n_T);
3490 * IndexSet stokes_relevant_set;
3491 * {
3492 * IndexSet stokes_index_set = stokes_dof_handler.locally_owned_dofs();
3493 * stokes_partitioning.push_back(stokes_index_set.get_view(0, n_u));
3494 * stokes_partitioning.push_back(stokes_index_set.get_view(n_u, n_u + n_p));
3495 *
3496 * DoFTools::extract_locally_relevant_dofs(stokes_dof_handler,
3497 * stokes_relevant_set);
3498 * stokes_relevant_partitioning.push_back(
3499 * stokes_relevant_set.get_view(0, n_u));
3500 * stokes_relevant_partitioning.push_back(
3501 * stokes_relevant_set.get_view(n_u, n_u + n_p));
3502 *
3503 * temperature_partitioning = temperature_dof_handler.locally_owned_dofs();
3504 * DoFTools::extract_locally_relevant_dofs(
3505 * temperature_dof_handler, temperature_relevant_partitioning);
3506 * }
3507 *
3508 * @endcode
3509 *
3510 * Following this, we can compute constraints for the solution vectors,
3511 * including hanging node constraints and homogeneous and inhomogeneous
3512 * boundary values for the Stokes and temperature fields. Note that as for
3513 * everything else, the constraint objects can not hold <i>all</i>
3514 * constraints on every processor. Rather, each processor needs to store
3515 * only those that are actually necessary for correctness given that it
3516 * only assembles linear systems on cells it owns. As discussed in
3517 * @ref distributed_paper "this paper", the set of constraints we need to
3518 * know about is exactly the set of constraints on all locally relevant
3519 * degrees of freedom, so this is what we use to initialize the constraint
3520 * objects.
3521 *
3522 * @code
3523 * {
3524 * stokes_constraints.clear();
3525 * stokes_constraints.reinit(stokes_relevant_set);
3526 *
3527 * DoFTools::make_hanging_node_constraints(stokes_dof_handler,
3528 * stokes_constraints);
3529 *
3530 * FEValuesExtractors::Vector velocity_components(0);
3531 * VectorTools::interpolate_boundary_values(
3532 * stokes_dof_handler,
3533 * 0,
3534 * Functions::ZeroFunction<dim>(dim + 1),
3535 * stokes_constraints,
3536 * stokes_fe.component_mask(velocity_components));
3537 *
3538 * std::set<types::boundary_id> no_normal_flux_boundaries;
3539 * no_normal_flux_boundaries.insert(1);
3540 * VectorTools::compute_no_normal_flux_constraints(stokes_dof_handler,
3541 * 0,
3542 * no_normal_flux_boundaries,
3543 * stokes_constraints,
3544 * mapping);
3545 * stokes_constraints.close();
3546 * }
3547 * {
3548 * temperature_constraints.clear();
3549 * temperature_constraints.reinit(temperature_relevant_partitioning);
3550 *
3551 * DoFTools::make_hanging_node_constraints(temperature_dof_handler,
3552 * temperature_constraints);
3553 * VectorTools::interpolate_boundary_values(
3554 * temperature_dof_handler,
3555 * 0,
3556 * EquationData::TemperatureInitialValues<dim>(),
3557 * temperature_constraints);
3558 * VectorTools::interpolate_boundary_values(
3559 * temperature_dof_handler,
3560 * 1,
3561 * EquationData::TemperatureInitialValues<dim>(),
3562 * temperature_constraints);
3563 * temperature_constraints.close();
3564 * }
3565 *
3566 * @endcode
3567 *
3568 * All this done, we can then initialize the various matrix and vector
3569 * objects to their proper sizes. At the end, we also record that all
3570 * matrices and preconditioners have to be re-computed at the beginning of
3571 * the next time step. Note how we initialize the vectors for the Stokes
3572 * and temperature right hand sides: These are writable vectors (last
3573 * boolean argument set to @p true) that have the correct one-to-one
3574 * partitioning of locally owned elements but are still given the relevant
3575 * partitioning as a means of figuring out the vector entries that are
3576 * going to be set right away. As for matrices, this allows for writing
3577 * local contributions into the vector with multiple threads (always
3578 * assuming that the same vector entry is not accessed by multiple threads
3579 * at the same time). The other vectors only allow for read access of
3580 * individual elements, including ghosts, but are not suitable for
3581 * solvers.
3582 *
3583 * @code
3584 * setup_stokes_matrix(stokes_partitioning, stokes_relevant_partitioning);
3585 * setup_stokes_preconditioner(stokes_partitioning,
3586 * stokes_relevant_partitioning);
3587 * setup_temperature_matrices(temperature_partitioning,
3588 * temperature_relevant_partitioning);
3589 *
3590 * stokes_rhs.reinit(stokes_partitioning,
3591 * stokes_relevant_partitioning,
3592 * MPI_COMM_WORLD,
3593 * true);
3594 * stokes_solution.reinit(stokes_relevant_partitioning, MPI_COMM_WORLD);
3595 * old_stokes_solution.reinit(stokes_solution);
3596 *
3597 * temperature_rhs.reinit(temperature_partitioning,
3598 * temperature_relevant_partitioning,
3599 * MPI_COMM_WORLD,
3600 * true);
3601 * temperature_solution.reinit(temperature_relevant_partitioning,
3602 * MPI_COMM_WORLD);
3603 * old_temperature_solution.reinit(temperature_solution);
3604 * old_old_temperature_solution.reinit(temperature_solution);
3605 *
3606 * rebuild_stokes_matrix = true;
3607 * rebuild_stokes_preconditioner = true;
3608 * rebuild_temperature_matrices = true;
3609 * rebuild_temperature_preconditioner = true;
3610 * }
3611 *
3612 *
3613 *
3614 * @endcode
3615 *
3616 *
3617 * <a name="TheBoussinesqFlowProblemassemblyfunctions"></a>
3618 * <h4>The BoussinesqFlowProblem assembly functions</h4>
3619 *
3620
3621 *
3622 * Following the discussion in the introduction and in the @ref threads
3623 * module, we split the assembly functions into different parts:
3624 *
3625
3626 *
3627 * <ul> <li> The local calculations of matrices and right hand sides, given
3628 * a certain cell as input (these functions are named
3629 * <code>local_assemble_*</code> below). The resulting function is, in other
3630 * words, essentially the body of the loop over all cells in @ref step_31 "step-31". Note,
3631 * however, that these functions store the result from the local
3632 * calculations in variables of classes from the CopyData namespace.
3633 *
3634
3635 *
3636 * <li>These objects are then given to the second step which writes the
3637 * local data into the global data structures (these functions are named
3638 * <code>copy_local_to_global_*</code> below). These functions are pretty
3639 * trivial.
3640 *
3641
3642 *
3643 * <li>These two subfunctions are then used in the respective assembly
3644 * routine (called <code>assemble_*</code> below), where a WorkStream object
3645 * is set up and runs over all the cells that belong to the processor's
3646 * subdomain. </ul>
3647 *
3648
3649 *
3650 *
3651 * <a name="Stokespreconditionerassembly"></a>
3652 * <h5>Stokes preconditioner assembly</h5>
3653 *
3654
3655 *
3656 * Let us start with the functions that build the Stokes
3657 * preconditioner. The first two of these are pretty trivial, given the
3658 * discussion above. Note in particular that the main point in using the
3659 * scratch data object is that we want to avoid allocating any objects on
3660 * the heap each time we visit a new cell. As a consequence, the
3661 * assembly function below only has automatic local variables, and
3662 * everything else is accessed through the scratch data object, which is
3663 * allocated only once before we start the loop over all cells:
3664 *
3665 * @code
3666 * template <int dim>
3667 * void BoussinesqFlowProblem<dim>::local_assemble_stokes_preconditioner(
3668 * const typename DoFHandler<dim>::active_cell_iterator &cell,
3669 * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
3670 * Assembly::CopyData::StokesPreconditioner<dim> & data)
3671 * {
3672 * const unsigned int dofs_per_cell = stokes_fe.n_dofs_per_cell();
3673 * const unsigned int n_q_points =
3674 * scratch.stokes_fe_values.n_quadrature_points;
3675 *
3676 * const FEValuesExtractors::Vector velocities(0);
3677 * const FEValuesExtractors::Scalar pressure(dim);
3678 *
3679 * scratch.stokes_fe_values.reinit(cell);
3680 * cell->get_dof_indices(data.local_dof_indices);
3681 *
3682 * data.local_matrix = 0;
3683 *
3684 * for (unsigned int q = 0; q < n_q_points; ++q)
3685 * {
3686 * for (unsigned int k = 0; k < dofs_per_cell; ++k)
3687 * {
3688 * scratch.grad_phi_u[k] =
3689 * scratch.stokes_fe_values[velocities].gradient(k, q);
3690 * scratch.phi_p[k] = scratch.stokes_fe_values[pressure].value(k, q);
3691 * }
3692 *
3693 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
3694 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
3695 * data.local_matrix(i, j) +=
3696 * (EquationData::eta *
3697 * scalar_product(scratch.grad_phi_u[i], scratch.grad_phi_u[j]) +
3698 * (1. / EquationData::eta) * EquationData::pressure_scaling *
3699 * EquationData::pressure_scaling *
3700 * (scratch.phi_p[i] * scratch.phi_p[j])) *
3701 * scratch.stokes_fe_values.JxW(q);
3702 * }
3703 * }
3704 *
3705 *
3706 *
3707 * template <int dim>
3708 * void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_preconditioner(
3709 * const Assembly::CopyData::StokesPreconditioner<dim> &data)
3710 * {
3711 * stokes_constraints.distribute_local_to_global(data.local_matrix,
3712 * data.local_dof_indices,
3713 * stokes_preconditioner_matrix);
3714 * }
3715 *
3716 *
3717 * @endcode
3718 *
3719 * Now for the function that actually puts things together, using the
3720 * WorkStream functions. WorkStream::run needs a start and end iterator to
3721 * enumerate the cells it is supposed to work on. Typically, one would use
3722 * DoFHandler::begin_active() and DoFHandler::end() for that but here we
3723 * actually only want the subset of cells that in fact are owned by the
3724 * current processor. This is where the FilteredIterator class comes into
3725 * play: you give it a range of cells and it provides an iterator that only
3726 * iterates over that subset of cells that satisfy a certain predicate (a
3727 * predicate is a function of one argument that either returns true or
3728 * false). The predicate we use here is IteratorFilters::LocallyOwnedCell,
3729 * i.e., it returns true exactly if the cell is owned by the current
3730 * processor. The resulting iterator range is then exactly what we need.
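 *
 * As a side remark, such filtered iterators can of course also be used
 * outside of WorkStream. A small, hypothetical hand-written loop over only
 * the locally owned cells of some DoFHandler object
 * <code>dof_handler</code> could, for example, look like this:
 *
 * @code
 * using CellFilter =
 *   FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
 *
 * CellFilter       cell(IteratorFilters::LocallyOwnedCell(),
 *                       dof_handler.begin_active());
 * const CellFilter endc(IteratorFilters::LocallyOwnedCell(),
 *                       dof_handler.end());
 * for (; cell != endc; ++cell)
 *   {
 *     // ...do something on this locally owned cell...
 *   }
 * @endcode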
3731 *
3732
3733 *
3734 * With this obstacle out of the way, we call the WorkStream::run
3735 * function with this set of cells, scratch and copy objects, and
3736 * with pointers to two functions: the local assembly and
3737 * copy-local-to-global function. These functions need to have very
3738 * specific signatures: three arguments in the first and one
3739 * argument in the latter case (see the documentation of the
3740 * WorkStream::run function for the meaning of these arguments).
3741 * Note how we use lambda functions to
3742 * create function objects that satisfy this requirement. They use
3743 * function arguments for the local assembly function that specify the
3744 * cell, scratch data, and copy data, as well as a function argument
3745 * for the copy function that expects the
3746 * data to be written into the global matrix (also see the discussion in
3747 * @ref step_13 "step-13"'s <code>assemble_linear_system()</code> function). On the other
3748 * hand, the implicit zeroth argument of member functions (namely
3749 * the <code>this</code> pointer of the object on which that member
3750 * function is to operate) is <i>bound</i> to the
3751 * <code>this</code> pointer of the current function and is captured. The
3752 * WorkStream::run function, as a consequence, does not need to know
3753 * anything about the object these functions work on.
3754 *
3755
3756 *
3757 * When the WorkStream is executed, it will create several local assembly
3758 * routines of the first kind for several cells and let some available
3759 * processors work on them. The function that needs to be synchronized,
3760 * i.e., the write operation into the global matrix, however, is executed by
3761 * only one thread at a time in the prescribed order. Of course, this only
3762 * holds for the parallelization on a single MPI process. Different MPI
3763 * processes will have their own WorkStream objects and do that work
3764 * completely independently (and in different memory spaces). In a
3765 * distributed calculation, some data will accumulate at degrees of freedom
3766 * that are not owned by the respective processor. It would be inefficient
3767 * to send data around every time we encounter such a dof. What happens
3768 * instead is that the Trilinos sparse matrix will keep that data and send
3769 * it to the owner at the end of assembly, by calling the
3770 * <code>compress()</code> command.
3771 *
3772 * @code
3773 * template <int dim>
3774 * void BoussinesqFlowProblem<dim>::assemble_stokes_preconditioner()
3775 * {
3776 * stokes_preconditioner_matrix = 0;
3777 *
3778 * const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);
3779 *
3780 * using CellFilter =
3781 * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
3782 *
3783 * auto worker =
3784 * [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
3785 * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
3786 * Assembly::CopyData::StokesPreconditioner<dim> & data) {
3787 * this->local_assemble_stokes_preconditioner(cell, scratch, data);
3788 * };
3789 *
3790 * auto copier =
3791 * [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
3792 * this->copy_local_to_global_stokes_preconditioner(data);
3793 * };
3794 *
3795 * WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
3796 * stokes_dof_handler.begin_active()),
3797 * CellFilter(IteratorFilters::LocallyOwnedCell(),
3798 * stokes_dof_handler.end()),
3799 * worker,
3800 * copier,
3801 * Assembly::Scratch::StokesPreconditioner<dim>(
3802 * stokes_fe,
3803 * quadrature_formula,
3804 * mapping,
3805 * update_JxW_values | update_values | update_gradients),
3806 * Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));
3807 *
3808 * stokes_preconditioner_matrix.compress(VectorOperation::add);
3809 * }
3810 *
3811 *
3812 *
3813 * @endcode
3814 *
3815 * The final function in this block initiates assembly of the Stokes
3816 * preconditioner matrix and then in fact builds the Stokes
3817 * preconditioner. It is mostly the same as in the serial case. The only
3818 * difference to @ref step_31 "step-31" is that we use a Jacobi preconditioner for the
3819 * pressure mass matrix instead of IC, as discussed in the introduction.
3820 *
3821 * @code
3822 * template <int dim>
3823 * void BoussinesqFlowProblem<dim>::build_stokes_preconditioner()
3824 * {
3825 * if (rebuild_stokes_preconditioner == false)
3826 * return;
3827 *
3828 * TimerOutput::Scope timer_section(computing_timer,
3829 * " Build Stokes preconditioner");
3830 * pcout << " Rebuilding Stokes preconditioner..." << std::flush;
3831 *
3832 * assemble_stokes_preconditioner();
3833 *
3834 * std::vector<std::vector<bool>> constant_modes;
3835 * FEValuesExtractors::Vector velocity_components(0);
3836 * DoFTools::extract_constant_modes(stokes_dof_handler,
3837 * stokes_fe.component_mask(
3838 * velocity_components),
3839 * constant_modes);
3840 *
3841 * Mp_preconditioner =
3842 * std::make_shared<TrilinosWrappers::PreconditionJacobi>();
3843 * Amg_preconditioner = std::make_shared<TrilinosWrappers::PreconditionAMG>();
3844 *
3845 * TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
3846 * Amg_data.constant_modes = constant_modes;
3847 * Amg_data.elliptic = true;
3848 * Amg_data.higher_order_elements = true;
3849 * Amg_data.smoother_sweeps = 2;
3850 * Amg_data.aggregation_threshold = 0.02;
3851 *
3852 * Mp_preconditioner->initialize(stokes_preconditioner_matrix.block(1, 1));
3853 * Amg_preconditioner->initialize(stokes_preconditioner_matrix.block(0, 0),
3854 * Amg_data);
3855 *
3856 * rebuild_stokes_preconditioner = false;
3857 *
3858 * pcout << std::endl;
3859 * }
3860 *
3861 *
3862 * @endcode
3863 *
3864 *
3865 * <a name="Stokessystemassembly"></a>
3866 * <h5>Stokes system assembly</h5>
3867 *
3868
3869 *
3870 * The next three functions implement the assembly of the Stokes system,
3871 * again split up into a part performing local calculations, one for writing
3872 * the local data into the global matrix and vector, and one for actually
3873 * running the loop over all cells with the help of the WorkStream
3874 * class. Note that the assembly of the Stokes matrix only needs to be done
3875 * if we have changed the mesh. Otherwise, just the
3876 * (temperature-dependent) right hand side needs to be calculated
3877 * here. Since we are working with distributed matrices and vectors, we have
3878 * to call the respective <code>compress()</code> functions at the end of
3879 * the assembly in order to send non-local data to the owner process.
3880 *
3881 * @code
3882 * template <int dim>
3883 * void BoussinesqFlowProblem<dim>::local_assemble_stokes_system(
3884 * const typename DoFHandler<dim>::active_cell_iterator &cell,
3885 * Assembly::Scratch::StokesSystem<dim> & scratch,
3886 * Assembly::CopyData::StokesSystem<dim> & data)
3887 * {
3888 * const unsigned int dofs_per_cell =
3889 * scratch.stokes_fe_values.get_fe().n_dofs_per_cell();
3890 * const unsigned int n_q_points =
3891 * scratch.stokes_fe_values.n_quadrature_points;
3892 *
3893 * const FEValuesExtractors::Vector velocities(0);
3894 * const FEValuesExtractors::Scalar pressure(dim);
3895 *
3896 * scratch.stokes_fe_values.reinit(cell);
3897 *
3898 * typename DoFHandler<dim>::active_cell_iterator temperature_cell(
3899 * &triangulation, cell->level(), cell->index(), &temperature_dof_handler);
3900 * scratch.temperature_fe_values.reinit(temperature_cell);
3901 *
3902 * if (rebuild_stokes_matrix)
3903 * data.local_matrix = 0;
3904 * data.local_rhs = 0;
3905 *
3906 * scratch.temperature_fe_values.get_function_values(
3907 * old_temperature_solution, scratch.old_temperature_values);
3908 *
3909 * for (unsigned int q = 0; q < n_q_points; ++q)
3910 * {
3911 * const double old_temperature = scratch.old_temperature_values[q];
3912 *
3913 * for (unsigned int k = 0; k < dofs_per_cell; ++k)
3914 * {
3915 * scratch.phi_u[k] = scratch.stokes_fe_values[velocities].value(k, q);
3916 * if (rebuild_stokes_matrix)
3917 * {
3918 * scratch.grads_phi_u[k] =
3919 * scratch.stokes_fe_values[velocities].symmetric_gradient(k, q);
3920 * scratch.div_phi_u[k] =
3921 * scratch.stokes_fe_values[velocities].divergence(k, q);
3922 * scratch.phi_p[k] =
3923 * scratch.stokes_fe_values[pressure].value(k, q);
3924 * }
3925 * }
3926 *
3927 * if (rebuild_stokes_matrix == true)
3928 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
3929 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
3930 * data.local_matrix(i, j) +=
3931 * (EquationData::eta * 2 *
3932 * (scratch.grads_phi_u[i] * scratch.grads_phi_u[j]) -
3933 * (EquationData::pressure_scaling * scratch.div_phi_u[i] *
3934 * scratch.phi_p[j]) -
3935 * (EquationData::pressure_scaling * scratch.phi_p[i] *
3936 * scratch.div_phi_u[j])) *
3937 * scratch.stokes_fe_values.JxW(q);
3938 *
3939 * const Tensor<1, dim> gravity = EquationData::gravity_vector(
3940 * scratch.stokes_fe_values.quadrature_point(q));
3941 *
3942 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
3943 * data.local_rhs(i) += (EquationData::density(old_temperature) *
3944 * gravity * scratch.phi_u[i]) *
3945 * scratch.stokes_fe_values.JxW(q);
3946 * }
3947 *
3948 * cell->get_dof_indices(data.local_dof_indices);
3949 * }
3950 *
3951 *
3952 *
3953 * template <int dim>
3954 * void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_system(
3955 * const Assembly::CopyData::StokesSystem<dim> &data)
3956 * {
3957 * if (rebuild_stokes_matrix == true)
3958 * stokes_constraints.distribute_local_to_global(data.local_matrix,
3959 * data.local_rhs,
3960 * data.local_dof_indices,
3961 * stokes_matrix,
3962 * stokes_rhs);
3963 * else
3964 * stokes_constraints.distribute_local_to_global(data.local_rhs,
3965 * data.local_dof_indices,
3966 * stokes_rhs);
3967 * }
3968 *
3969 *
3970 *
3971 * template <int dim>
3972 * void BoussinesqFlowProblem<dim>::assemble_stokes_system()
3973 * {
3974 * TimerOutput::Scope timer_section(computing_timer,
3975 * " Assemble Stokes system");
3976 *
3977 * if (rebuild_stokes_matrix == true)
3978 * stokes_matrix = 0;
3979 *
3980 * stokes_rhs = 0;
3981 *
3982 * const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);
3983 *
3984 * using CellFilter =
3985 * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
3986 *
3987 * WorkStream::run(
3988 * CellFilter(IteratorFilters::LocallyOwnedCell(),
3989 * stokes_dof_handler.begin_active()),
3990 * CellFilter(IteratorFilters::LocallyOwnedCell(), stokes_dof_handler.end()),
3991 * [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
3992 * Assembly::Scratch::StokesSystem<dim> & scratch,
3993 * Assembly::CopyData::StokesSystem<dim> & data) {
3994 * this->local_assemble_stokes_system(cell, scratch, data);
3995 * },
3996 * [this](const Assembly::CopyData::StokesSystem<dim> &data) {
3997 * this->copy_local_to_global_stokes_system(data);
3998 * },
3999 * Assembly::Scratch::StokesSystem<dim>(
4000 * stokes_fe,
4001 * mapping,
4002 * quadrature_formula,
4003 * (update_values | update_quadrature_points | update_JxW_values |
4004 * (rebuild_stokes_matrix == true ? update_gradients : UpdateFlags(0))),
4005 * temperature_fe,
4006 * update_values),
4007 * Assembly::CopyData::StokesSystem<dim>(stokes_fe));
4008 *
4009 * if (rebuild_stokes_matrix == true)
4010 * stokes_matrix.compress(VectorOperation::add);
4011 * stokes_rhs.compress(VectorOperation::add);
4012 *
4013 * rebuild_stokes_matrix = false;
4014 *
4015 * pcout << std::endl;
4016 * }
4017 *
4018 *
4019 * @endcode
4020 *
4021 *
4022 * <a name="Temperaturematrixassembly"></a>
4023 * <h5>Temperature matrix assembly</h5>
4024 *
4025
4026 *
4027 * The task to be performed by the next three functions is to calculate a
4028 * mass matrix and a Laplace matrix on the temperature system. These will be
4029 * combined in order to yield the semi-implicit time stepping matrix that
4030 * consists of the mass matrix plus a time step-dependent weight factor
4031 * times the Laplace matrix. This function is again essentially the body of
4032 * the loop over all cells from @ref step_31 "step-31".
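 *
 * In formulas, and anticipating the code in
 * <code>assemble_temperature_system()</code> further down: from these two
 * pieces the time stepping matrix is later formed as
 * @f$\frac{2k_n+k_{n-1}}{k_n+k_{n-1}}\,M + k_n\,A_\kappa@f$ for a regular
 * BDF-2 step (with @f$M@f$ the mass matrix, @f$A_\kappa@f$ the stiffness
 * matrix that already contains the diffusion coefficient @f$\kappa@f$, and
 * @f$k_n,k_{n-1}@f$ the current and previous time step sizes), and simply as
 * @f$M + k_n\,A_\kappa@f$ in the first time step.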
4033 *
4034
4035 *
4036 * The following two functions perform similar services to the ones above.
4037 *
4038 * @code
4039 * template <int dim>
4040 * void BoussinesqFlowProblem<dim>::local_assemble_temperature_matrix(
4041 * const typename DoFHandler<dim>::active_cell_iterator &cell,
4042 * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
4043 * Assembly::CopyData::TemperatureMatrix<dim> & data)
4044 * {
4045 * const unsigned int dofs_per_cell =
4046 * scratch.temperature_fe_values.get_fe().n_dofs_per_cell();
4047 * const unsigned int n_q_points =
4048 * scratch.temperature_fe_values.n_quadrature_points;
4049 *
4050 * scratch.temperature_fe_values.reinit(cell);
4051 * cell->get_dof_indices(data.local_dof_indices);
4052 *
4053 * data.local_mass_matrix = 0;
4054 * data.local_stiffness_matrix = 0;
4055 *
4056 * for (unsigned int q = 0; q < n_q_points; ++q)
4057 * {
4058 * for (unsigned int k = 0; k < dofs_per_cell; ++k)
4059 * {
4060 * scratch.grad_phi_T[k] =
4061 * scratch.temperature_fe_values.shape_grad(k, q);
4062 * scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
4063 * }
4064 *
4065 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
4066 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
4067 * {
4068 * data.local_mass_matrix(i, j) +=
4069 * (scratch.phi_T[i] * scratch.phi_T[j] *
4070 * scratch.temperature_fe_values.JxW(q));
4071 * data.local_stiffness_matrix(i, j) +=
4072 * (EquationData::kappa * scratch.grad_phi_T[i] *
4073 * scratch.grad_phi_T[j] * scratch.temperature_fe_values.JxW(q));
4074 * }
4075 * }
4076 * }
4077 *
4078 *
4079 *
4080 * template <int dim>
4081 * void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_matrix(
4082 * const Assembly::CopyData::TemperatureMatrix<dim> &data)
4083 * {
4084 * temperature_constraints.distribute_local_to_global(data.local_mass_matrix,
4085 * data.local_dof_indices,
4086 * temperature_mass_matrix);
4087 * temperature_constraints.distribute_local_to_global(
4088 * data.local_stiffness_matrix,
4089 * data.local_dof_indices,
4090 * temperature_stiffness_matrix);
4091 * }
4092 *
4093 *
4094 * template <int dim>
4095 * void BoussinesqFlowProblem<dim>::assemble_temperature_matrix()
4096 * {
4097 * if (rebuild_temperature_matrices == false)
4098 * return;
4099 *
4100 * TimerOutput::Scope timer_section(computing_timer,
4101 * " Assemble temperature matrices");
4102 * temperature_mass_matrix = 0;
4103 * temperature_stiffness_matrix = 0;
4104 *
4105 * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);
4106 *
4107 * using CellFilter =
4108 * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
4109 *
4110 * WorkStream::run(
4111 * CellFilter(IteratorFilters::LocallyOwnedCell(),
4112 * temperature_dof_handler.begin_active()),
4113 * CellFilter(IteratorFilters::LocallyOwnedCell(),
4114 * temperature_dof_handler.end()),
4115 * [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
4116 * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
4117 * Assembly::CopyData::TemperatureMatrix<dim> & data) {
4118 * this->local_assemble_temperature_matrix(cell, scratch, data);
4119 * },
4120 * [this](const Assembly::CopyData::TemperatureMatrix<dim> &data) {
4121 * this->copy_local_to_global_temperature_matrix(data);
4122 * },
4123 * Assembly::Scratch::TemperatureMatrix<dim>(temperature_fe,
4124 * mapping,
4125 * quadrature_formula),
4126 * Assembly::CopyData::TemperatureMatrix<dim>(temperature_fe));
4127 *
4128 * temperature_mass_matrix.compress(VectorOperation::add);
4129 * temperature_stiffness_matrix.compress(VectorOperation::add);
4130 *
4131 * rebuild_temperature_matrices = false;
4132 * rebuild_temperature_preconditioner = true;
4133 * }
4134 *
4135 *
4136 * @endcode
4137 *
4138 *
4139 * <a name="Temperaturerighthandsideassembly"></a>
4140 * <h5>Temperature right hand side assembly</h5>
4141 *
4142
4143 *
4144 * This is the last assembly function. It calculates the right hand side of
4145 * the temperature system, which includes the convection and the
4146 * stabilization terms. It includes a lot of evaluations of old solutions at
4147 * the quadrature points (which are necessary for calculating the artificial
4148 * viscosity of stabilization), but is otherwise similar to the other
4149 * assembly functions. Notice, once again, how we resolve the dilemma of
4150 * having inhomogeneous boundary conditions, by just making a right hand
4151 * side at this point (compare the comments for the <code>project()</code>
4152 * function above): We create some matrix columns with exactly the values
4153 * that would be entered for the temperature stiffness matrix, in case we
4154 * have inhomogeneously constrained dofs. That will account for the correct
4155 * balance of the right hand side vector with the matrix system of
4156 * temperature.
4157 *
4158 * @code
4159 * template <int dim>
4160 * void BoussinesqFlowProblem<dim>::local_assemble_temperature_rhs(
4161 * const std::pair<double, double> global_T_range,
4162 * const double global_max_velocity,
4163 * const double global_entropy_variation,
4164 * const typename DoFHandler<dim>::active_cell_iterator &cell,
4165 * Assembly::Scratch::TemperatureRHS<dim> & scratch,
4166 * Assembly::CopyData::TemperatureRHS<dim> & data)
4167 * {
4168 * const bool use_bdf2_scheme = (timestep_number != 0);
4169 *
4170 * const unsigned int dofs_per_cell =
4171 * scratch.temperature_fe_values.get_fe().n_dofs_per_cell();
4172 * const unsigned int n_q_points =
4173 * scratch.temperature_fe_values.n_quadrature_points;
4174 *
4175 * const FEValuesExtractors::Vector velocities(0);
4176 *
4177 * data.local_rhs = 0;
4178 * data.matrix_for_bc = 0;
4179 * cell->get_dof_indices(data.local_dof_indices);
4180 *
4181 * scratch.temperature_fe_values.reinit(cell);
4182 *
4183 * typename DoFHandler<dim>::active_cell_iterator stokes_cell(
4184 * &triangulation, cell->level(), cell->index(), &stokes_dof_handler);
4185 * scratch.stokes_fe_values.reinit(stokes_cell);
4186 *
4187 * scratch.temperature_fe_values.get_function_values(
4188 * old_temperature_solution, scratch.old_temperature_values);
4189 * scratch.temperature_fe_values.get_function_values(
4190 * old_old_temperature_solution, scratch.old_old_temperature_values);
4191 *
4192 * scratch.temperature_fe_values.get_function_gradients(
4193 * old_temperature_solution, scratch.old_temperature_grads);
4194 * scratch.temperature_fe_values.get_function_gradients(
4195 * old_old_temperature_solution, scratch.old_old_temperature_grads);
4196 *
4197 * scratch.temperature_fe_values.get_function_laplacians(
4198 * old_temperature_solution, scratch.old_temperature_laplacians);
4199 * scratch.temperature_fe_values.get_function_laplacians(
4200 * old_old_temperature_solution, scratch.old_old_temperature_laplacians);
4201 *
4202 * scratch.stokes_fe_values[velocities].get_function_values(
4203 * stokes_solution, scratch.old_velocity_values);
4204 * scratch.stokes_fe_values[velocities].get_function_values(
4205 * old_stokes_solution, scratch.old_old_velocity_values);
4206 * scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
4207 * stokes_solution, scratch.old_strain_rates);
4208 * scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
4209 * old_stokes_solution, scratch.old_old_strain_rates);
4210 *
4211 * const double nu =
4212 * compute_viscosity(scratch.old_temperature_values,
4213 * scratch.old_old_temperature_values,
4214 * scratch.old_temperature_grads,
4215 * scratch.old_old_temperature_grads,
4216 * scratch.old_temperature_laplacians,
4217 * scratch.old_old_temperature_laplacians,
4218 * scratch.old_velocity_values,
4219 * scratch.old_old_velocity_values,
4220 * scratch.old_strain_rates,
4221 * scratch.old_old_strain_rates,
4222 * global_max_velocity,
4223 * global_T_range.second - global_T_range.first,
4224 * 0.5 * (global_T_range.second + global_T_range.first),
4225 * global_entropy_variation,
4226 * cell->diameter());
4227 *
4228 * for (unsigned int q = 0; q < n_q_points; ++q)
4229 * {
4230 * for (unsigned int k = 0; k < dofs_per_cell; ++k)
4231 * {
4232 * scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
4233 * scratch.grad_phi_T[k] =
4234 * scratch.temperature_fe_values.shape_grad(k, q);
4235 * }
4236 *
4237 *
4238 * const double T_term_for_rhs =
4239 * (use_bdf2_scheme ?
4240 * (scratch.old_temperature_values[q] *
4241 * (1 + time_step / old_time_step) -
4242 * scratch.old_old_temperature_values[q] * (time_step * time_step) /
4243 * (old_time_step * (time_step + old_time_step))) :
4244 * scratch.old_temperature_values[q]);
4245 *
4246 * const double ext_T =
4247 * (use_bdf2_scheme ? (scratch.old_temperature_values[q] *
4248 * (1 + time_step / old_time_step) -
4249 * scratch.old_old_temperature_values[q] *
4250 * time_step / old_time_step) :
4251 * scratch.old_temperature_values[q]);
4252 *
4253 * const Tensor<1, dim> ext_grad_T =
4254 * (use_bdf2_scheme ? (scratch.old_temperature_grads[q] *
4255 * (1 + time_step / old_time_step) -
4256 * scratch.old_old_temperature_grads[q] * time_step /
4257 * old_time_step) :
4258 * scratch.old_temperature_grads[q]);
4259 *
4260 * const Tensor<1, dim> extrapolated_u =
4261 * (use_bdf2_scheme ?
4262 * (scratch.old_velocity_values[q] * (1 + time_step / old_time_step) -
4263 * scratch.old_old_velocity_values[q] * time_step / old_time_step) :
4264 * scratch.old_velocity_values[q]);
4265 *
4266 * const SymmetricTensor<2, dim> extrapolated_strain_rate =
4267 * (use_bdf2_scheme ?
4268 * (scratch.old_strain_rates[q] * (1 + time_step / old_time_step) -
4269 * scratch.old_old_strain_rates[q] * time_step / old_time_step) :
4270 * scratch.old_strain_rates[q]);
4271 *
4272 * const double gamma =
4273 * ((EquationData::radiogenic_heating * EquationData::density(ext_T) +
4274 * 2 * EquationData::eta * extrapolated_strain_rate *
4275 * extrapolated_strain_rate) /
4276 * (EquationData::density(ext_T) * EquationData::specific_heat));
4277 *
4278 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
4279 * {
4280 * data.local_rhs(i) +=
4281 * (T_term_for_rhs * scratch.phi_T[i] -
4282 * time_step * extrapolated_u * ext_grad_T * scratch.phi_T[i] -
4283 * time_step * nu * ext_grad_T * scratch.grad_phi_T[i] +
4284 * time_step * gamma * scratch.phi_T[i]) *
4285 * scratch.temperature_fe_values.JxW(q);
4286 *
4287 * if (temperature_constraints.is_inhomogeneously_constrained(
4288 * data.local_dof_indices[i]))
4289 * {
4290 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
4291 * data.matrix_for_bc(j, i) +=
4292 * (scratch.phi_T[i] * scratch.phi_T[j] *
4293 * (use_bdf2_scheme ? ((2 * time_step + old_time_step) /
4294 * (time_step + old_time_step)) :
4295 * 1.) +
4296 * scratch.grad_phi_T[i] * scratch.grad_phi_T[j] *
4297 * EquationData::kappa * time_step) *
4298 * scratch.temperature_fe_values.JxW(q);
4299 * }
4300 * }
4301 * }
4302 * }
4303 *
4304 *
4305 * template <int dim>
4306 * void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_rhs(
4307 * const Assembly::CopyData::TemperatureRHS<dim> &data)
4308 * {
4309 * temperature_constraints.distribute_local_to_global(data.local_rhs,
4310 * data.local_dof_indices,
4311 * temperature_rhs,
4312 * data.matrix_for_bc);
4313 * }
4314 *
4315 *
4316 *
4317 * @endcode
4318 *
4319 * In the function that runs the WorkStream for actually calculating the
4320 * right hand side, we also generate the final matrix. As mentioned above,
4321 * it is a sum of the mass matrix and the Laplace matrix, times some time
4322 * step-dependent weight. This weight is specified by the BDF-2 time
4323 * integration scheme, see the introduction in @ref step_31 "step-31". What is new in this
4324 * tutorial program (in addition to the use of MPI parallelization and the
4325 * WorkStream class), is that we now precompute the temperature
4326 * preconditioner as well. The reason is that the setup of the Jacobi
4327 * preconditioner takes a noticeable time compared to the solver because we
4328 * usually only need between 10 and 20 iterations for solving the
4329 * temperature system (this might sound strange, as Jacobi really only
4330 * consists of a diagonal, but in Trilinos it is derived from a more general
4331 * framework for point relaxation preconditioners which is a bit
4332 * inefficient). Hence, it is more efficient to precompute the
4333 * preconditioner, even though the matrix entries may slightly change
4334 * because the time step might change. This is not too big a problem because
4335 * we remesh every few time steps (and regenerate the preconditioner then).
4336 *
4337 * @code
4338 * template <int dim>
4339 * void BoussinesqFlowProblem<dim>::assemble_temperature_system(
4340 * const double maximal_velocity)
4341 * {
4342 * const bool use_bdf2_scheme = (timestep_number != 0);
4343 *
4344 * if (use_bdf2_scheme == true)
4345 * {
4346 * temperature_matrix.copy_from(temperature_mass_matrix);
4347 * temperature_matrix *=
4348 * (2 * time_step + old_time_step) / (time_step + old_time_step);
4349 * temperature_matrix.add(time_step, temperature_stiffness_matrix);
4350 * }
4351 * else
4352 * {
4353 * temperature_matrix.copy_from(temperature_mass_matrix);
4354 * temperature_matrix.add(time_step, temperature_stiffness_matrix);
4355 * }
4356 *
4357 * if (rebuild_temperature_preconditioner == true)
4358 * {
4359 * T_preconditioner =
4360 * std::make_shared<TrilinosWrappers::PreconditionJacobi>();
4361 * T_preconditioner->initialize(temperature_matrix);
4362 * rebuild_temperature_preconditioner = false;
4363 * }
4364 *
4365 * @endcode
4366 *
4367 * The next part is computing the right hand side vectors. To do so, we
4368 * first compute the average temperature @f$T_m@f$ that we use for evaluating
4369 * the artificial viscosity stabilization through the residual @f$E(T) =
4370 * (T-T_m)^2@f$. We do this by defining the midpoint between maximum and
4371 * minimum temperature as average temperature in the definition of the
4372 * entropy viscosity. An alternative would be to use the integral average,
4373 * but the results are not very sensitive to this choice. The rest then
4374 * only requires calling WorkStream::run again, binding those arguments of
4375 * the <code>local_assemble_temperature_rhs</code> function that are the
4376 * same in every call to their correct values:
4377 *
4378 * @code
4379 * temperature_rhs = 0;
4380 *
4381 * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);
4382 * const std::pair<double, double> global_T_range =
4383 * get_extrapolated_temperature_range();
4384 *
4385 * const double average_temperature =
4386 * 0.5 * (global_T_range.first + global_T_range.second);
4387 * const double global_entropy_variation =
4388 * get_entropy_variation(average_temperature);
4389 *
4390 * using CellFilter =
4391 * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
4392 *
4393 * auto worker =
4394 * [this, global_T_range, maximal_velocity, global_entropy_variation](
4395 * const typename DoFHandler<dim>::active_cell_iterator &cell,
4396 * Assembly::Scratch::TemperatureRHS<dim> & scratch,
4397 * Assembly::CopyData::TemperatureRHS<dim> & data) {
4398 * this->local_assemble_temperature_rhs(global_T_range,
4399 * maximal_velocity,
4400 * global_entropy_variation,
4401 * cell,
4402 * scratch,
4403 * data);
4404 * };
4405 *
4406 * auto copier = [this](const Assembly::CopyData::TemperatureRHS<dim> &data) {
4407 * this->copy_local_to_global_temperature_rhs(data);
4408 * };
4409 *
4410 * WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
4411 * temperature_dof_handler.begin_active()),
4412 * CellFilter(IteratorFilters::LocallyOwnedCell(),
4413 * temperature_dof_handler.end()),
4414 * worker,
4415 * copier,
4416 * Assembly::Scratch::TemperatureRHS<dim>(
4417 * temperature_fe, stokes_fe, mapping, quadrature_formula),
4418 * Assembly::CopyData::TemperatureRHS<dim>(temperature_fe));
4419 *
4420 * temperature_rhs.compress(VectorOperation::add);
4421 * }
4422 *
4423 *
4424 *
4425 * @endcode
4426 *
4427 *
4428 * <a name="BoussinesqFlowProblemsolve"></a>
4429 * <h4>BoussinesqFlowProblem::solve</h4>
4430 *
4431
4432 *
4433 * This function solves the linear systems in each time step of the
4434 * Boussinesq problem. First, we work on the Stokes system and then on the
4435 * temperature system. In essence, it does the same things as the respective
4436 * function in @ref step_31 "step-31". However, there are a few changes here.
4437 *
4438
4439 *
4440 * The first change is related to the way we store our solution: we keep the
4441 * vectors with locally owned degrees of freedom plus ghost nodes on each
4442 * MPI node. When we enter a solver which is supposed to perform
4443 * matrix-vector products with a distributed matrix, this is not the
4444 * appropriate form, though. There, we will want to have the solution vector
4445 * to be distributed in the same way as the matrix, i.e. without any
4446 * ghosts. So what we do first is to generate a distributed vector called
4447 * <code>distributed_stokes_solution</code> and put only the locally owned
4448 * dofs into that, which is neatly done by the <code>operator=</code> of the
4449 * Trilinos vector.
4450 *
4451
4452 *
4453 * Next, we scale the pressure solution (or rather, the initial guess) for
4454 * the solver so that it matches with the length scales in the matrices, as
4455 * discussed in the introduction. We also immediately scale the pressure
4456 * solution back to the correct units after the solution is completed. We
4457 * also need to set the pressure values at hanging nodes to zero. This we
4458 * also did in @ref step_31 "step-31" in order not to disturb the Schur complement by some
4459 * vector entries that actually are irrelevant during the solve stage. As a
4460 * difference to @ref step_31 "step-31", here we do it only for the locally owned pressure
4461 * dofs. After solving for the Stokes solution, each processor copies the
4462 * distributed solution back into the solution vector that also includes
4463 * ghost elements.
4464 *
4465
4466 *
4467 * The third and most obvious change is that we have two variants for the
4468 * Stokes solver: A fast solver that sometimes breaks down, and a robust
4469 * solver that is slower. This is what we already discussed in the
4470 * introduction. Here is how we realize it: First, we perform 30 iterations
4471 * with the fast solver, using the simple preconditioner based on an AMG
4472 * V-cycle instead of an approximate solve (this is indicated by the
4473 * <code>false</code> argument to the
4474 * <code>LinearSolvers::BlockSchurPreconditioner</code> object). If we
4475 * converge, everything is fine. If we do not converge, the solver control
4476 * object will throw an exception SolverControl::NoConvergence. Usually,
4477 * this would abort the program because we don't catch it in our usual
4478 * <code>solve()</code> functions. This is certainly not what we want to
4479 * happen here. Rather, we want to switch to the strong solver and continue
4480 * the solution process with whatever vector we got so far. Hence, we catch
4481 * the exception with the C++ try/catch mechanism. We then simply go through
4482 * the same solver sequence again in the <code>catch</code> clause, this
4483 * time passing the @p true flag to the preconditioner for the strong
4484 * solver, signaling an approximate CG solve.
4485 *
4486 * @code
4487 * template <int dim>
4488 * void BoussinesqFlowProblem<dim>::solve()
4489 * {
4490 * {
4491 * TimerOutput::Scope timer_section(computing_timer,
4492 * " Solve Stokes system");
4493 *
4494 * pcout << " Solving Stokes system... " << std::flush;
4495 *
4496 * TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(
4497 * stokes_rhs);
4498 * distributed_stokes_solution = stokes_solution;
4499 *
4500 * distributed_stokes_solution.block(1) /= EquationData::pressure_scaling;
4501 *
4502 * const unsigned int
4503 * start = (distributed_stokes_solution.block(0).size() +
4504 * distributed_stokes_solution.block(1).local_range().first),
4505 * end = (distributed_stokes_solution.block(0).size() +
4506 * distributed_stokes_solution.block(1).local_range().second);
4507 * for (unsigned int i = start; i < end; ++i)
4508 * if (stokes_constraints.is_constrained(i))
4509 * distributed_stokes_solution(i) = 0;
4510 *
4511 *
4512 * PrimitiveVectorMemory<TrilinosWrappers::MPI::BlockVector> mem;
4513 *
4514 * unsigned int n_iterations = 0;
4515 * const double solver_tolerance = 1e-8 * stokes_rhs.l2_norm();
4516 * SolverControl solver_control(30, solver_tolerance);
4517 *
4518 * try
4519 * {
4520 * const LinearSolvers::BlockSchurPreconditioner<
4521 * TrilinosWrappers::PreconditionAMG,
4522 * TrilinosWrappers::PreconditionJacobi>
4523 * preconditioner(stokes_matrix,
4524 * stokes_preconditioner_matrix,
4525 * *Mp_preconditioner,
4526 * *Amg_preconditioner,
4527 * false);
4528 *
4529 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
4530 * solver_control,
4531 * mem,
4532 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
4533 * 30));
4534 * solver.solve(stokes_matrix,
4535 * distributed_stokes_solution,
4536 * stokes_rhs,
4537 * preconditioner);
4538 *
4539 * n_iterations = solver_control.last_step();
4540 * }
4541 *
4542 * catch (SolverControl::NoConvergence &)
4543 * {
4544 * const LinearSolvers::BlockSchurPreconditioner<
4545 * TrilinosWrappers::PreconditionAMG,
4546 * TrilinosWrappers::PreconditionJacobi>
4547 * preconditioner(stokes_matrix,
4548 * stokes_preconditioner_matrix,
4549 * *Mp_preconditioner,
4550 * *Amg_preconditioner,
4551 * true);
4552 *
4553 * SolverControl solver_control_refined(stokes_matrix.m(),
4554 * solver_tolerance);
4555 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
4556 * solver_control_refined,
4557 * mem,
4558 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
4559 * 50));
4560 * solver.solve(stokes_matrix,
4561 * distributed_stokes_solution,
4562 * stokes_rhs,
4563 * preconditioner);
4564 *
4565 * n_iterations =
4566 * (solver_control.last_step() + solver_control_refined.last_step());
4567 * }
4568 *
4569 *
4570 * stokes_constraints.distribute(distributed_stokes_solution);
4571 *
4572 * distributed_stokes_solution.block(1) *= EquationData::pressure_scaling;
4573 *
4574 * stokes_solution = distributed_stokes_solution;
4575 * pcout << n_iterations << " iterations." << std::endl;
4576 * }
4577 *
4578 *
4579 * @endcode
4580 *
4581 * Now let's turn to the temperature part: First, we compute the time step
4582 * size. We found that we need smaller time steps for 3d than for 2d for
4583 * the shell geometry. This is because the cells are more distorted in
4584 * that case (it is the smallest edge length that determines the CFL
4585 * number). Instead of computing the time step from the maximum velocity and
4586 * minimal mesh size as in @ref step_31 "step-31", we compute local CFL numbers, i.e., on
4587 * each cell we compute the maximum velocity divided by the mesh size, and
4588 * compute the maximum of them. Hence, we need to choose the factor in
4589 * front of the time step slightly smaller.
4590 *
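 * Concretely, if @f$\mathrm{CFL} = \max_K \|\mathbf u\|_{\infty,K}/h_K@f$ denotes
 * this maximal cell-wise ratio of velocity to mesh size (which is what the
 * <code>get_cfl_number()</code> call below returns), then the time step
 * chosen below is
 * @f{eqnarray*}
 *   k &=& \frac{c}{2.1 \, d^{3/2} \, p_T \, \mathrm{CFL}},
 * @f}
 * where @f$d@f$ is the space dimension, @f$p_T@f$ the polynomial degree of the
 * temperature element, and @f$c=0.25@f$ in 3d and @f$c=1@f$ in 2d.
 *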
4591
4592 *
4593 * After temperature right hand side assembly, we solve the linear system
4594 * for temperature (with fully distributed vectors without any ghosts),
4595 * apply constraints and copy the vector back to one with ghosts.
4596 *
4597
4598 *
4599 * In the end, we extract the temperature range similarly to @ref step_31 "step-31" to
4600 * produce some output (for example in order to help us choose the
4601 * stabilization constants, as discussed in the introduction). The only
4602 * difference is that we need to exchange maxima over all processors. To do
 * this with a single reduction, we store the negative of the local minimum
 * next to the local maximum, take the element-wise maximum over all
 * processors, and flip the sign of the first entry back afterwards; this
 * explains the sign flips in the code below.
4603 *
4604 * @code
4605 * {
4606 * TimerOutput::Scope timer_section(computing_timer,
4607 * " Assemble temperature rhs");
4608 *
4609 * old_time_step = time_step;
4610 *
4611 * const double scaling = (dim == 3 ? 0.25 : 1.0);
4612 * time_step = (scaling / (2.1 * dim * std::sqrt(1. * dim)) /
4613 * (parameters.temperature_degree * get_cfl_number()));
4614 *
4615 * const double maximal_velocity = get_maximal_velocity();
4616 * pcout << " Maximal velocity: "
4617 * << maximal_velocity * EquationData::year_in_seconds * 100
4618 * << " cm/year" << std::endl;
4619 * pcout << " "
4620 * << "Time step: " << time_step / EquationData::year_in_seconds
4621 * << " years" << std::endl;
4622 *
4623 * temperature_solution = old_temperature_solution;
4624 * assemble_temperature_system(maximal_velocity);
4625 * }
4626 *
4627 * {
4628 * TimerOutput::Scope timer_section(computing_timer,
4629 * " Solve temperature system");
4630 *
4631 * SolverControl solver_control(temperature_matrix.m(),
4632 * 1e-12 * temperature_rhs.l2_norm());
4633 * SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);
4634 *
4635 * TrilinosWrappers::MPI::Vector distributed_temperature_solution(
4636 * temperature_rhs);
4637 * distributed_temperature_solution = temperature_solution;
4638 *
4639 * cg.solve(temperature_matrix,
4640 * distributed_temperature_solution,
4641 * temperature_rhs,
4642 * *T_preconditioner);
4643 *
4644 * temperature_constraints.distribute(distributed_temperature_solution);
4645 * temperature_solution = distributed_temperature_solution;
4646 *
4647 * pcout << " " << solver_control.last_step()
4648 * << " CG iterations for temperature" << std::endl;
4649 *
4650 * double temperature[2] = {std::numeric_limits<double>::max(),
4651 * -std::numeric_limits<double>::max()};
4652 * double global_temperature[2];
4653 *
4654 * for (unsigned int i =
4655 * distributed_temperature_solution.local_range().first;
4656 * i < distributed_temperature_solution.local_range().second;
4657 * ++i)
4658 * {
4659 * temperature[0] =
4660 * std::min<double>(temperature[0],
4661 * distributed_temperature_solution(i));
4662 * temperature[1] =
4663 * std::max<double>(temperature[1],
4664 * distributed_temperature_solution(i));
4665 * }
4666 *
4667 * temperature[0] *= -1.0;
4668 * Utilities::MPI::max(temperature, MPI_COMM_WORLD, global_temperature);
4669 * global_temperature[0] *= -1.0;
4670 *
4671 * pcout << " Temperature range: " << global_temperature[0] << ' '
4672 * << global_temperature[1] << std::endl;
4673 * }
4674 * }
4675 *
4676 *
4677 * @endcode
4678 *
4679 *
4680 * <a name="BoussinesqFlowProblemoutput_results"></a>
4681 * <h4>BoussinesqFlowProblem::output_results</h4>
4682 *
4683
4684 *
4685 * Next comes the function that generates the output. The quantities to
4686 * output could be introduced manually like we did in @ref step_31 "step-31". An
4687 * alternative is to hand this task over to a class Postprocessor that
4688 * inherits from the class DataPostprocessor, which can be attached to
4689 * DataOut. This allows us to output derived quantities from the solution,
4690 * like the friction heating included in this example. It overrides the
4691 * virtual function DataPostprocessor::evaluate_vector_field(),
4692 * which is then internally called from DataOut::build_patches(). We have to
4693 * give it values of the numerical solution, its derivatives, normals to the
4694 * cell, the actual evaluation points and any additional quantities. This
4695 * follows the same procedure as discussed in @ref step_29 "step-29" and other programs.
4696 *
4697 * @code
4698 * template <int dim>
4699 * class BoussinesqFlowProblem<dim>::Postprocessor
4700 * : public DataPostprocessor<dim>
4701 * {
4702 * public:
4703 * Postprocessor(const unsigned int partition, const double minimal_pressure);
4704 *
4705 * virtual void evaluate_vector_field(
4706 * const DataPostprocessorInputs::Vector<dim> &inputs,
4707 * std::vector<Vector<double>> &computed_quantities) const override;
4708 *
4709 * virtual std::vector<std::string> get_names() const override;
4710 *
4711 * virtual std::vector<
4712 * DataComponentInterpretation::DataComponentInterpretation>
4713 * get_data_component_interpretation() const override;
4714 *
4715 * virtual UpdateFlags get_needed_update_flags() const override;
4716 *
4717 * private:
4718 * const unsigned int partition;
4719 * const double minimal_pressure;
4720 * };
4721 *
4722 *
4723 * template <int dim>
4724 * BoussinesqFlowProblem<dim>::Postprocessor::Postprocessor(
4725 * const unsigned int partition,
4726 * const double minimal_pressure)
4727 * : partition(partition)
4728 * , minimal_pressure(minimal_pressure)
4729 * {}
4730 *
4731 *
4732 * @endcode
4733 *
4734 * Here we define the names for the variables we want to output. These are
4735 * the actual solution values for velocity, pressure, and temperature, as
4736 * well as the friction heating and, for each cell, the number of the processor
4737 * that owns it. This allows us to visualize the partitioning of the domain
4738 * among the processors. Except for the velocity, which is vector-valued,
4739 * all other quantities are scalar.
4740 *
4741 * @code
4742 * template <int dim>
4743 * std::vector<std::string>
4744 * BoussinesqFlowProblem<dim>::Postprocessor::get_names() const
4745 * {
4746 * std::vector<std::string> solution_names(dim, "velocity");
4747 * solution_names.emplace_back("p");
4748 * solution_names.emplace_back("T");
4749 * solution_names.emplace_back("friction_heating");
4750 * solution_names.emplace_back("partition");
4751 *
4752 * return solution_names;
4753 * }
4754 *
4755 *
4756 * template <int dim>
4757 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
4758 * BoussinesqFlowProblem<dim>::Postprocessor::get_data_component_interpretation()
4759 * const
4760 * {
4761 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
4762 * interpretation(dim,
4763 * DataComponentInterpretation::component_is_part_of_vector);
4764 *
4765 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4766 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4767 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4768 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4769 *
4770 * return interpretation;
4771 * }
4772 *
4773 *
4774 * template <int dim>
4775 * UpdateFlags
4776 * BoussinesqFlowProblem<dim>::Postprocessor::get_needed_update_flags() const
4777 * {
4778 * return update_values | update_gradients;
4779 * }
4780 *
4781 *
4782 * @endcode
4783 *
4784 * Now we implement the function that computes the derived quantities. As we
4785 * also did for the output, we rescale the velocity from its SI units to
4786 * something more readable, namely cm/year. Next, the pressure is scaled to
4787 * be between 0 and the maximum pressure. This makes it more easily
4788 * comparable -- in essence making all pressure variables positive or
4789 * zero. Temperature is taken as is, and the friction heating is computed as
4790 * @f$2 \eta \varepsilon(\mathbf{u}) \cdot \varepsilon(\mathbf{u})@f$.
4791 *
4792
4793 *
4794 * The quantities we output here are more for illustration, rather than for
4795 * actual scientific value. We come back to this briefly in the results
4796 * section of this program and explain what one may in fact be interested in.
4797 *
4798 * @code
4799 * template <int dim>
4800 * void BoussinesqFlowProblem<dim>::Postprocessor::evaluate_vector_field(
4801 * const DataPostprocessorInputs::Vector<dim> &inputs,
4802 * std::vector<Vector<double>> & computed_quantities) const
4803 * {
4804 * const unsigned int n_quadrature_points = inputs.solution_values.size();
4805 * Assert(inputs.solution_gradients.size() == n_quadrature_points,
4806 * ExcInternalError());
4807 * Assert(computed_quantities.size() == n_quadrature_points,
4808 * ExcInternalError());
4809 * Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());
4810 *
4811 * for (unsigned int q = 0; q < n_quadrature_points; ++q)
4812 * {
4813 * for (unsigned int d = 0; d < dim; ++d)
4814 * computed_quantities[q](d) = (inputs.solution_values[q](d) *
4815 * EquationData::year_in_seconds * 100);
4816 *
4817 * const double pressure =
4818 * (inputs.solution_values[q](dim) - minimal_pressure);
4819 * computed_quantities[q](dim) = pressure;
4820 *
4821 * const double temperature = inputs.solution_values[q](dim + 1);
4822 * computed_quantities[q](dim + 1) = temperature;
4823 *
4824 * Tensor<2, dim> grad_u;
4825 * for (unsigned int d = 0; d < dim; ++d)
4826 * grad_u[d] = inputs.solution_gradients[q][d];
4827 * const SymmetricTensor<2, dim> strain_rate = symmetrize(grad_u);
4828 * computed_quantities[q](dim + 2) =
4829 * 2 * EquationData::eta * strain_rate * strain_rate;
4830 *
4831 * computed_quantities[q](dim + 3) = partition;
4832 * }
4833 * }
4834 *
4835 *
4836 * @endcode
4837 *
4838 * The <code>output_results()</code> function has a similar task to the one
4839 * in @ref step_31 "step-31". However, here we are going to demonstrate a different
4840 * technique on how to merge output from different DoFHandler objects. The
4841 * way we're going to achieve this recombination is to create a joint
4842 * DoFHandler that collects both components, the Stokes solution and the
4843 * temperature solution. This can be nicely done by combining the finite
4844 * elements from the two systems to form one FESystem, and let this
4845 * collective system define a new DoFHandler object. To be sure that
4846 * everything was done correctly, we perform a sanity check that ensures
4847 * that we got all the dofs from both Stokes and temperature even in the
4848 * combined system. We then combine the data vectors. Unfortunately, there
4849 * is no straight-forward relation that tells us how to sort Stokes and
4850 * temperature vector into the joint vector. The way we can get around this
4851 * trouble is to rely on the information collected in the FESystem. For each
4852 * dof on a cell, the joint finite element knows to which equation component
4853 * (velocity component, pressure, or temperature) it belongs – that's the
4854 * information we need! So we step through all cells (with iterators into
4855 * all three DoFHandlers moving in sync), and for each joint cell dof, we
4856 * read out that component using the FiniteElement::system_to_base_index
4857 * function (see there for a description of what the various parts of its
4858 * return value contain). We also need to keep track whether we're on a
4859 * Stokes dof or a temperature dof, which is contained in
4860 * joint_fe.system_to_base_index(i).first.first. Eventually, the dof_indices
4861 * data structures on either of the three systems tell us how the relation
4862 * between global vector and local dofs looks like on the present cell,
4863 * which concludes this tedious work. We make sure that each processor only
4864 * works on the subdomain it owns locally (and not on ghost or artificial
4865 * cells) when building the joint solution vector. The same will then have
4866 * to be done in DataOut::build_patches(), but that function does so
4867 * automatically.
4868 *
4869
4870 *
4871 * What we end up with is a set of patches that we can write using the
4872 * functions in DataOutBase in a variety of output formats. Here, we then
4873 * have to make sure that each processor writes only its
4874 * own part of the domain, i.e., we will want to write each processor's
4875 * contribution into a separate file. This we do by adding an additional
4876 * number to the filename when we write the solution. This is not really
4877 * new; we did it similarly in @ref step_40 "step-40". Note that we write in the compressed
4878 * format @p .vtu instead of plain vtk files, which saves quite some
4879 * storage.
4880 *
4881
4882 *
4883 * All the rest of the work is done in the Postprocessor class.
4884 *
4885 * @code
4886 * template <int dim>
4887 * void BoussinesqFlowProblem<dim>::output_results()
4888 * {
4889 * TimerOutput::Scope timer_section(computing_timer, "Postprocessing");
4890 *
4891 * const FESystem<dim> joint_fe(stokes_fe, 1, temperature_fe, 1);
4892 *
4893 * DoFHandler<dim> joint_dof_handler(triangulation);
4894 * joint_dof_handler.distribute_dofs(joint_fe);
4895 * Assert(joint_dof_handler.n_dofs() ==
4896 * stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(),
4897 * ExcInternalError());
4898 *
4899 * TrilinosWrappers::MPI::Vector joint_solution;
4900 * joint_solution.reinit(joint_dof_handler.locally_owned_dofs(),
4901 * MPI_COMM_WORLD);
4902 *
4903 * {
4904 * std::vector<types::global_dof_index> local_joint_dof_indices(
4905 * joint_fe.n_dofs_per_cell());
4906 * std::vector<types::global_dof_index> local_stokes_dof_indices(
4907 * stokes_fe.n_dofs_per_cell());
4908 * std::vector<types::global_dof_index> local_temperature_dof_indices(
4909 * temperature_fe.n_dofs_per_cell());
4910 *
4911 * typename DoFHandler<dim>::active_cell_iterator
4912 * joint_cell = joint_dof_handler.begin_active(),
4913 * joint_endc = joint_dof_handler.end(),
4914 * stokes_cell = stokes_dof_handler.begin_active(),
4915 * temperature_cell = temperature_dof_handler.begin_active();
4916 * for (; joint_cell != joint_endc;
4917 * ++joint_cell, ++stokes_cell, ++temperature_cell)
4918 * if (joint_cell->is_locally_owned())
4919 * {
4920 * joint_cell->get_dof_indices(local_joint_dof_indices);
4921 * stokes_cell->get_dof_indices(local_stokes_dof_indices);
4922 * temperature_cell->get_dof_indices(local_temperature_dof_indices);
4923 *
4924 * for (unsigned int i = 0; i < joint_fe.n_dofs_per_cell(); ++i)
4925 * if (joint_fe.system_to_base_index(i).first.first == 0)
4926 * {
4927 * Assert(joint_fe.system_to_base_index(i).second <
4928 * local_stokes_dof_indices.size(),
4929 * ExcInternalError());
4930 *
4931 * joint_solution(local_joint_dof_indices[i]) = stokes_solution(
4932 * local_stokes_dof_indices[joint_fe.system_to_base_index(i)
4933 * .second]);
4934 * }
4935 * else
4936 * {
4937 * Assert(joint_fe.system_to_base_index(i).first.first == 1,
4938 * ExcInternalError());
4939 * Assert(joint_fe.system_to_base_index(i).second <
4940 * local_temperature_dof_indices.size(),
4941 * ExcInternalError());
4942 * joint_solution(local_joint_dof_indices[i]) =
4943 * temperature_solution(
4944 * local_temperature_dof_indices
4945 * [joint_fe.system_to_base_index(i).second]);
4946 * }
4947 * }
4948 * }
4949 *
4950 * joint_solution.compress(VectorOperation::insert);
4951 *
4952 * IndexSet locally_relevant_joint_dofs(joint_dof_handler.n_dofs());
4953 * DoFTools::extract_locally_relevant_dofs(joint_dof_handler,
4954 * locally_relevant_joint_dofs);
4955 * TrilinosWrappers::MPI::Vector locally_relevant_joint_solution;
4956 * locally_relevant_joint_solution.reinit(locally_relevant_joint_dofs,
4957 * MPI_COMM_WORLD);
4958 * locally_relevant_joint_solution = joint_solution;
4959 *
4960 * Postprocessor postprocessor(Utilities::MPI::this_mpi_process(
4961 * MPI_COMM_WORLD),
4962 * stokes_solution.block(1).min());
4963 *
4964 * DataOut<dim> data_out;
4965 * data_out.attach_dof_handler(joint_dof_handler);
4966 * data_out.add_data_vector(locally_relevant_joint_solution, postprocessor);
4967 * data_out.build_patches();
4968 *
4969 * static int out_index = 0;
4970 * data_out.write_vtu_with_pvtu_record(
4971 * "./", "solution", out_index, MPI_COMM_WORLD, 5);
4972 *
4973 * out_index++;
4974 * }
4975 *
4976 *
4977 *
4978 * @endcode
4979 *
4980 *
4981 * <a name="BoussinesqFlowProblemrefine_mesh"></a>
4982 * <h4>BoussinesqFlowProblem::refine_mesh</h4>
4983 *
4984
4985 *
4986 * This function isn't really new either. Since the <code>setup_dofs</code>
4987 * function that we call in the middle has its own timer section, we split
4988 * the timing of this function into two sections. This will also allow us to easily
4989 * identify which of the two is more expensive.
4990 *
4991
4992 *
4993 * One thing of note, however, is that we only want to compute error
4994 * indicators on the locally owned subdomain. In order to achieve this, we
4995 * pass one additional argument to the KellyErrorEstimator::estimate
4996 * function. Note that the vector for error estimates is resized to the
4997 * number of active cells present on the current process, which is less than
4998 * the total number of active cells on all processors (but more than the
4999 * number of locally owned active cells); each processor only has a few
5000 * coarse cells around the locally owned ones, as also explained in @ref step_40 "step-40".
5001 *
5002
5003 *
5004 * The local error estimates are then handed to a %parallel version of
5005 * GridRefinement (in namespace parallel::distributed::GridRefinement, see
5006 * also @ref step_40 "step-40") which looks at the errors and finds the cells that need
5007 * refinement by comparing the error values across processors. As in
5008 * @ref step_31 "step-31", we want to limit the maximum grid level. So in case some cells
5009 * have been marked that are already at the finest level, we simply clear
5010 * the refine flags.
5011 *
5012 * @code
5013 * template <int dim>
5014 * void
5015 * BoussinesqFlowProblem<dim>::refine_mesh(const unsigned int max_grid_level)
5016 * {
5017 * parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
5018 * temperature_trans(temperature_dof_handler);
5019 * parallel::distributed::SolutionTransfer<dim,
5020 * TrilinosWrappers::MPI::BlockVector>
5021 * stokes_trans(stokes_dof_handler);
5022 *
5023 * {
5024 * TimerOutput::Scope timer_section(computing_timer,
5025 * "Refine mesh structure, part 1");
5026 *
5027 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
5028 *
5029 * KellyErrorEstimator<dim>::estimate(
5030 * temperature_dof_handler,
5031 * QGauss<dim - 1>(parameters.temperature_degree + 1),
5032 * std::map<types::boundary_id, const Function<dim> *>(),
5033 * temperature_solution,
5034 * estimated_error_per_cell,
5035 * ComponentMask(),
5036 * nullptr,
5037 * 0,
5038 * triangulation.locally_owned_subdomain());
5039 *
5040 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
5041 * triangulation, estimated_error_per_cell, 0.3, 0.1);
5042 *
5043 * if (triangulation.n_levels() > max_grid_level)
5044 * for (typename Triangulation<dim>::active_cell_iterator cell =
5045 * triangulation.begin_active(max_grid_level);
5046 * cell != triangulation.end();
5047 * ++cell)
5048 * cell->clear_refine_flag();
5049 *
5050 * @endcode
5051 *
5052 * With all flags marked as necessary, we can then tell the
5053 * parallel::distributed::SolutionTransfer objects to get ready to
5054 * transfer data from one mesh to the next, which they will do when
5055 * notified by
5056 * Triangulation as part of the @p execute_coarsening_and_refinement() call.
5057 * The syntax is similar to the non-%parallel solution transfer (with the
5058 * exception that here a pointer to the vector entries is enough). The
5059 * remainder of the function further down below is then concerned with
5060 * setting up the data structures again after mesh refinement and
5061 * restoring the solution vectors on the new mesh.
5062 *
5063 * @code
5064 * std::vector<const TrilinosWrappers::MPI::Vector *> x_temperature(2);
5065 * x_temperature[0] = &temperature_solution;
5066 * x_temperature[1] = &old_temperature_solution;
5067 * std::vector<const TrilinosWrappers::MPI::BlockVector *> x_stokes(2);
5068 * x_stokes[0] = &stokes_solution;
5069 * x_stokes[1] = &old_stokes_solution;
5070 *
5071 * triangulation.prepare_coarsening_and_refinement();
5072 *
5073 * temperature_trans.prepare_for_coarsening_and_refinement(x_temperature);
5074 * stokes_trans.prepare_for_coarsening_and_refinement(x_stokes);
5075 *
5076 * triangulation.execute_coarsening_and_refinement();
5077 * }
5078 *
5079 * setup_dofs();
5080 *
5081 * {
5082 * TimerOutput::Scope timer_section(computing_timer,
5083 * "Refine mesh structure, part 2");
5084 *
5085 * {
5086 * TrilinosWrappers::MPI::Vector distributed_temp1(temperature_rhs);
5087 * TrilinosWrappers::MPI::Vector distributed_temp2(temperature_rhs);
5088 *
5089 * std::vector<TrilinosWrappers::MPI::Vector *> tmp(2);
5090 * tmp[0] = &(distributed_temp1);
5091 * tmp[1] = &(distributed_temp2);
5092 * temperature_trans.interpolate(tmp);
5093 *
5094 * @endcode
5095 *
5096 * enforce constraints to make the interpolated solution conforming on
5097 * the new mesh:
5098 *
5099 * @code
5100 * temperature_constraints.distribute(distributed_temp1);
5101 * temperature_constraints.distribute(distributed_temp2);
5102 *
5103 * temperature_solution = distributed_temp1;
5104 * old_temperature_solution = distributed_temp2;
5105 * }
5106 *
5107 * {
5108 * TrilinosWrappers::MPI::BlockVector distributed_stokes(stokes_rhs);
5109 * TrilinosWrappers::MPI::BlockVector old_distributed_stokes(stokes_rhs);
5110 *
5111 * std::vector<TrilinosWrappers::MPI::BlockVector *> stokes_tmp(2);
5112 * stokes_tmp[0] = &(distributed_stokes);
5113 * stokes_tmp[1] = &(old_distributed_stokes);
5114 *
5115 * stokes_trans.interpolate(stokes_tmp);
5116 *
5117 * @endcode
5118 *
5119 * enforce constraints to make the interpolated solution conforming on
5120 * the new mesh:
5121 *
5122 * @code
5123 * stokes_constraints.distribute(distributed_stokes);
5124 * stokes_constraints.distribute(old_distributed_stokes);
5125 *
5126 * stokes_solution = distributed_stokes;
5127 * old_stokes_solution = old_distributed_stokes;
5128 * }
5129 * }
5130 * }
5131 *
5132 *
5133 *
5134 * @endcode
5135 *
5136 *
5137 * <a name="BoussinesqFlowProblemrun"></a>
5138 * <h4>BoussinesqFlowProblem::run</h4>
5139 *
5140
5141 *
5142 * This is the final and controlling function in this class. It, in fact,
5143 * runs the entire rest of the program and is, once more, very similar to
5144 * @ref step_31 "step-31". The only substantial difference is that we use a different mesh
5145 * now (a GridGenerator::hyper_shell instead of a simple cube geometry).
5146 *
5147 * @code
5148 * template <int dim>
5149 * void BoussinesqFlowProblem<dim>::run()
5150 * {
5151 * GridGenerator::hyper_shell(triangulation,
5152 * Point<dim>(),
5153 * EquationData::R0,
5154 * EquationData::R1,
5155 * (dim == 3) ? 96 : 12,
5156 * true);
5157 *
5158 * global_Omega_diameter = GridTools::diameter(triangulation);
5159 *
5160 * triangulation.refine_global(parameters.initial_global_refinement);
5161 *
5162 * setup_dofs();
5163 *
5164 * unsigned int pre_refinement_step = 0;
5165 *
5166 * start_time_iteration:
5167 *
5168 * {
5169 * TrilinosWrappers::MPI::Vector solution(
5170 * temperature_dof_handler.locally_owned_dofs());
5171 * @endcode
5172 *
5173 * VectorTools::project supports parallel vector classes with most
5174 * standard finite elements via deal.II's own native MatrixFree framework:
5175 * since we use standard Lagrange elements of moderate order this function
5176 * works well here.
5177 *
5178 * @code
5179 * VectorTools::project(temperature_dof_handler,
5180 * temperature_constraints,
5181 * QGauss<dim>(parameters.temperature_degree + 2),
5182 * EquationData::TemperatureInitialValues<dim>(),
5183 * solution);
5184 * @endcode
5185 *
5186 * Having so computed the current temperature field, let us set the member
5187 * variable that holds the temperature nodes. Strictly speaking, we really
5188 * only need to set <code>old_temperature_solution</code> since the first
5189 * thing we will do is to compute the Stokes solution that only requires
5190 * the previous time step's temperature field. That said, nothing good can
5191 * come from not initializing the other vectors as well (especially since
5192 * it's a relatively cheap operation and we only have to do it once at the
5193 * beginning of the program) if we ever want to extend our numerical
5194 * method or physical model, and so we initialize
5195 * <code>temperature_solution</code> and
5196 * <code>old_old_temperature_solution</code> as well. The assignment makes
5197 * sure that the vectors on the left hand side (which were initialized to
5198 * contain ghost elements as well) also get the correct ghost elements. In
5199 * other words, the assignment here requires communication between
5200 * processors:
5201 *
5202 * @code
5203 * temperature_solution = solution;
5204 * old_temperature_solution = solution;
5205 * old_old_temperature_solution = solution;
5206 * }
5207 *
5208 * timestep_number = 0;
5209 * time_step = old_time_step = 0;
5210 *
5211 * double time = 0;
5212 *
5213 * do
5214 * {
5215 * pcout << "Timestep " << timestep_number
5216 * << ": t=" << time / EquationData::year_in_seconds << " years"
5217 * << std::endl;
5218 *
5219 * assemble_stokes_system();
5220 * build_stokes_preconditioner();
5221 * assemble_temperature_matrix();
5222 *
5223 * solve();
5224 *
5225 * pcout << std::endl;
5226 *
5227 * if ((timestep_number == 0) &&
5228 * (pre_refinement_step < parameters.initial_adaptive_refinement))
5229 * {
5230 * refine_mesh(parameters.initial_global_refinement +
5231 * parameters.initial_adaptive_refinement);
5232 * ++pre_refinement_step;
5233 * goto start_time_iteration;
5234 * }
5235 * else if ((timestep_number > 0) &&
5236 * (timestep_number % parameters.adaptive_refinement_interval ==
5237 * 0))
5238 * refine_mesh(parameters.initial_global_refinement +
5239 * parameters.initial_adaptive_refinement);
5240 *
5241 * if ((parameters.generate_graphical_output == true) &&
5242 * (timestep_number % parameters.graphical_output_interval == 0))
5243 * output_results();
5244 *
5245 * @endcode
5246 *
5247 * In order to speed up linear solvers, we extrapolate the solutions
5248 * from the old time levels to the new one. This gives a very good
5249 * initial guess, cutting the number of iterations needed in solvers
5250 * by more than one half. We do not need to extrapolate in the last
5251 * iteration, so if we reached the final time, we stop here.
5252 *
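 * In formulas, denoting the solution of the step just completed by
 * @f$\mathbf x^{n}@f$, the one before that by @f$\mathbf x^{n-1}@f$, and the ratio
 * of the upcoming to the just completed time step size by @f$r@f$, the
 * <code>sadd()</code> calls below compute the linear extrapolant
 * @f{eqnarray*}
 *   \mathbf x^{*} &=& (1+r)\,\mathbf x^{n} - r\,\mathbf x^{n-1}
 * @f}
 * for both the Stokes and the temperature vectors and use it as the starting
 * guess for the next time step's solves.
 *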
5253
5254 *
5255 * As the last thing during a time step (before actually bumping up
5256 * the number of the time step), we check whether the current time
5257 * step number is divisible by 100, and if so we let the computing
5258 * timer print a summary of CPU times spent so far.
5259 *
5260 * @code
5261 * if (time > parameters.end_time * EquationData::year_in_seconds)
5262 * break;
5263 *
5264 * TrilinosWrappers::MPI::BlockVector old_old_stokes_solution;
5265 * old_old_stokes_solution = old_stokes_solution;
5266 * old_stokes_solution = stokes_solution;
5267 * old_old_temperature_solution = old_temperature_solution;
5268 * old_temperature_solution = temperature_solution;
5269 * if (old_time_step > 0)
5270 * {
5271 * @endcode
5272 *
5273 * Trilinos sadd does not like ghost vectors even as input. Copy
5274 * into distributed vectors for now:
5275 *
5276 * @code
5277 * {
5278 * TrilinosWrappers::MPI::BlockVector distr_solution(stokes_rhs);
5279 * distr_solution = stokes_solution;
5280 * TrilinosWrappers::MPI::BlockVector distr_old_solution(stokes_rhs);
5281 * distr_old_solution = old_old_stokes_solution;
5282 * distr_solution.sadd(1. + time_step / old_time_step,
5283 * -time_step / old_time_step,
5284 * distr_old_solution);
5285 * stokes_solution = distr_solution;
5286 * }
5287 * {
5288 * TrilinosWrappers::MPI::Vector distr_solution(temperature_rhs);
5289 * distr_solution = temperature_solution;
5290 * TrilinosWrappers::MPI::Vector distr_old_solution(temperature_rhs);
5291 * distr_old_solution = old_old_temperature_solution;
5292 * distr_solution.sadd(1. + time_step / old_time_step,
5293 * -time_step / old_time_step,
5294 * distr_old_solution);
5295 * temperature_solution = distr_solution;
5296 * }
5297 * }
5298 *
5299 * if ((timestep_number > 0) && (timestep_number % 100 == 0))
5300 * computing_timer.print_summary();
5301 *
5302 * time += time_step;
5303 * ++timestep_number;
5304 * }
5305 * while (true);
5306 *
5307 * @endcode
5308 *
5309 * If we are generating graphical output, do so also for the last time
5310 * step, unless we had just done so before we left the do-while loop.
5311 *
5312 * @code
5313 * if ((parameters.generate_graphical_output == true) &&
5314 * !((timestep_number - 1) % parameters.graphical_output_interval == 0))
5315 * output_results();
5316 * }
5317 * } // namespace Step32
5318 *
5319 *
5320 *
5321 * @endcode
5322 *
5323 *
5324 * <a name="Thecodemaincodefunction"></a>
5325 * <h3>The <code>main</code> function</h3>
5326 *
5327
5328 *
5329 * The main function is short as usual and very similar to the one in
5330 * @ref step_31 "step-31". Since we use a parameter file which is specified as an argument in
5331 * the command line, we have to read it in here and pass it on to the
5332 * Parameters class for parsing. If no filename is given in the command line,
5333 * we simply use the <code>step-32.prm</code> file which is distributed
5334 * together with the program.
5335 *
5336
5337 *
5338 * Because 3d computations are simply very slow unless you throw a lot of
5339 * processors at them, the program defaults to 2d. You can get the 3d version
5340 * by changing the constant dimension below to 3.
5341 *
5342 * @code
5343 * int main(int argc, char *argv[])
5344 * {
5345 * try
5346 * {
5347 * using namespace Step32;
5348 * using namespace dealii;
5349 *
5350 * Utilities::MPI::MPI_InitFinalize mpi_initialization(
5351 * argc, argv, numbers::invalid_unsigned_int);
5352 *
5353 * std::string parameter_filename;
5354 * if (argc >= 2)
5355 * parameter_filename = argv[1];
5356 * else
5357 * parameter_filename = "step-32.prm";
5358 *
5359 * const int dim = 2;
5360 * BoussinesqFlowProblem<dim>::Parameters parameters(parameter_filename);
5361 * BoussinesqFlowProblem<dim> flow_problem(parameters);
5362 * flow_problem.run();
5363 * }
5364 * catch (std::exception &exc)
5365 * {
5366 * std::cerr << std::endl
5367 * << std::endl
5368 * << "----------------------------------------------------"
5369 * << std::endl;
5370 * std::cerr << "Exception on processing: " << std::endl
5371 * << exc.what() << std::endl
5372 * << "Aborting!" << std::endl
5373 * << "----------------------------------------------------"
5374 * << std::endl;
5375 *
5376 * return 1;
5377 * }
5378 * catch (...)
5379 * {
5380 * std::cerr << std::endl
5381 * << std::endl
5382 * << "----------------------------------------------------"
5383 * << std::endl;
5384 * std::cerr << "Unknown exception!" << std::endl
5385 * << "Aborting!" << std::endl
5386 * << "----------------------------------------------------"
5387 * << std::endl;
5388 * return 1;
5389 * }
5390 *
5391 * return 0;
5392 * }
5393 * @endcode
5394<a name="Results"></a><h1>Results</h1>
5395
5396
5397When run, the program simulates convection in 3d in much the same way
5398as @ref step_31 "step-31" did, though with an entirely different testcase.
5399
5400
5401<a name="Comparisonofresultswithstep31"></a><h3>Comparison of results with step-31</h3>
5402
5403
5404Before we go to this testcase, however, let us show a few results from a
5405slightly earlier version of this program that was solving exactly the
5406testcase we used in @ref step_31 "step-31", except that we now solve it in parallel and with
5407much higher resolution. We show these results mainly for comparison.
5408
5409Here are two images that show this higher resolution if we choose a 3d
5410computation in <code>main()</code> and if we set
5411<code>initial_refinement=3</code> and
5412<code>n_pre_refinement_steps=4</code>. At the time steps shown, the
5413meshes had around 72,000 and 236,000 cells, for a total of 2,680,000
5414and 8,250,000 degrees of freedom, respectively, more than an order of
5415magnitude more than we had available in @ref step_31 "step-31":
5416
5417<table align="center" class="doxtable">
5418 <tr>
5419 <td>
5420 <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.0.png" alt="">
5421 </td>
5422 </tr>
5423 <tr>
5424 <td>
5425 <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.1.png" alt="">
5426 </td>
5427 </tr>
5428</table>
5429
5430The computation was done on a subset of 50 processors of the Brazos
5431cluster at Texas A&amp;M University.
5432
5433
5434<a name="Resultsfora2dcircularshelltestcase"></a><h3>Results for a 2d circular shell testcase</h3>
5435
5436
5437Next, we will run @ref step_32 "step-32" with the parameter file in the directory with one
5438change: we increase the final time to 1e9. Here we are using 16 processors. The
5439command to launch is (note that @ref step_32 "step-32".prm is the default):
5440
5441<code>
5442<pre>
5443$ mpirun -np 16 ./step-32
5444</pre>
5445</code>
5446
5447Note that running a job on a cluster typically requires going through a job
5448scheduler, which we won't discuss here. The output will look roughly like
5449this:
5450
5451<code>
5452<pre>
5453$ mpirun -np 16 ./step-32
5454Number of active cells: 12,288 (on 6 levels)
5455Number of degrees of freedom: 186,624 (99,840+36,864+49,920)
5456
5457Timestep 0: t=0 years
5458
5459 Rebuilding Stokes preconditioner...
5460 Solving Stokes system... 41 iterations.
5461 Maximal velocity: 60.4935 cm/year
5462 Time step: 18166.9 years
5463 17 CG iterations for temperature
5464 Temperature range: 973 4273.16
5465
5466Number of active cells: 15,921 (on 7 levels)
5467Number of degrees of freedom: 252,723 (136,640+47,763+68,320)
5468
5469Timestep 0: t=0 years
5470
5471 Rebuilding Stokes preconditioner...
5472 Solving Stokes system... 50 iterations.
5473 Maximal velocity: 60.3223 cm/year
5474 Time step: 10557.6 years
5475 19 CG iterations for temperature
5476 Temperature range: 973 4273.16
5477
5478Number of active cells: 19,926 (on 8 levels)
5479Number of degrees of freedom: 321,246 (174,312+59,778+87,156)
5480
5481Timestep 0: t=0 years
5482
5483 Rebuilding Stokes preconditioner...
5484 Solving Stokes system... 50 iterations.
5485 Maximal velocity: 57.8396 cm/year
5486 Time step: 5453.78 years
5487 18 CG iterations for temperature
5488 Temperature range: 973 4273.16
5489
5490Timestep 1: t=5453.78 years
5491
5492 Solving Stokes system... 49 iterations.
5493 Maximal velocity: 59.0231 cm/year
5494 Time step: 5345.86 years
5495 18 CG iterations for temperature
5496 Temperature range: 973 4273.16
5497
5498Timestep 2: t=10799.6 years
5499
5500 Solving Stokes system... 24 iterations.
5501 Maximal velocity: 60.2139 cm/year
5502 Time step: 5241.51 years
5503 17 CG iterations for temperature
5504 Temperature range: 973 4273.16
5505
5506[...]
5507
5508Timestep 100: t=272151 years
5509
5510 Solving Stokes system... 21 iterations.
5511 Maximal velocity: 161.546 cm/year
5512 Time step: 1672.96 years
5513 17 CG iterations for temperature
5514 Temperature range: 973 4282.57
5515
5516Number of active cells: 56,085 (on 8 levels)
5517Number of degrees of freedom: 903,408 (490,102+168,255+245,051)
5518
5519
5520
5521+---------------------------------------------+------------+------------+
5522| Total wallclock time elapsed since start | 115s | |
5523| | | |
5524| Section | no. calls | wall time | % of total |
5525+---------------------------------+-----------+------------+------------+
5526| Assemble Stokes system | 103 | 2.82s | 2.5% |
5527| Assemble temperature matrices | 12 | 0.452s | 0.39% |
5528| Assemble temperature rhs | 103 | 11.5s | 10% |
5529| Build Stokes preconditioner | 12 | 2.09s | 1.8% |
5530| Solve Stokes system | 103 | 90.4s | 79% |
5531| Solve temperature system | 103 | 1.53s | 1.3% |
5532| Postprocessing | 3 | 0.532s | 0.46% |
5533| Refine mesh structure, part 1 | 12 | 0.93s | 0.81% |
5534| Refine mesh structure, part 2 | 12 | 0.384s | 0.33% |
5535| Setup dof systems | 13 | 2.96s | 2.6% |
5536+---------------------------------+-----------+------------+------------+
5537
5538[...]
5539
5540+---------------------------------------------+------------+------------+
5541| Total wallclock time elapsed since start | 9.14e+04s | |
5542| | | |
5543| Section | no. calls | wall time | % of total |
5544+---------------------------------+-----------+------------+------------+
5545| Assemble Stokes system | 47045 | 2.05e+03s | 2.2% |
5546| Assemble temperature matrices | 4707 | 310s | 0.34% |
5547| Assemble temperature rhs | 47045 | 8.7e+03s | 9.5% |
5548| Build Stokes preconditioner | 4707 | 1.48e+03s | 1.6% |
5549| Solve Stokes system | 47045 | 7.34e+04s | 80% |
5550| Solve temperature system | 47045 | 1.46e+03s | 1.6% |
5551| Postprocessing | 1883 | 222s | 0.24% |
5552| Refine mesh structure, part 1 | 4706 | 641s | 0.7% |
5553| Refine mesh structure, part 2 | 4706 | 259s | 0.28% |
5554| Setup dof systems | 4707 | 1.86e+03s | 2% |
5555+---------------------------------+-----------+------------+------------+
5556</pre>
5557</code>
5558
5559The simulation terminates when the time reaches the 1 billion years
5560selected in the input file. You can extrapolate from this how long a
5561simulation would take for a different final time (the time step size
5562ultimately settles on somewhere around 20,000 years, so computing for
5563two billion years will take 100,000 time steps, give or take 20%). As
5564can be seen here, we spend most of the compute time in assembling
5565linear systems and &mdash; above all &mdash; in solving Stokes
5566systems.
5567
5568
5569To demonstrate the output, we show it from every 1250th time step here:
5570<table>
5571 <tr>
5572 <td>
5573 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-000.png" alt="">
5574 </td>
5575 <td>
5576 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-050.png" alt="">
5577 </td>
5578 <td>
5579 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-100.png" alt="">
5580 </td>
5581 </tr>
5582 <tr>
5583 <td>
5584 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-150.png" alt="">
5585 </td>
5586 <td>
5587 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-200.png" alt="">
5588 </td>
5589 <td>
5590 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-250.png" alt="">
5591 </td>
5592 </tr>
5593 <tr>
5594 <td>
5595 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-300.png" alt="">
5596 </td>
5597 <td>
5598 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-350.png" alt="">
5599 </td>
5600 <td>
5601 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-400.png" alt="">
5602 </td>
5603 </tr>
5604 <tr>
5605 <td>
5606 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-450.png" alt="">
5607 </td>
5608 <td>
5609 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-500.png" alt="">
5610 </td>
5611 <td>
5612 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-550.png" alt="">
5613 </td>
5614 </tr>
5615 <tr>
5616 <td>
5617 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-600.png" alt="">
5618 </td>
5619 <td>
5620 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-cells.png" alt="">
5621 </td>
5622 <td>
5623 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-partition.png" alt="">
5624 </td>
5625 </tr>
5626</table>
5627
5628The last two images show the grid as well as the partitioning of the mesh for
5629the same computation with 16 subdomains and 16 processors. The full dynamics of
5630this simulation are really only visible by looking at an animation, for example
5631the one <a
5632href="https://www.dealii.org/images/steps/developer/step-32-2d-temperature.webm">shown
5633on this site</a>. This animation is well worth watching due to its artistic quality
5634and entrancing depiction of the evolution of the magma plumes.
5635
5636If you watch the movie, you'll see that the convection pattern goes
5637through several stages: First, it gets rid of the unstable temperature
5638layering with the hot material overlain by the dense cold
5639material. After this great driver is removed and we have a sort of
5640stable situation, a few blobs start to separate from the hot boundary
5641layer at the inner ring and rise up, with a few cold fingers also
5642dropping down from the outer boundary layer. During this phase, the solution
5643remains mostly symmetric, reflecting the 12-fold symmetry of the
5644original mesh. In a final phase, the fluid enters vigorous chaotic
5645stirring in which all symmetries are lost. This is a pattern that then
5646continues to dominate the flow.
5647
5648These different phases can also be identified if we look at the
5649maximal velocity as a function of time in the simulation:
5650
5651<img src="https://www.dealii.org/images/steps/developer/step-32.2d.t_vs_vmax.png" alt="">
5652
5653Here, the velocity (shown in centimeters per year) becomes very large,
5654on the order of several meters per year, at the beginning when the
5655temperature layering is unstable. It then calms down to relatively
5656small values before picking up again in the chaotic stirring
5657regime. There, it remains in the range of 10-40 centimeters per year,
5658quite within the physically expected region.
5659
5660
5661<a name="Resultsfora3dsphericalshelltestcase"></a><h3>Results for a 3d spherical shell testcase</h3>
5662
5663
56643d computations are simply very expensive. Furthermore, as
5665seen above, interesting behavior only starts after quite a long time
5666requiring more CPU hours than are available on a typical
5667cluster. Consequently, rather than showing a complete simulation here,
5668let us simply show a couple of pictures we have obtained using the
5669successor to this program, called <i>ASPECT</i> (short for <i>Advanced
5670%Solver for Problems in Earth's ConvecTion</i>), that is being
5671developed independently of deal.II and that already incorporates some
5672of the extensions discussed below. The following two pictures show
5673isocontours of the temperature and the partition of the domain (along
5674with the mesh) onto 512 processors:
5675
5676<p align="center">
5677<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.solution.png" alt="">
5678
5679<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.partition.png" alt="">
5680</p>
5681
5682
5683<a name="extensions"></a>
5684<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
5685
5686
5687There are many directions in which this program could be extended. As
5688mentioned at the end of the introduction, most of these are under active
5689development in the <i>ASPECT</i> (short for <i>Advanced %Solver for Problems
5690in Earth's ConvecTion</i>) code at the time this tutorial program is being
5691finished. Specifically, the following are certainly topics that one should
5692address to make the program more useful:
5693
5694<ul>
5695 <li> <b>Adiabatic heating/cooling:</b>
5696 The temperature field we get in our simulations after a while
5697 is mostly constant with boundary layers at the inner and outer
5698 boundary, and streamers of cold and hot material mixing
5699 everything. Yet, this doesn't match our expectation that things
5700 closer to the earth core should be hotter than closer to the
5701 surface. The reason is that the energy equation we have used does
5702 not include a term that describes adiabatic cooling and heating:
5703 rock, like gas, heats up as you compress it. Consequently, material
5704 that rises up cools adiabatically, and cold material that sinks down
5705 heats adiabatically. The correct temperature equation would
5706 therefore look somewhat like this:
5707 @f{eqnarray*}
5708 \frac{D T}{Dt}
5709 -
5710 \nabla \cdot \kappa \nabla T &=& \gamma + \tau\frac{Dp}{Dt},
5711 @f}
5712 or, expanding the advected derivative @f$\frac{D}{Dt} =
5713 \frac{\partial}{\partial t} + \mathbf u \cdot \nabla@f$:
5714 @f{eqnarray*}
5715 \frac{\partial T}{\partial t}
5716 +
5717 {\mathbf u} \cdot \nabla T
5718 -
5719 \nabla \cdot \kappa \nabla T &=& \gamma +
5720 \tau\left\{\frac{\partial
5721 p}{\partial t} + \mathbf u \cdot \nabla p \right\}.
5722 @f}
5723 In other words, as pressure increases in a rock volume
5724 (@f$\frac{Dp}{Dt}>0@f$) we get an additional heat source, and vice
5725 versa.
5726
5727 The time derivative of the pressure is a bit awkward to
5728 implement. If necessary, one could approximate it using the fact
5729 outlined in the introduction that the pressure can be decomposed
5730 into a dynamic component due to temperature differences and the
5731 resulting flow, and a static component that results solely from the
5732 static pressure of the overlying rock. Since the latter is much
5733 bigger, one may approximate @f$p\approx p_{\text{static}}=-\rho_{\text{ref}}
5734 [1+\beta T_{\text{ref}}] \varphi@f$, and consequently
5735 @f$\frac{Dp}{Dt} \approx \left\{- \mathbf u \cdot \nabla \rho_{\text{ref}}
5736 [1+\beta T_{\text{ref}}]\varphi\right\} = \rho_{\text{ref}}
5737 [1+\beta T_{\text{ref}}] \mathbf u \cdot \mathbf g@f$.
5738 In other words, if the fluid is moving in the direction of gravity
5739 (downward) it will be compressed and because in that case @f$\mathbf u
5740 \cdot \mathbf g > 0@f$ we get a positive heat source. Conversely, the
5741 fluid will cool down if it moves against the direction of gravity.
5742
5743<li> <b>Compressibility:</b>
5744 As already hinted at in the temperature model above,
5745 mantle rocks are not incompressible. Rather, given the enormous pressures in
5746 the earth mantle (at the core-mantle boundary, the pressure is approximately
5747 140 GPa, equivalent to 1,400,000 times atmospheric pressure), rock actually
5748 does compress to something around 1.5 times the density it would have
5749 at surface pressure. Modeling this presents any number of
5750 difficulties. Primarily, the mass conservation equation is no longer
5751 @f$\textrm{div}\;\mathbf u=0@f$ but should read
5752 @f$\textrm{div}(\rho\mathbf u)=0@f$ where the density @f$\rho@f$ is now no longer
5753 spatially constant but depends on temperature and pressure. A consequence is
5754 that the model is now no longer linear; a linearized version of the Stokes
5755 equation is also no longer symmetric requiring us to rethink preconditioners
5756 and, possibly, even the discretization. We won't go into detail here as to
5757 how this can be resolved.
5758
5759<li> <b>Nonlinear material models:</b> As already hinted at in various places,
5760 material parameters such as the density, the viscosity, and the various
5761 thermal parameters are not constant throughout the earth mantle. Rather,
5762 they nonlinearly depend on the pressure and temperature, and in the case of
5763 the viscosity on the strain rate @f$\varepsilon(\mathbf u)@f$. For complicated
5764 models, the only way to solve such models accurately may be to actually
5765 iterate this dependence out in each time step, rather than simply freezing
5766 coefficients at values extrapolated from the previous time step(s).
5767
5768<li> <b>Checkpoint/restart:</b> Running this program in 2d on a number of
5769 processors allows solving realistic models in a day or two. However, in 3d,
5770 compute times are so large that one runs into two typical problems: (i) On
5771 most compute clusters, the queuing system limits run times for individual
5772 jobs to 2 or 3 days; (ii) losing the results of a computation due to
5773 hardware failures, misconfigurations, or power outages is a shame when
5774 running on hundreds of processors for a couple of days. Both of these
5775 problems can be addressed by periodically saving the state of the program
5776 and, if necessary, restarting the program at this point. This technique is
5777 commonly called <i>checkpoint/restart</i> and it requires that the entire
5778 state of the program be written to a permanent storage location (e.g. a hard
5779 drive). Given the complexity of the data structures of this program, this is
5780 not entirely trivial (it may also involve writing gigabytes or more of
5781 data), but it can be made easier by realizing that one can save the state
5782 between two time steps, where it essentially only consists of the mesh and
5783 solution vectors; during restart one would then first re-enumerate degrees of
5784 freedom in the same way as done before and then re-assemble matrices (the
5785 second sketch after this list illustrates this). Even so, given the distributed
5786 nature of the data structures involved here, saving and restoring the state of
5787 a program is not trivial. An additional complication is that one may
5788 want to change the number of processors between runs, for example because
5789 one may wish to continue computing on a mesh that is finer than the one used
5790 to precompute a starting temperature field at an intermediate time.
5791
5792<li> <b>Predictive postprocessing:</b> The point of computations like this is
5793 not simply to solve the equations. Rather, it is typically the exploration of
5794 different physical models and their comparison with things that we can measure
5795 at the earth surface, in order to find which models are realistic and which
5796 are contradicted by reality. To this end, we need to compute quantities from
5797 our solution vectors that are related to what we can observe: for example,
5798 heat fluxes at the surface of the earth, as well as seismic velocities
5799 throughout the mantle, since these affect the earthquake waves recorded by
5800 seismographs. The last sketch after this list shows one such computation.
5801
5802<li> <b>Better refinement criteria:</b> As can be seen above for the
58033d case, the mesh is primarily refined along the inner
5804boundary. This is because the boundary layer there is stronger than
5805any other transition in the domain, leading us to refine there almost
5806exclusively and hardly at all along the plumes. One certainly needs
5807refinement criteria that track the features of the solution we are
5808actually interested in better than the criterion used here, namely
5809the KellyErrorEstimator applied to the temperature, is able to.
5810
5811</ul>
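
As an illustration of the nonlinear material model point above, the following
is a minimal sketch -- not code that appears in this program -- of a fixed-point
("Picard") iteration one could wrap around the Stokes solve of each time step.
The names used here (solve_stokes(), compute_relative_change(),
previous_picard_iterate, max_picard_steps, picard_tolerance) are schematic
stand-ins rather than functions or variables the program actually provides in
this form:
@code
// Schematic only: iterate assembly and solve until coefficients evaluated at
// the current iterate and the resulting solution are consistent.
for (unsigned int picard_step = 0; picard_step < max_picard_steps; ++picard_step)
  {
    // Evaluate eta(T,p,eps(u)), rho(T,p), ... at the current iterate while
    // assembling, instead of freezing them at extrapolated values:
    assemble_stokes_system();
    solve_stokes();

    // Stop once velocity and pressure no longer change appreciably:
    const double change = compute_relative_change(stokes_solution,
                                                  previous_picard_iterate);
    previous_picard_iterate = stokes_solution;
    if (change < picard_tolerance)
      break;
  }
@endcode
Whether such an inner iteration converges, and how quickly, depends on the
material model; in practice one often also under-relaxes the update.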
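
For checkpoint/restart, deal.II already provides most of the necessary
machinery through parallel::distributed::Triangulation::save()/load() and the
parallel::distributed::SolutionTransfer class. The following is only a rough
sketch under the assumption that the member variables of this program (the
triangulation, the temperature DoFHandler, and its ghosted solution vector)
are used directly, and that the Stokes vectors, the current time, and the time
step size are stored analogously; it is not a complete implementation:
@code
// Writing a checkpoint, e.g. at the end of a time step: attach the solution
// vector(s) to the triangulation, then write everything to disk in parallel.
// (The Stokes vectors, current time, and time step size would have to be
// stored in the same way; they are omitted here for brevity.)
parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
  temperature_trans(temperature_dof_handler);
temperature_trans.prepare_for_serialization(temperature_solution);
triangulation.save("checkpoint.mesh");

// Restarting: re-read the mesh, re-enumerate degrees of freedom exactly as
// before (the program's setup_dofs() step), then retrieve the vector(s).
// Depending on the vector type, one may have to deserialize into a fully
// distributed vector first and then copy ghost entries.
triangulation.load("checkpoint.mesh");
setup_dofs();
parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
  temperature_trans_restart(temperature_dof_handler);
temperature_trans_restart.deserialize(temperature_solution);
@endcode
Matrices and preconditioners are not saved; they are simply re-assembled after
the restart, as discussed above. Restarting with a different number of
processors is possible as well, since the mesh is repartitioned when it is
loaded.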
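
Finally, as an example of the kind of predictive postprocessing mentioned
above, the following sketch evaluates the conductive heat flux through the
outer surface, @f$-\int_{\Gamma_{\text{outer}}} \rho_{\text{ref}} c_p \kappa \nabla T \cdot \mathbf n \, dA@f$.
It assumes the member variables of this program (mapping, temperature_fe,
temperature_dof_handler, and the ghosted temperature_solution) and that the
outer boundary carries boundary indicator 1, as set by the colorized shell
mesh used here:
@code
// A sketch, not part of step-32: total conductive heat flux through the
// outer boundary (assumed to have boundary indicator 1).
const QGauss<dim - 1> face_quadrature(temperature_fe.degree + 1);
FEFaceValues<dim>     fe_face_values(mapping,
                                     temperature_fe,
                                     face_quadrature,
                                     update_gradients | update_normal_vectors |
                                       update_JxW_values);

std::vector<Tensor<1, dim>> temperature_gradients(face_quadrature.size());
double                      local_heat_flux = 0;

for (const auto &cell : temperature_dof_handler.active_cell_iterators())
  if (cell->is_locally_owned())
    for (unsigned int f = 0; f < GeometryInfo<dim>::faces_per_cell; ++f)
      if (cell->face(f)->at_boundary() && (cell->face(f)->boundary_id() == 1))
        {
          fe_face_values.reinit(cell, f);
          fe_face_values.get_function_gradients(temperature_solution,
                                                temperature_gradients);

          // Accumulate -rho_ref c_p kappa grad T . n over this boundary face:
          for (unsigned int q = 0; q < face_quadrature.size(); ++q)
            local_heat_flux -= EquationData::reference_density *
                               EquationData::specific_heat *
                               EquationData::kappa *
                               (temperature_gradients[q] *
                                fe_face_values.normal_vector(q)) *
                               fe_face_values.JxW(q);
        }

// Each processor only sees its own part of the boundary, so sum up:
const double heat_flux = Utilities::MPI::sum(local_heat_flux, MPI_COMM_WORLD);
@endcode
Seismic velocities, on the other hand, would require material models that
translate temperature and pressure into elastic moduli, which goes beyond what
this program's simple material description can provide.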
5812
5813
5814There are many other ways to extend the current program. However, rather than
5815discussing them here, let us point to the much larger open
5816source code ASPECT (see https://aspect.geodynamics.org/ ) that constitutes the
5817further development of @ref step_32 "step-32" and that already includes many such possible
5818extensions.
5819 *
5820 *
5821<a name="PlainProg"></a>
5822<h1> The plain program</h1>
5823@include "step-32.cc"
5824*/