Reference documentation for deal.II version 9.2.0
step-32.h
Go to the documentation of this file.
1 
1517  * constexpr double kappa = 1e-6; /* m^2 / s */
1518  * constexpr double reference_density = 3300; /* kg / m^3 */
1519  * constexpr double reference_temperature = 293; /* K */
1520  * constexpr double expansion_coefficient = 2e-5; /* 1/K */
1521  * constexpr double specific_heat = 1250; /* J / K / kg */
1522  * constexpr double radiogenic_heating = 7.4e-12; /* W / kg */
1523  *
1524  *
1525  * constexpr double R0 = 6371000. - 2890000.; /* m */
1526  * constexpr double R1 = 6371000. - 35000.; /* m */
1527  *
1528  * constexpr double T0 = 4000 + 273; /* K */
1529  * constexpr double T1 = 700 + 273; /* K */
1530  *
1531  *
1532  * @endcode
1533  *
1534  * The next set of definitions is for functions that encode the density
1535  * as a function of temperature, the gravity vector, and the initial
1536  * values for the temperature. Again, all of these (along with the values
1537  * they compute) are discussed in the introduction:
1538  *
1539  * @code
1540  * double density(const double temperature)
1541  * {
1542  * return (
1543  * reference_density *
1544  * (1 - expansion_coefficient * (temperature - reference_temperature)));
1545  * }
1546  *
1547  *
1548  * template <int dim>
1549  * Tensor<1, dim> gravity_vector(const Point<dim> &p)
1550  * {
1551  * const double r = p.norm();
1552  * return -(1.245e-6 * r + 7.714e13 / r / r) * p / r;
1553  * }
1554  *
1555  *
1556  *
1557  * template <int dim>
1558  * class TemperatureInitialValues : public Function<dim>
1559  * {
1560  * public:
1561  * TemperatureInitialValues()
1562  * : Function<dim>(1)
1563  * {}
1564  *
1565  * virtual double value(const Point<dim> & p,
1566  * const unsigned int component = 0) const override;
1567  *
1568  * virtual void vector_value(const Point<dim> &p,
1569  * Vector<double> & value) const override;
1570  * };
1571  *
1572  *
1573  *
1574  * template <int dim>
1575  * double TemperatureInitialValues<dim>::value(const Point<dim> &p,
1576  * const unsigned int) const
1577  * {
1578  * const double r = p.norm();
1579  * const double h = R1 - R0;
1580  *
1581  * const double s = (r - R0) / h;
1582  * const double q =
1583  * (dim == 3) ? std::max(0.0, cos(numbers::PI * abs(p(2) / R1))) : 1.0;
1584  * const double phi = std::atan2(p(0), p(1));
1585  * const double tau = s + 0.2 * s * (1 - s) * std::sin(6 * phi) * q;
1586  *
1587  * return T0 * (1.0 - tau) + T1 * tau;
1588  * }
1589  *
1590  *
1591  * template <int dim>
1592  * void
1593  * TemperatureInitialValues<dim>::vector_value(const Point<dim> &p,
1594  * Vector<double> & values) const
1595  * {
1596  * for (unsigned int c = 0; c < this->n_components; ++c)
1597  * values(c) = TemperatureInitialValues<dim>::value(p, c);
1598  * }
1599  *
1600  *
1601  * @endcode
1602  *
1603  * As mentioned in the introduction we need to rescale the pressure to
1604  * avoid the relative ill-conditioning of the momentum and mass
1605  * conservation equations. The scaling factor is @f$\frac{\eta}{L}@f$ where
1606  * @f$L@f$ is a typical length scale. By experimenting, it turns out that a
1607  * good length scale is the diameter of plumes, which is around 10 km:
1608  *
1609  * @code
1610  * constexpr double pressure_scaling = eta / 10000;
1611  *
1612  * @endcode
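 *
 * As a minimal sketch (names as used further down in this program): during
 * assembly every pressure term of the Stokes weak form is multiplied by this
 * factor, and the computed pressure is multiplied by the same factor again
 * when it is postprocessed for graphical output, so that the output is in
 * physical units. Schematically, inside the assembly loop one has something
 * like
 *
 * @code
 * // sketch only: the pressure-velocity coupling is scaled during assembly
 * data.local_matrix(i, j) -= (EquationData::pressure_scaling *
 *                             scratch.div_phi_u[i] * scratch.phi_p[j]) *
 *                            scratch.stokes_fe_values.JxW(q);
 * @endcode
 *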
1613  *
1614  * The final number in this namespace is a constant that denotes the
1615  * number of seconds per (average, tropical) year. We use this only when
1616  * generating screen output: internally, all computations of this program
1617  * happen in SI units (kilogram, meter, second) but writing geological
1618  * times in seconds yields numbers that one can't relate to reality, and
1619  * so we convert to years using the factor defined here:
1620  *
1621  * @code
1622  * const double year_in_seconds = 60 * 60 * 24 * 365.2425;
1623  *
1624  * } // namespace EquationData
1625  *
1626  *
1627  *
1628  * @endcode
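 *
 * As a small sketch of the conversion just mentioned (assuming
 * <code>time</code> holds the current simulation time in seconds and
 * <code>pcout</code> is the output stream introduced further down), screen
 * output would look something like this:
 *
 * @code
 * // sketch only: convert from SI seconds to years for human-readable output
 * pcout << "   Time: " << time / EquationData::year_in_seconds << " years"
 *       << std::endl;
 * @endcode
 *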
1629  *
1630  *
1631  * <a name="PreconditioningtheStokessystem"></a>
1632  * <h3>Preconditioning the Stokes system</h3>
1633  *
1634 
1635  *
1636  * This namespace implements the preconditioner. As discussed in the
1637  * introduction, this preconditioner differs in a number of key portions
1638  * from the one used in @ref step_31 "step-31". Specifically, it is a right preconditioner,
1639  * implementing the matrix
1640  * @f{align*}
1641  * \left(\begin{array}{cc}A^{-1} & B^T
1642  * \\0 & S^{-1}
1643  * \end{array}\right)
1644  * @f}
1645  * where the two inverse matrix operations
1646  * are approximated by linear solvers or, if the right flag is given to the
1647  * constructor of this class, by a single AMG V-cycle for the velocity
1648  * block. The three code blocks of the <code>vmult</code> function implement
1649  * the multiplications with the three blocks of this preconditioner matrix
1650  * and should be self explanatory if you have read through @ref step_31 "step-31" or the
1651  * discussion of composing solvers in @ref step_20 "step-20".
1652  *
1653  * @code
1654  * namespace LinearSolvers
1655  * {
1656  * template <class PreconditionerTypeA, class PreconditionerTypeMp>
1657  * class BlockSchurPreconditioner : public Subscriptor
1658  * {
1659  * public:
1660  * BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
1661  * const TrilinosWrappers::BlockSparseMatrix &Spre,
1662  * const PreconditionerTypeMp &Mppreconditioner,
1663  * const PreconditionerTypeA & Apreconditioner,
1664  * const bool do_solve_A)
1665  * : stokes_matrix(&S)
1666  * , stokes_preconditioner_matrix(&Spre)
1667  * , mp_preconditioner(Mppreconditioner)
1668  * , a_preconditioner(Apreconditioner)
1669  * , do_solve_A(do_solve_A)
1670  * {}
1671  *
1672  * void vmult(TrilinosWrappers::MPI::BlockVector & dst,
1673  * const TrilinosWrappers::MPI::BlockVector &src) const
1674  * {
1675  * TrilinosWrappers::MPI::Vector utmp(src.block(0));
1676  *
1677  * {
1678  * SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
1679  *
1680  * SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);
1681  *
1682  * solver.solve(stokes_preconditioner_matrix->block(1, 1),
1683  * dst.block(1),
1684  * src.block(1),
1685  * mp_preconditioner);
1686  *
1687  * dst.block(1) *= -1.0;
1688  * }
1689  *
1690  * {
1691  * stokes_matrix->block(0, 1).vmult(utmp, dst.block(1));
1692  * utmp *= -1.0;
1693  * utmp.add(src.block(0));
1694  * }
1695  *
1696  * if (do_solve_A == true)
1697  * {
1698  * SolverControl solver_control(5000, utmp.l2_norm() * 1e-2);
1699  * TrilinosWrappers::SolverCG solver(solver_control);
1700  * solver.solve(stokes_matrix->block(0, 0),
1701  * dst.block(0),
1702  * utmp,
1703  * a_preconditioner);
1704  * }
1705  * else
1706  * a_preconditioner.vmult(dst.block(0), utmp);
1707  * }
1708  *
1709  * private:
1710  * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1711  * stokes_matrix;
1712  * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1713  * stokes_preconditioner_matrix;
1714  * const PreconditionerTypeMp &mp_preconditioner;
1715  * const PreconditionerTypeA & a_preconditioner;
1716  * const bool do_solve_A;
1717  * };
1718  * } // namespace LinearSolvers
1719  *
1720  *
1721  *
1722  * @endcode
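 *
 * To illustrate how this class is used, here is a condensed sketch of what
 * the <code>solve()</code> function further down does (the names refer to
 * member variables introduced later in this file, and
 * <code>distributed_stokes_solution</code> stands for a fully distributed
 * solution vector): the preconditioner object is built from the two matrices
 * and the two inner preconditioners, and is then handed to an FGMRES solver
 * for the complete block system:
 *
 * @code
 * // sketch only: how the Schur complement preconditioner is put to work
 * const LinearSolvers::BlockSchurPreconditioner<
 *   TrilinosWrappers::PreconditionAMG,
 *   TrilinosWrappers::PreconditionJacobi>
 *   preconditioner(stokes_matrix,
 *                  stokes_preconditioner_matrix,
 *                  *Mp_preconditioner,
 *                  *Amg_preconditioner,
 *                  false); // do_solve_A: use a single AMG V-cycle instead
 *
 * SolverControl solver_control(stokes_matrix.m(), 1e-8 * stokes_rhs.l2_norm());
 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(solver_control);
 * solver.solve(stokes_matrix,
 *              distributed_stokes_solution,
 *              stokes_rhs,
 *              preconditioner);
 * @endcode
 *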
1723  *
1724  *
1725  * <a name="Definitionofassemblydatastructures"></a>
1726  * <h3>Definition of assembly data structures</h3>
1727  *
1728 
1729  *
1730  * As described in the introduction, we will use the WorkStream mechanism
1731  * discussed in the @ref threads module to parallelize operations among the
1732  * processors of a single machine. The WorkStream class requires that data
1733  * is passed around in two kinds of data structures, one for scratch data
1734  * and one to pass data from the assembly function to the function that
1735  * copies local contributions into global objects.
1736  *
1737 
1738  *
1739  * The following namespace (and the two sub-namespaces) contains a
1740  * collection of data structures that serve this purpose, one pair for each
1741  * of the four operations discussed in the introduction that we will want to
1742  * parallelize. Each assembly routine gets two sets of data: a Scratch array
1743  * that collects all the classes and arrays that are used for the
1744  * calculation of the cell contribution, and a CopyData array that keeps
1745  * local matrices and vectors which will be written into the global
1746  * matrix. Whereas CopyData is a container for the final data that is
1747  * written into the global matrices and vector (and, thus, absolutely
1748  * necessary), the Scratch arrays are merely there for performance reasons
1749  * &mdash; it would be much more expensive to set up an FEValues object on
1750  * each cell than to create it only once and update some derivative data.
1751  *
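 *
 * To make this division of labor concrete, here is a sketch of the general
 * pattern by which the assembly functions further down hand these objects to
 * WorkStream (class and function names are the ones defined below;
 * <code>quadrature_formula</code> and <code>update_flags</code> stand for a
 * suitable quadrature rule and set of update flags, and the actual calls
 * additionally restrict the iterator range to locally owned cells):
 *
 * @code
 * // sketch only: the worker fills the CopyData using the Scratch object,
 * // the copier writes the CopyData into the global matrix
 * WorkStream::run(
 *   stokes_dof_handler.begin_active(),
 *   stokes_dof_handler.end(),
 *   [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
 *          Assembly::Scratch::StokesPreconditioner<dim> &        scratch,
 *          Assembly::CopyData::StokesPreconditioner<dim> &       data) {
 *     local_assemble_stokes_preconditioner(cell, scratch, data);
 *   },
 *   [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
 *     copy_local_to_global_stokes_preconditioner(data);
 *   },
 *   Assembly::Scratch::StokesPreconditioner<dim>(stokes_fe,
 *                                                quadrature_formula,
 *                                                mapping,
 *                                                update_flags),
 *   Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));
 * @endcode
 *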
1752 
1753  *
1754  * @ref step_31 "step-31" had four assembly routines: One for the preconditioner matrix of
1755  * the Stokes system, one for the Stokes matrix and right hand side, one for
1756  * the temperature matrices and one for the right hand side of the
1757  * temperature equation. We here organize the scratch arrays and CopyData
1758  * objects for each of those four assembly components using a
1759  * <code>struct</code> environment (since we consider these as temporary
1760  * objects we pass around, rather than classes that implement functionality
1761  * of their own, though this is a more subjective point of view to
1762  * distinguish between <code>struct</code>s and <code>class</code>es).
1763  *
1764 
1765  *
1766  * Regarding the Scratch objects, each struct is equipped with a constructor
1767  * that creates an @ref FEValues object using the @ref FiniteElement,
1768  * Quadrature, @ref Mapping (which describes the interpolation of curved
1769  * boundaries), and @ref UpdateFlags instances. Moreover, we manually
1770  * implement a copy constructor (since the FEValues class is not copyable by
1771  * itself), and provide some additional vector fields that are used to hold
1772  * intermediate data during the computation of local contributions.
1773  *
1774 
1775  *
1776  * Let us start with the scratch arrays and, specifically, the one used for
1777  * assembly of the Stokes preconditioner:
1778  *
1779  * @code
1780  * namespace Assembly
1781  * {
1782  * namespace Scratch
1783  * {
1784  * template <int dim>
1785  * struct StokesPreconditioner
1786  * {
1787  * StokesPreconditioner(const FiniteElement<dim> &stokes_fe,
1788  * const Quadrature<dim> & stokes_quadrature,
1789  * const Mapping<dim> & mapping,
1790  * const UpdateFlags update_flags);
1791  *
1792  * StokesPreconditioner(const StokesPreconditioner &data);
1793  *
1794  *
1795  * FEValues<dim> stokes_fe_values;
1796  *
1797  * std::vector<Tensor<2, dim>> grad_phi_u;
1798  * std::vector<double> phi_p;
1799  * };
1800  *
1801  * template <int dim>
1802  * StokesPreconditioner<dim>::StokesPreconditioner(
1803  * const FiniteElement<dim> &stokes_fe,
1804  * const Quadrature<dim> & stokes_quadrature,
1805  * const Mapping<dim> & mapping,
1806  * const UpdateFlags update_flags)
1807  * : stokes_fe_values(mapping, stokes_fe, stokes_quadrature, update_flags)
1808  * , grad_phi_u(stokes_fe.dofs_per_cell)
1809  * , phi_p(stokes_fe.dofs_per_cell)
1810  * {}
1811  *
1812  *
1813  *
1814  * template <int dim>
1815  * StokesPreconditioner<dim>::StokesPreconditioner(
1816  * const StokesPreconditioner &scratch)
1817  * : stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
1818  * scratch.stokes_fe_values.get_fe(),
1819  * scratch.stokes_fe_values.get_quadrature(),
1820  * scratch.stokes_fe_values.get_update_flags())
1821  * , grad_phi_u(scratch.grad_phi_u)
1822  * , phi_p(scratch.phi_p)
1823  * {}
1824  *
1825  *
1826  *
1827  * @endcode
1828  *
1829  * The next one is the scratch object used for the assembly of the full
1830  * Stokes system. Observe that we derive the StokesSystem scratch class
1831  * from the StokesPreconditioner class above. We do this because all the
1832  * objects that are necessary for the assembly of the preconditioner are
1833  * also needed for the actual matrix system and right hand side, plus
1834  * some extra data. This makes the program more compact. Note also that
1835  * the assembly of the Stokes system and the temperature right hand side
1836  * further down requires data from temperature and velocity,
1837  * respectively, so we actually need two FEValues objects for those two
1838  * cases.
1839  *
1840  * @code
1841  * template <int dim>
1842  * struct StokesSystem : public StokesPreconditioner<dim>
1843  * {
1844  * StokesSystem(const FiniteElement<dim> &stokes_fe,
1845  * const Mapping<dim> & mapping,
1846  * const Quadrature<dim> & stokes_quadrature,
1847  * const UpdateFlags stokes_update_flags,
1848  * const FiniteElement<dim> &temperature_fe,
1849  * const UpdateFlags temperature_update_flags);
1850  *
1851  * StokesSystem(const StokesSystem<dim> &data);
1852  *
1853  *
1854  * FEValues<dim> temperature_fe_values;
1855  *
1856  * std::vector<Tensor<1, dim>> phi_u;
1857  * std::vector<SymmetricTensor<2, dim>> grads_phi_u;
1858  * std::vector<double> div_phi_u;
1859  *
1860  * std::vector<double> old_temperature_values;
1861  * };
1862  *
1863  *
1864  * template <int dim>
1865  * StokesSystem<dim>::StokesSystem(
1866  * const FiniteElement<dim> &stokes_fe,
1867  * const Mapping<dim> & mapping,
1868  * const Quadrature<dim> & stokes_quadrature,
1869  * const UpdateFlags stokes_update_flags,
1870  * const FiniteElement<dim> &temperature_fe,
1871  * const UpdateFlags temperature_update_flags)
1872  * : StokesPreconditioner<dim>(stokes_fe,
1873  * stokes_quadrature,
1874  * mapping,
1875  * stokes_update_flags)
1876  * , temperature_fe_values(mapping,
1877  * temperature_fe,
1878  * stokes_quadrature,
1879  * temperature_update_flags)
1880  * , phi_u(stokes_fe.dofs_per_cell)
1881  * , grads_phi_u(stokes_fe.dofs_per_cell)
1882  * , div_phi_u(stokes_fe.dofs_per_cell)
1883  * , old_temperature_values(stokes_quadrature.size())
1884  * {}
1885  *
1886  *
1887  * template <int dim>
1888  * StokesSystem<dim>::StokesSystem(const StokesSystem<dim> &scratch)
1889  * : StokesPreconditioner<dim>(scratch)
1890  * , temperature_fe_values(
1891  * scratch.temperature_fe_values.get_mapping(),
1892  * scratch.temperature_fe_values.get_fe(),
1893  * scratch.temperature_fe_values.get_quadrature(),
1894  * scratch.temperature_fe_values.get_update_flags())
1895  * , phi_u(scratch.phi_u)
1896  * , grads_phi_u(scratch.grads_phi_u)
1897  * , div_phi_u(scratch.div_phi_u)
1898  * , old_temperature_values(scratch.old_temperature_values)
1899  * {}
1900  *
1901  *
1902  * @endcode
1903  *
1904  * After defining the objects used in the assembly of the Stokes system,
1905  * we do the same for the assembly of the matrices necessary for the
1906  * temperature system. The general structure is very similar:
1907  *
1908  * @code
1909  * template <int dim>
1910  * struct TemperatureMatrix
1911  * {
1912  * TemperatureMatrix(const FiniteElement<dim> &temperature_fe,
1913  * const Mapping<dim> & mapping,
1914  * const Quadrature<dim> & temperature_quadrature);
1915  *
1916  * TemperatureMatrix(const TemperatureMatrix &data);
1917  *
1918  *
1919  * FEValues<dim> temperature_fe_values;
1920  *
1921  * std::vector<double> phi_T;
1922  * std::vector<Tensor<1, dim>> grad_phi_T;
1923  * };
1924  *
1925  *
1926  * template <int dim>
1927  * TemperatureMatrix<dim>::TemperatureMatrix(
1928  * const FiniteElement<dim> &temperature_fe,
1929  * const Mapping<dim> & mapping,
1930  * const Quadrature<dim> & temperature_quadrature)
1931  * : temperature_fe_values(mapping,
1932  * temperature_fe,
1933  * temperature_quadrature,
1934  * update_values | update_gradients |
1935  * update_JxW_values)
1936  * , phi_T(temperature_fe.dofs_per_cell)
1937  * , grad_phi_T(temperature_fe.dofs_per_cell)
1938  * {}
1939  *
1940  *
1941  * template <int dim>
1942  * TemperatureMatrix<dim>::TemperatureMatrix(
1943  * const TemperatureMatrix &scratch)
1944  * : temperature_fe_values(
1945  * scratch.temperature_fe_values.get_mapping(),
1946  * scratch.temperature_fe_values.get_fe(),
1947  * scratch.temperature_fe_values.get_quadrature(),
1948  * scratch.temperature_fe_values.get_update_flags())
1949  * , phi_T(scratch.phi_T)
1950  * , grad_phi_T(scratch.grad_phi_T)
1951  * {}
1952  *
1953  *
1954  * @endcode
1955  *
1956  * The final scratch object is used in the assembly of the right hand
1957  * side of the temperature system. This object is significantly larger
1958  * than the ones above because a lot more quantities enter the
1959  * computation of the right hand side of the temperature equation. In
1960  * particular, the temperature values and gradients of the previous two
1961  * time steps need to be evaluated at the quadrature points, as well as
1962  * the velocities and the strain rates (i.e. the symmetric gradients of
1963  * the velocity) that enter the right hand side as friction heating
1964  * terms. Despite the number of terms, the following should be rather
1965  * self explanatory:
1966  *
1967  * @code
1968  * template <int dim>
1969  * struct TemperatureRHS
1970  * {
1971  * TemperatureRHS(const FiniteElement<dim> &temperature_fe,
1972  * const FiniteElement<dim> &stokes_fe,
1973  * const Mapping<dim> & mapping,
1974  * const Quadrature<dim> & quadrature);
1975  *
1976  * TemperatureRHS(const TemperatureRHS &data);
1977  *
1978  *
1979  * FEValues<dim> temperature_fe_values;
1980  * FEValues<dim> stokes_fe_values;
1981  *
1982  * std::vector<double> phi_T;
1983  * std::vector<Tensor<1, dim>> grad_phi_T;
1984  *
1985  * std::vector<Tensor<1, dim>> old_velocity_values;
1986  * std::vector<Tensor<1, dim>> old_old_velocity_values;
1987  *
1988  * std::vector<SymmetricTensor<2, dim>> old_strain_rates;
1989  * std::vector<SymmetricTensor<2, dim>> old_old_strain_rates;
1990  *
1991  * std::vector<double> old_temperature_values;
1992  * std::vector<double> old_old_temperature_values;
1993  * std::vector<Tensor<1, dim>> old_temperature_grads;
1994  * std::vector<Tensor<1, dim>> old_old_temperature_grads;
1995  * std::vector<double> old_temperature_laplacians;
1996  * std::vector<double> old_old_temperature_laplacians;
1997  * };
1998  *
1999  *
2000  * template <int dim>
2001  * TemperatureRHS<dim>::TemperatureRHS(
2002  * const FiniteElement<dim> &temperature_fe,
2003  * const FiniteElement<dim> &stokes_fe,
2004  * const Mapping<dim> & mapping,
2005  * const Quadrature<dim> & quadrature)
2006  * : temperature_fe_values(mapping,
2007  * temperature_fe,
2008  * quadrature,
2009  * update_values | update_gradients |
2010  * update_hessians | update_quadrature_points |
2011  * update_JxW_values)
2012  * , stokes_fe_values(mapping,
2013  * stokes_fe,
2014  * quadrature,
2015  * update_values | update_gradients)
2016  * , phi_T(temperature_fe.dofs_per_cell)
2017  * , grad_phi_T(temperature_fe.dofs_per_cell)
2018  * ,
2019  *
2020  * old_velocity_values(quadrature.size())
2021  * , old_old_velocity_values(quadrature.size())
2022  * , old_strain_rates(quadrature.size())
2023  * , old_old_strain_rates(quadrature.size())
2024  * ,
2025  *
2026  * old_temperature_values(quadrature.size())
2027  * , old_old_temperature_values(quadrature.size())
2028  * , old_temperature_grads(quadrature.size())
2029  * , old_old_temperature_grads(quadrature.size())
2030  * , old_temperature_laplacians(quadrature.size())
2031  * , old_old_temperature_laplacians(quadrature.size())
2032  * {}
2033  *
2034  *
2035  * template <int dim>
2036  * TemperatureRHS<dim>::TemperatureRHS(const TemperatureRHS &scratch)
2037  * : temperature_fe_values(
2038  * scratch.temperature_fe_values.get_mapping(),
2039  * scratch.temperature_fe_values.get_fe(),
2040  * scratch.temperature_fe_values.get_quadrature(),
2041  * scratch.temperature_fe_values.get_update_flags())
2042  * , stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
2043  * scratch.stokes_fe_values.get_fe(),
2044  * scratch.stokes_fe_values.get_quadrature(),
2045  * scratch.stokes_fe_values.get_update_flags())
2046  * , phi_T(scratch.phi_T)
2047  * , grad_phi_T(scratch.grad_phi_T)
2048  * ,
2049  *
2050  * old_velocity_values(scratch.old_velocity_values)
2051  * , old_old_velocity_values(scratch.old_old_velocity_values)
2052  * , old_strain_rates(scratch.old_strain_rates)
2053  * , old_old_strain_rates(scratch.old_old_strain_rates)
2054  * ,
2055  *
2056  * old_temperature_values(scratch.old_temperature_values)
2057  * , old_old_temperature_values(scratch.old_old_temperature_values)
2058  * , old_temperature_grads(scratch.old_temperature_grads)
2059  * , old_old_temperature_grads(scratch.old_old_temperature_grads)
2060  * , old_temperature_laplacians(scratch.old_temperature_laplacians)
2061  * , old_old_temperature_laplacians(scratch.old_old_temperature_laplacians)
2062  * {}
2063  * } // namespace Scratch
2064  *
2065  *
2066  * @endcode
2067  *
2068  * The CopyData objects are even simpler than the Scratch objects as all
2069  * they have to do is to store the results of local computations until
2070  * they can be copied into the global matrix or vector objects. These
2071  * structures therefore only need to provide a constructor, a copy
2072  * operation, and some arrays for local matrix, local vectors and the
2073  * relation between local and global degrees of freedom (a.k.a.
2074  * <code>local_dof_indices</code>). Again, we have one such structure for
2075  * each of the four operations we will parallelize using the WorkStream
2076  * class:
2077  *
2078  * @code
2079  * namespace CopyData
2080  * {
2081  * template <int dim>
2082  * struct StokesPreconditioner
2083  * {
2084  * StokesPreconditioner(const FiniteElement<dim> &stokes_fe);
2085  * StokesPreconditioner(const StokesPreconditioner &data);
2086  * StokesPreconditioner &operator=(const StokesPreconditioner &) = default;
2087  *
2088  * FullMatrix<double> local_matrix;
2089  * std::vector<types::global_dof_index> local_dof_indices;
2090  * };
2091  *
2092  * template <int dim>
2093  * StokesPreconditioner<dim>::StokesPreconditioner(
2094  * const FiniteElement<dim> &stokes_fe)
2095  * : local_matrix(stokes_fe.dofs_per_cell, stokes_fe.dofs_per_cell)
2096  * , local_dof_indices(stokes_fe.dofs_per_cell)
2097  * {}
2098  *
2099  * template <int dim>
2100  * StokesPreconditioner<dim>::StokesPreconditioner(
2101  * const StokesPreconditioner &data)
2102  * : local_matrix(data.local_matrix)
2103  * , local_dof_indices(data.local_dof_indices)
2104  * {}
2105  *
2106  *
2107  *
2108  * template <int dim>
2109  * struct StokesSystem : public StokesPreconditioner<dim>
2110  * {
2111  * StokesSystem(const FiniteElement<dim> &stokes_fe);
2112  *
2113  * Vector<double> local_rhs;
2114  * };
2115  *
2116  * template <int dim>
2117  * StokesSystem<dim>::StokesSystem(const FiniteElement<dim> &stokes_fe)
2118  * : StokesPreconditioner<dim>(stokes_fe)
2119  * , local_rhs(stokes_fe.dofs_per_cell)
2120  * {}
2121  *
2122  *
2123  *
2124  * template <int dim>
2125  * struct TemperatureMatrix
2126  * {
2127  * TemperatureMatrix(const FiniteElement<dim> &temperature_fe);
2128  *
2129  * FullMatrix<double> local_mass_matrix;
2130  * FullMatrix<double> local_stiffness_matrix;
2131  * std::vector<types::global_dof_index> local_dof_indices;
2132  * };
2133  *
2134  * template <int dim>
2135  * TemperatureMatrix<dim>::TemperatureMatrix(
2136  * const FiniteElement<dim> &temperature_fe)
2137  * : local_mass_matrix(temperature_fe.dofs_per_cell,
2138  * temperature_fe.dofs_per_cell)
2139  * , local_stiffness_matrix(temperature_fe.dofs_per_cell,
2140  * temperature_fe.dofs_per_cell)
2141  * , local_dof_indices(temperature_fe.dofs_per_cell)
2142  * {}
2143  *
2144  *
2145  *
2146  * template <int dim>
2147  * struct TemperatureRHS
2148  * {
2149  * TemperatureRHS(const FiniteElement<dim> &temperature_fe);
2150  *
2151  * Vector<double> local_rhs;
2152  * std::vector<types::global_dof_index> local_dof_indices;
2153  * FullMatrix<double> matrix_for_bc;
2154  * };
2155  *
2156  * template <int dim>
2157  * TemperatureRHS<dim>::TemperatureRHS(
2158  * const FiniteElement<dim> &temperature_fe)
2159  * : local_rhs(temperature_fe.dofs_per_cell)
2160  * , local_dof_indices(temperature_fe.dofs_per_cell)
2161  * , matrix_for_bc(temperature_fe.dofs_per_cell,
2162  * temperature_fe.dofs_per_cell)
2163  * {}
2164  * } // namespace CopyData
2165  * } // namespace Assembly
2166  *
2167  *
2168  *
2169  * @endcode
2170  *
2171  *
2172  * <a name="ThecodeBoussinesqFlowProblemcodeclasstemplate"></a>
2173  * <h3>The <code>BoussinesqFlowProblem</code> class template</h3>
2174  *
2175 
2176  *
2177  * This is the declaration of the main class. It is very similar to @ref step_31 "step-31"
2178  * but there are a number of differences that we will comment on below.
2179  *
2180 
2181  *
2182  * The top of the class is essentially the same as in @ref step_31 "step-31", listing the
2183  * public methods and a set of private functions that do the heavy
2184  * lifting. Compared to @ref step_31 "step-31" there are only two additions to this
2185  * section: the function <code>get_cfl_number()</code> that computes the
2186  * maximum CFL number over all cells, from which we then compute the global
2187  * time step, and the function <code>get_entropy_variation()</code> that is
2188  * used in the computation of the entropy stabilization. It is akin to the
2189  * <code>get_extrapolated_temperature_range()</code> we have used in @ref step_31 "step-31"
2190  * for this purpose, but works on the entropy instead of the
2191  * temperature.
2192  *
2193  * @code
2194  * template <int dim>
2195  * class BoussinesqFlowProblem
2196  * {
2197  * public:
2198  * struct Parameters;
2199  * BoussinesqFlowProblem(Parameters &parameters);
2200  * void run();
2201  *
2202  * private:
2203  * void setup_dofs();
2204  * void assemble_stokes_preconditioner();
2205  * void build_stokes_preconditioner();
2206  * void assemble_stokes_system();
2207  * void assemble_temperature_matrix();
2208  * void assemble_temperature_system(const double maximal_velocity);
2209  * double get_maximal_velocity() const;
2210  * double get_cfl_number() const;
2211  * double get_entropy_variation(const double average_temperature) const;
2212  * std::pair<double, double> get_extrapolated_temperature_range() const;
2213  * void solve();
2214  * void output_results();
2215  * void refine_mesh(const unsigned int max_grid_level);
2216  *
2217  * double compute_viscosity(
2218  * const std::vector<double> & old_temperature,
2219  * const std::vector<double> & old_old_temperature,
2220  * const std::vector<Tensor<1, dim>> &old_temperature_grads,
2221  * const std::vector<Tensor<1, dim>> &old_old_temperature_grads,
2222  * const std::vector<double> & old_temperature_laplacians,
2223  * const std::vector<double> & old_old_temperature_laplacians,
2224  * const std::vector<Tensor<1, dim>> &old_velocity_values,
2225  * const std::vector<Tensor<1, dim>> &old_old_velocity_values,
2226  * const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
2227  * const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
2228  * const double global_u_infty,
2229  * const double global_T_variation,
2230  * const double average_temperature,
2231  * const double global_entropy_variation,
2232  * const double cell_diameter) const;
2233  *
2234  * public:
2235  * @endcode
2236  *
2237  * The first significant new component is the definition of a struct for
2238  * the parameters according to the discussion in the introduction. This
2239  * structure is initialized by reading from a parameter file during
2240  * construction of this object.
2241  *
2242  * @code
2243  * struct Parameters
2244  * {
2245  * Parameters(const std::string &parameter_filename);
2246  *
2247  * static void declare_parameters(ParameterHandler &prm);
2248  * void parse_parameters(ParameterHandler &prm);
2249  *
2250  * double end_time;
2251  *
2252  * unsigned int initial_global_refinement;
2253  * unsigned int initial_adaptive_refinement;
2254  *
2255  * bool generate_graphical_output;
2256  * unsigned int graphical_output_interval;
2257  *
2258  * unsigned int adaptive_refinement_interval;
2259  *
2260  * double stabilization_alpha;
2261  * double stabilization_c_R;
2262  * double stabilization_beta;
2263  *
2264  * unsigned int stokes_velocity_degree;
2265  * bool use_locally_conservative_discretization;
2266  *
2267  * unsigned int temperature_degree;
2268  * };
2269  *
2270  * private:
2271  * Parameters &parameters;
2272  *
2273  * @endcode
2274  *
2275  * The <code>pcout</code> (for <i>%parallel <code>std::cout</code></i>)
2276  * object is used to simplify writing output: each MPI process can use
2277  * this to generate output as usual, but since each of these processes
2278  * will (hopefully) produce the same output, it will just be replicated
2279  * many times over; with the ConditionalOStream class, only the output
2280  * generated by one MPI process will actually be printed to screen,
2281  * whereas the output by all the other processes will simply be forgotten.
2282  *
2283  * @code
2284  * ConditionalOStream pcout;
2285  *
2286  * @endcode
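 *
 * In practice, <code>pcout</code> is then used exactly like
 * <code>std::cout</code>; for instance (a sketch, using the triangulation
 * member declared below):
 *
 * @code
 * // sketch only: prints on only one MPI process
 * pcout << "Number of active cells: "
 *       << triangulation.n_global_active_cells() << std::endl;
 * @endcode
 *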
2287  *
2288  * The following member variables will then again be similar to those in
2289  * @ref step_31 "step-31" (and to other tutorial programs). As mentioned in the
2290  * introduction, we fully distribute computations, so we will have to use
2291  * the parallel::distributed::Triangulation class (see @ref step_40 "step-40") but the
2292  * remainder of these variables is rather standard with two exceptions:
2293  *
2294 
2295  *
2296  * - The <code>mapping</code> variable is used to denote a higher-order
2297  * polynomial mapping. As mentioned in the introduction, we use this
2298  * mapping when forming integrals through quadrature for all cells that
2299  * are adjacent to either the inner or outer boundaries of our domain
2300  * where the boundary is curved.
2301  *
2302 
2303  *
2304  * - In a bit of naming confusion, you will notice below that some of the
2305  * variables from namespace TrilinosWrappers are taken from namespace
2306  * TrilinosWrappers::MPI (such as the right hand side vectors) whereas
2307  * others are not (such as the various matrices). This is due to legacy
2308  * reasons. We will frequently have to query velocities
2309  * and temperatures at arbitrary quadrature points; consequently, rather
2310  * than importing ghost information of a vector whenever we need access
2311  * to degrees of freedom that are relevant locally but owned by another
2312  * processor, we solve linear systems in %parallel but then immediately
2313  * initialize a vector including ghost entries of the solution for further
2314  * processing. The various <code>*_solution</code> vectors are therefore
2315  * filled immediately after solving their respective linear system in
2316  * %parallel and will always contain values for all
2317  * @ref GlossLocallyRelevantDof "locally relevant degrees of freedom";
2318  * the fully distributed vectors that we obtain from the solution process
2319  * and that only ever contain the
2320  * @ref GlossLocallyOwnedDof "locally owned degrees of freedom" are
2321  * destroyed immediately after the solution process and after we have
2322  * copied the relevant values into the member variable vectors, as sketched below.
2323  *
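 *
 * Here is a condensed sketch of this owned-versus-ghosted pattern as it
 * appears in the <code>solve()</code> function further down (where
 * <code>solver</code> and <code>preconditioner</code> stand for the objects
 * discussed in the LinearSolvers namespace above, and the remaining names
 * are the member variables declared right below):
 *
 * @code
 * // sketch only: solve into a vector holding only locally owned entries,
 * // then copy into the ghosted member vector, which imports ghost values
 * TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(stokes_rhs);
 * solver.solve(stokes_matrix,
 *              distributed_stokes_solution,
 *              stokes_rhs,
 *              preconditioner);
 * stokes_constraints.distribute(distributed_stokes_solution);
 * stokes_solution = distributed_stokes_solution;
 * @endcode
 *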
2324  * @code
2325  * parallel::distributed::Triangulation<dim> triangulation;
2326  * double global_Omega_diameter;
2327  *
2328  * const MappingQ<dim> mapping;
2329  *
2330  * const FESystem<dim> stokes_fe;
2331  * DoFHandler<dim> stokes_dof_handler;
2332  * AffineConstraints<double> stokes_constraints;
2333  *
2334  * TrilinosWrappers::BlockSparseMatrix stokes_matrix;
2335  * TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
2336  *
2337  * TrilinosWrappers::MPI::BlockVector stokes_solution;
2338  * TrilinosWrappers::MPI::BlockVector old_stokes_solution;
2339  * TrilinosWrappers::MPI::BlockVector stokes_rhs;
2340  *
2341  *
2342  * FE_Q<dim> temperature_fe;
2343  * DoFHandler<dim> temperature_dof_handler;
2344  * AffineConstraints<double> temperature_constraints;
2345  *
2346  * TrilinosWrappers::SparseMatrix temperature_mass_matrix;
2347  * TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
2348  * TrilinosWrappers::SparseMatrix temperature_matrix;
2349  *
2350  * TrilinosWrappers::MPI::Vector temperature_solution;
2351  * TrilinosWrappers::MPI::Vector old_temperature_solution;
2352  * TrilinosWrappers::MPI::Vector old_old_temperature_solution;
2353  * TrilinosWrappers::MPI::Vector temperature_rhs;
2354  *
2355  *
2356  * double time_step;
2357  * double old_time_step;
2358  * unsigned int timestep_number;
2359  *
2360  * std::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
2361  * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
2362  * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;
2363  *
2364  * bool rebuild_stokes_matrix;
2365  * bool rebuild_stokes_preconditioner;
2366  * bool rebuild_temperature_matrices;
2367  * bool rebuild_temperature_preconditioner;
2368  *
2369  * @endcode
2370  *
2371  * The next member variable, <code>computing_timer</code> is used to
2372  * conveniently account for compute time spent in certain "sections" of
2373  * the code that are repeatedly entered. For example, we will enter (and
2374  * leave) sections for Stokes matrix assembly and would like to accumulate
2375  * the run time spent in this section over all time steps. Every so many
2376  * time steps as well as at the end of the program (through the destructor
2377  * of the TimerOutput class) we will then produce a nice summary of the
2378  * times spent in the different sections into which we categorize the
2379  * run-time of this program.
2380  *
2381  * @code
2382  * TimerOutput computing_timer;
2383  *
2384  * @endcode
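 *
 * A typical timed section then looks like the following sketch: the
 * TimerOutput::Scope object starts the named section upon construction and
 * stops it automatically when it goes out of scope at the end of the
 * enclosing function (the section name here is only illustrative):
 *
 * @code
 * // sketch only: everything until the end of the enclosing scope is
 * // accounted for under this section name
 * TimerOutput::Scope timer_section(computing_timer, "Assemble Stokes system");
 * @endcode
 *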
2385  *
2386  * After these member variables we have a number of auxiliary functions
2387  * that have been broken out of the ones listed above. Specifically, there
2388  * are first three functions that we call from <code>setup_dofs</code> and
2389  * then the ones that do the assembling of linear systems:
2390  *
2391  * @code
2392  * void setup_stokes_matrix(
2393  * const std::vector<IndexSet> &stokes_partitioning,
2394  * const std::vector<IndexSet> &stokes_relevant_partitioning);
2395  * void setup_stokes_preconditioner(
2396  * const std::vector<IndexSet> &stokes_partitioning,
2397  * const std::vector<IndexSet> &stokes_relevant_partitioning);
2398  * void setup_temperature_matrices(
2399  * const IndexSet &temperature_partitioning,
2400  * const IndexSet &temperature_relevant_partitioning);
2401  *
2402  *
2403  * @endcode
2404  *
2405  * Following the @ref MTWorkStream "task-based parallelization" paradigm,
2406  * we split all the assembly routines into two parts: a first part that
2407  * can do all the calculations on a certain cell without taking care of
2408  * other threads, and a second part (writing the local data into
2409  * the global matrices and vectors) that can be entered by only one
2410  * thread at a time. In order to implement that, we provide functions for
2411  * each of those two steps for all the four assembly routines that we use
2412  * in this program. The following eight functions do exactly this:
2413  *
2414  * @code
2415  * void local_assemble_stokes_preconditioner(
2416  * const typename DoFHandler<dim>::active_cell_iterator &cell,
2417  * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
2418  * Assembly::CopyData::StokesPreconditioner<dim> & data);
2419  *
2420  * void copy_local_to_global_stokes_preconditioner(
2421  * const Assembly::CopyData::StokesPreconditioner<dim> &data);
2422  *
2423  *
2424  * void local_assemble_stokes_system(
2425  * const typename DoFHandler<dim>::active_cell_iterator &cell,
2426  * Assembly::Scratch::StokesSystem<dim> & scratch,
2427  * Assembly::CopyData::StokesSystem<dim> & data);
2428  *
2429  * void copy_local_to_global_stokes_system(
2430  * const Assembly::CopyData::StokesSystem<dim> &data);
2431  *
2432  *
2433  * void local_assemble_temperature_matrix(
2434  * const typename DoFHandler<dim>::active_cell_iterator &cell,
2435  * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
2436  * Assembly::CopyData::TemperatureMatrix<dim> & data);
2437  *
2438  * void copy_local_to_global_temperature_matrix(
2439  * const Assembly::CopyData::TemperatureMatrix<dim> &data);
2440  *
2441  *
2442  *
2443  * void local_assemble_temperature_rhs(
2444  * const std::pair<double, double> global_T_range,
2445  * const double global_max_velocity,
2446  * const double global_entropy_variation,
2447  * const typename DoFHandler<dim>::active_cell_iterator &cell,
2448  * Assembly::Scratch::TemperatureRHS<dim> & scratch,
2449  * Assembly::CopyData::TemperatureRHS<dim> & data);
2450  *
2451  * void copy_local_to_global_temperature_rhs(
2452  * const Assembly::CopyData::TemperatureRHS<dim> &data);
2453  *
2454  * @endcode
2455  *
2456  * Finally, we forward declare a member class that we will define later on
2457  * and that will be used to compute a number of quantities from our
2458  * solution vectors that we'd like to put into the output files for
2459  * visualization.
2460  *
2461  * @code
2462  * class Postprocessor;
2463  * };
2464  *
2465  *
2466  * @endcode
2467  *
2468  *
2469  * <a name="BoussinesqFlowProblemclassimplementation"></a>
2470  * <h3>BoussinesqFlowProblem class implementation</h3>
2471  *
2472 
2473  *
2474  *
2475  * <a name="BoussinesqFlowProblemParameters"></a>
2476  * <h4>BoussinesqFlowProblem::Parameters</h4>
2477  *
2478 
2479  *
2480  * Here comes the definition of the parameters for the Stokes problem. We
2481  * allow the user to set the end time of the simulation, the levels of
2482  * refinement (both global and adaptive, which together specify the maximum
2483  * level the cells are allowed to have), and the interval between refinements in
2484  * the time stepping.
2485  *
2486 
2487  *
2488  * Then, we let the user specify constants for the stabilization parameters
2489  * (as discussed in the introduction), the polynomial degree for the Stokes
2490  * velocity space, whether to use the locally conservative discretization
2491  * based on FE_DGP elements for the pressure or not (FE_Q elements for
2492  * pressure), and the polynomial degree for the temperature interpolation.
2493  *
2494 
2495  *
2496  * The constructor checks for a valid input file (if not, a file with
2497  * default parameters for the quantities is written), and eventually parses
2498  * the parameters.
2499  *
2500  * @code
2501  * template <int dim>
2502  * BoussinesqFlowProblem<dim>::Parameters::Parameters(
2503  * const std::string &parameter_filename)
2504  * : end_time(1e8)
2505  * , initial_global_refinement(2)
2506  * , initial_adaptive_refinement(2)
2507  * , adaptive_refinement_interval(10)
2508  * , stabilization_alpha(2)
2509  * , stabilization_c_R(0.11)
2510  * , stabilization_beta(0.078)
2511  * , stokes_velocity_degree(2)
2512  * , use_locally_conservative_discretization(true)
2513  * , temperature_degree(2)
2514  * {
2515  * ParameterHandler prm;
2516  * BoussinesqFlowProblem<dim>::Parameters::declare_parameters(prm);
2517  *
2518  * std::ifstream parameter_file(parameter_filename);
2519  *
2520  * if (!parameter_file)
2521  * {
2522  * parameter_file.close();
2523  *
2524  * std::ofstream parameter_out(parameter_filename);
2525  * prm.print_parameters(parameter_out, ParameterHandler::Text);
2526  *
2527  * AssertThrow(
2528  * false,
2529  * ExcMessage(
2530  * "Input parameter file <" + parameter_filename +
2531  * "> not found. Creating a template file of the same name."));
2532  * }
2533  *
2534  * prm.parse_input(parameter_file);
2535  * parse_parameters(prm);
2536  * }
2537  *
2538  *
2539  *
2540  * @endcode
2541  *
2542  * Next we have a function that declares the parameters that we expect in
2543  * the input file, together with their data types, default values and a
2544  * description:
2545  *
2546  * @code
2547  * template <int dim>
2548  * void BoussinesqFlowProblem<dim>::Parameters::declare_parameters(
2549  * ParameterHandler &prm)
2550  * {
2551  * prm.declare_entry("End time",
2552  * "1e8",
2553  * Patterns::Double(0),
2554  * "The end time of the simulation in years.");
2555  * prm.declare_entry("Initial global refinement",
2556  * "2",
2557  * Patterns::Integer(0),
2558  * "The number of global refinement steps performed on "
2559  * "the initial coarse mesh, before the problem is first "
2560  * "solved there.");
2561  * prm.declare_entry("Initial adaptive refinement",
2562  * "2",
2563  * Patterns::Integer(0),
2564  * "The number of adaptive refinement steps performed after "
2565  * "initial global refinement.");
2566  * prm.declare_entry("Time steps between mesh refinement",
2567  * "10",
2568  * Patterns::Integer(1),
2569  * "The number of time steps after which the mesh is to be "
2570  * "adapted based on computed error indicators.");
2571  * prm.declare_entry("Generate graphical output",
2572  * "false",
2573  * Patterns::Bool(),
2574  * "Whether graphical output is to be generated or not. "
2575  * "You may not want to get graphical output if the number "
2576  * "of processors is large.");
2577  * prm.declare_entry("Time steps between graphical output",
2578  * "50",
2579  * Patterns::Integer(1),
2580  * "The number of time steps between each generation of "
2581  * "graphical output files.");
2582  *
2583  * prm.enter_subsection("Stabilization parameters");
2584  * {
2585  * prm.declare_entry("alpha",
2586  * "2",
2587  * Patterns::Double(1, 2),
2588  * "The exponent in the entropy viscosity stabilization.");
2589  * prm.declare_entry("c_R",
2590  * "0.11",
2591  * Patterns::Double(0),
2592  * "The c_R factor in the entropy viscosity "
2593  * "stabilization.");
2594  * prm.declare_entry("beta",
2595  * "0.078",
2596  * Patterns::Double(0),
2597  * "The beta factor in the artificial viscosity "
2598  * "stabilization. An appropriate value for 2d is 0.052 "
2599  * "and 0.078 for 3d.");
2600  * }
2601  * prm.leave_subsection();
2602  *
2603  * prm.enter_subsection("Discretization");
2604  * {
2605  * prm.declare_entry(
2606  * "Stokes velocity polynomial degree",
2607  * "2",
2608  * Patterns::Integer(1),
2609  * "The polynomial degree to use for the velocity variables "
2610  * "in the Stokes system.");
2611  * prm.declare_entry(
2612  * "Temperature polynomial degree",
2613  * "2",
2614  * Patterns::Integer(1),
2615  * "The polynomial degree to use for the temperature variable.");
2616  * prm.declare_entry(
2617  * "Use locally conservative discretization",
2618  * "true",
2619  * Patterns::Bool(),
2620  * "Whether to use a Stokes discretization that is locally "
2621  * "conservative at the expense of a larger number of degrees "
2622  * "of freedom, or to go with a cheaper discretization "
2623  * "that does not locally conserve mass (although it is "
2624  * "globally conservative.");
2625  * }
2626  * prm.leave_subsection();
2627  * }
2628  *
2629  *
2630  *
2631  * @endcode
2632  *
2633  * And then we need a function that reads the contents of the
2634  * ParameterHandler object we get by reading the input file and puts the
2635  * results into variables that store the values of the parameters we have
2636  * previously declared:
2637  *
2638  * @code
2639  * template <int dim>
2640  * void BoussinesqFlowProblem<dim>::Parameters::parse_parameters(
2641  * ParameterHandler &prm)
2642  * {
2643  * end_time = prm.get_double("End time");
2644  * initial_global_refinement = prm.get_integer("Initial global refinement");
2645  * initial_adaptive_refinement =
2646  * prm.get_integer("Initial adaptive refinement");
2647  *
2648  * adaptive_refinement_interval =
2649  * prm.get_integer("Time steps between mesh refinement");
2650  *
2651  * generate_graphical_output = prm.get_bool("Generate graphical output");
2652  * graphical_output_interval =
2653  * prm.get_integer("Time steps between graphical output");
2654  *
2655  * prm.enter_subsection("Stabilization parameters");
2656  * {
2657  * stabilization_alpha = prm.get_double("alpha");
2658  * stabilization_c_R = prm.get_double("c_R");
2659  * stabilization_beta = prm.get_double("beta");
2660  * }
2661  * prm.leave_subsection();
2662  *
2663  * prm.enter_subsection("Discretization");
2664  * {
2665  * stokes_velocity_degree =
2666  * prm.get_integer("Stokes velocity polynomial degree");
2667  * temperature_degree = prm.get_integer("Temperature polynomial degree");
2668  * use_locally_conservative_discretization =
2669  * prm.get_bool("Use locally conservative discretization");
2670  * }
2671  * prm.leave_subsection();
2672  * }
2673  *
2674  *
2675  *
2676  * @endcode
2677  *
2678  *
2679  * <a name="BoussinesqFlowProblemBoussinesqFlowProblem"></a>
2680  * <h4>BoussinesqFlowProblem::BoussinesqFlowProblem</h4>
2681  *
2682 
2683  *
2684  * The constructor of the problem is very similar to the constructor in
2685  * @ref step_31 "step-31". What is different is the %parallel communication: Trilinos uses
2686  * a message passing interface (MPI) for data distribution. When entering
2687  * the BoussinesqFlowProblem class, we have to decide how the parallelization
2688  * is to be done. We choose a rather simple strategy and let all processors
2689  * that are running the program work together, specified by the communicator
2690  * <code>MPI_COMM_WORLD</code>. Next, we create the output stream (as we
2691  * already did in @ref step_18 "step-18") that only generates output on the first MPI
2692  * process and is completely forgetful on all others. The implementation of
2693  * this idea is to check the process number when <code>pcout</code> gets a
2694  * <code>true</code> argument, in which case it uses the <code>std::cout</code> stream for
2695  * output. If we are processor five, for instance, then we will give a
2696  * <code>false</code> argument to <code>pcout</code>, which means that the
2697  * output of that processor will not be printed. With the exception of the
2698  * mapping object (for which we use polynomials of degree 4) all but the
2699  * final member variable are exactly the same as in @ref step_31 "step-31".
2700  *
2701 
2702  *
2703  * This final object, the TimerOutput object, is then told to restrict
2704  * output to the <code>pcout</code> stream (processor 0), and then we
2705  * specify that we want to get a summary table at the end of the program
2706  * which shows us wallclock times (as opposed to CPU times). We will
2707  * manually also request intermediate summaries every so many time steps in
2708  * the <code>run()</code> function below.
2709  *
2710  * @code
2711  * template <int dim>
2712  * BoussinesqFlowProblem<dim>::BoussinesqFlowProblem(Parameters &parameters_)
2713  * : parameters(parameters_)
2714  * , pcout(std::cout, (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0))
2715  * ,
2716  *
2717  * triangulation(MPI_COMM_WORLD,
2718  * typename Triangulation<dim>::MeshSmoothing(
2719  * Triangulation<dim>::smoothing_on_refinement |
2720  * Triangulation<dim>::smoothing_on_coarsening))
2721  * ,
2722  *
2723  * global_Omega_diameter(0.)
2724  * ,
2725  *
2726  * mapping(4)
2727  * ,
2728  *
2729  * stokes_fe(FE_Q<dim>(parameters.stokes_velocity_degree),
2730  * dim,
2731  * (parameters.use_locally_conservative_discretization ?
2732  * static_cast<const FiniteElement<dim> &>(
2733  * FE_DGP<dim>(parameters.stokes_velocity_degree - 1)) :
2734  * static_cast<const FiniteElement<dim> &>(
2735  * FE_Q<dim>(parameters.stokes_velocity_degree - 1))),
2736  * 1)
2737  * ,
2738  *
2739  * stokes_dof_handler(triangulation)
2740  * ,
2741  *
2742  * temperature_fe(parameters.temperature_degree)
2743  * , temperature_dof_handler(triangulation)
2744  * ,
2745  *
2746  * time_step(0)
2747  * , old_time_step(0)
2748  * , timestep_number(0)
2749  * , rebuild_stokes_matrix(true)
2750  * , rebuild_stokes_preconditioner(true)
2751  * , rebuild_temperature_matrices(true)
2752  * , rebuild_temperature_preconditioner(true)
2753  * ,
2754  *
2755  * computing_timer(MPI_COMM_WORLD,
2756  * pcout,
2757  * TimerOutput::summary,
2758  * TimerOutput::wall_times)
2759  * {}
2760  *
2761  *
2762  *
2763  * @endcode
2764  *
2765  *
2766  * <a name="TheBoussinesqFlowProblemhelperfunctions"></a>
2767  * <h4>The BoussinesqFlowProblem helper functions</h4>
2768  *
2769  * <a name="BoussinesqFlowProblemget_maximal_velocity"></a>
2770  * <h5>BoussinesqFlowProblem::get_maximal_velocity</h5>
2771  *
2772 
2773  *
2774  * Except for two small details, the function to compute the global maximum
2775  * of the velocity is the same as in @ref step_31 "step-31". The first detail is actually
2776  * common to all functions that implement loops over all cells in the
2777  * triangulation: When operating in %parallel, each processor can only work
2778  * on a chunk of cells since each processor only has a certain part of the
2779  * entire triangulation. This chunk of cells that we want to work on is
2780  * identified via a so-called <code>subdomain_id</code>, as we also did in
2781  * @ref step_18 "step-18". All we need to change is hence to perform the cell-related
2782  * operations only on cells that are owned by the current process (as
2783  * opposed to ghost or artificial cells), i.e. for which the subdomain id
2784  * equals the ID of the current process. Since this is a commonly used
2785  * operation, there is a shortcut: we can ask whether the
2786  * cell is owned by the current processor using
2787  * <code>cell-@>is_locally_owned()</code>.
2788  *
2789 
2790  *
2791  * The second difference is the way we calculate the maximum value. Before,
2792  * we could simply have a <code>double</code> variable that we checked
2793  * against on each quadrature point for each cell. Now, we have to be a bit
2794  * more careful since each processor only operates on a subset of
2795  * cells. What we do is to first let each processor calculate the maximum
2796  * among its cells, and then do a global communication operation
2797  * <code>Utilities::MPI::max</code> that computes the maximum value among
2798  * all the maximum values of the individual processors. MPI provides such a
2799  * call, but it's even simpler to use the respective function in namespace
2800  * Utilities::MPI using the MPI communicator object since that will do the
2801  * right thing even if we work without MPI and on a single machine only. The
2802  * call to <code>Utilities::MPI::max</code> needs two arguments, namely the
2803  * local maximum (input) and the MPI communicator, which is MPI_COMM_WORLD
2804  * in this example.
2805  *
2806  * @code
2807  * template <int dim>
2808  * double BoussinesqFlowProblem<dim>::get_maximal_velocity() const
2809  * {
2810  * const QIterated<dim> quadrature_formula(QTrapez<1>(),
2811  * parameters.stokes_velocity_degree);
2812  * const unsigned int n_q_points = quadrature_formula.size();
2813  *
2814  * FEValues<dim> fe_values(mapping,
2815  * stokes_fe,
2816  * quadrature_formula,
2817  * update_values);
2818  * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2819  *
2820  * const FEValuesExtractors::Vector velocities(0);
2821  *
2822  * double max_local_velocity = 0;
2823  *
2824  * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2825  * if (cell->is_locally_owned())
2826  * {
2827  * fe_values.reinit(cell);
2828  * fe_values[velocities].get_function_values(stokes_solution,
2829  * velocity_values);
2830  *
2831  * for (unsigned int q = 0; q < n_q_points; ++q)
2832  * max_local_velocity =
2833  * std::max(max_local_velocity, velocity_values[q].norm());
2834  * }
2835  *
2836  * return Utilities::MPI::max(max_local_velocity, MPI_COMM_WORLD);
2837  * }
2838  *
2839  *
2840  * @endcode
2841  *
2842  *
2843  * <a name="BoussinesqFlowProblemget_cfl_number"></a>
2844  * <h5>BoussinesqFlowProblem::get_cfl_number</h5>
2845  *
2846 
2847  *
2848  * The next function does something similar, but we now compute the CFL
2849  * number, i.e., maximal velocity on a cell divided by the cell
2850  * diameter. This number is necessary to determine the time step size, as we
2851  * use a semi-explicit time stepping scheme for the temperature equation
2852  * (see @ref step_31 "step-31" for a discussion). We compute it in the same way as above:
2853  * Compute the local maximum over all locally owned cells, then exchange it
2854  * via MPI to find the global maximum.
2855  *
2856  * @code
2857  * template <int dim>
2858  * double BoussinesqFlowProblem<dim>::get_cfl_number() const
2859  * {
2860  * const QIterated<dim> quadrature_formula(QTrapez<1>(),
2861  * parameters.stokes_velocity_degree);
2862  * const unsigned int n_q_points = quadrature_formula.size();
2863  *
2864  * FEValues<dim> fe_values(mapping,
2865  * stokes_fe,
2866  * quadrature_formula,
2867  * update_values);
2868  * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2869  *
2870  * const FEValuesExtractors::Vector velocities(0);
2871  *
2872  * double max_local_cfl = 0;
2873  *
2874  * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2875  * if (cell->is_locally_owned())
2876  * {
2877  * fe_values.reinit(cell);
2878  * fe_values[velocities].get_function_values(stokes_solution,
2879  * velocity_values);
2880  *
2881  * double max_local_velocity = 1e-10;
2882  * for (unsigned int q = 0; q < n_q_points; ++q)
2883  * max_local_velocity =
2884  * std::max(max_local_velocity, velocity_values[q].norm());
2885  * max_local_cfl =
2886  * std::max(max_local_cfl, max_local_velocity / cell->diameter());
2887  * }
2888  *
2889  * return Utilities::MPI::max(max_local_cfl, MPI_COMM_WORLD);
2890  * }
2891  *
2892  *
2893  * @endcode
2894  *
2895  *
2896  * <a name="BoussinesqFlowProblemget_entropy_variation"></a>
2897  * <h5>BoussinesqFlowProblem::get_entropy_variation</h5>
2898  *
2899 
2900  *
2901  * Next comes the computation of the global entropy variation
2902  * @f$\|E(T)-\bar{E}(T)\|_\infty@f$ where the entropy @f$E@f$ is defined as
2903  * discussed in the introduction. This is needed for the evaluation of the
2904  * stabilization in the temperature equation as explained in the
2905  * introduction. The entropy variation is actually only needed if we use
2906  * @f$\alpha=2@f$ as a power in the residual computation. The infinity norm is
2907  * computed by the maxima over quadrature points, as usual in discrete
2908  * computations.
2909  *
2910 
2911  *
2912  * In order to compute this quantity, we first have to find the
2913  * space-average @f$\bar{E}(T)@f$ and then evaluate the maximum. However, that
2914  * means that we would need to perform two loops. We can avoid the overhead
2915  * by noting that @f$\|E(T)-\bar{E}(T)\|_\infty =
2916  * \max\big(E_{\textrm{max}}(T)-\bar{E}(T),
2917  * \bar{E}(T)-E_{\textrm{min}}(T)\big)@f$, i.e., the maximum out of the
2918  * deviation from the average entropy in positive and negative
2919  * directions. The four quantities we need for the latter formula (maximum
2920  * entropy, minimum entropy, average entropy, area) can all be evaluated in
2921  * the same loop over all cells, so we choose this simpler variant.
2922  *
2923  * @code
2924  * template <int dim>
2925  * double BoussinesqFlowProblem<dim>::get_entropy_variation(
2926  * const double average_temperature) const
2927  * {
2928  * if (parameters.stabilization_alpha != 2)
2929  * return 1.;
2930  *
2931  * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 1);
2932  * const unsigned int n_q_points = quadrature_formula.size();
2933  *
2934  * FEValues<dim> fe_values(temperature_fe,
2935  * quadrature_formula,
2936  * update_values | update_JxW_values);
2937  * std::vector<double> old_temperature_values(n_q_points);
2938  * std::vector<double> old_old_temperature_values(n_q_points);
2939  *
2940  * @endcode
2941  *
2942  * In the two functions above we computed the maximum of numbers that were
2943  * all non-negative, so we knew that zero was certainly a lower bound. On
2944  * the other hand, here we need to find the maximum deviation from the
2945  * average value, i.e., we will need to know the maximal and minimal
2946  * values of the entropy for which we don't a priori know the sign.
2947  *
2948 
2949  *
2950  * To compute it, we can therefore start with the largest and smallest
2951  * possible values we can store in a double precision number: The minimum
2952  * is initialized with a bigger and the maximum with a smaller number than
2953  * any one that is going to appear. We are then guaranteed that these
2954  * numbers will be overwritten in the loop on the first cell or, if this
2955  * processor does not own any cells, in the communication step at the
2956  * latest. The following loop then computes the minimum and maximum local
2957  * entropy as well as keeps track of the area/volume of the part of the
2958  * domain we locally own and the integral over the entropy on it:
2959  *
2960  * @code
2961  * double min_entropy = std::numeric_limits<double>::max(),
2962  * max_entropy = -std::numeric_limits<double>::max(), area = 0,
2963  * entropy_integrated = 0;
2964  *
2965  * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
2966  * if (cell->is_locally_owned())
2967  * {
2968  * fe_values.reinit(cell);
2969  * fe_values.get_function_values(old_temperature_solution,
2970  * old_temperature_values);
2971  * fe_values.get_function_values(old_old_temperature_solution,
2972  * old_old_temperature_values);
2973  * for (unsigned int q = 0; q < n_q_points; ++q)
2974  * {
2975  * const double T =
2976  * (old_temperature_values[q] + old_old_temperature_values[q]) / 2;
2977  * const double entropy =
2978  * ((T - average_temperature) * (T - average_temperature));
2979  *
2980  * min_entropy = std::min(min_entropy, entropy);
2981  * max_entropy = std::max(max_entropy, entropy);
2982  * area += fe_values.JxW(q);
2983  * entropy_integrated += fe_values.JxW(q) * entropy;
2984  * }
2985  * }
2986  *
2987  * @endcode
2988  *
2989  * Now we only need to exchange data between processors: we need to sum
2990  * the two integrals (<code>area</code>, <code>entropy_integrated</code>),
2991  * and get the extrema for maximum and minimum. We could do this through
2992  * four different data exchanges, but we can do it with two:
2993  * Utilities::MPI::sum also exists in a variant that takes an array of
2994  * values that are all to be summed up. And we can also utilize the
2995  * Utilities::MPI::max function by realizing that forming the minimum over
2996  * the minimal entropies equals forming the negative of the maximum over
2997  * the negative of the minimal entropies; this maximum can then be
2998  * combined with forming the maximum over the maximal entropies.
2999  *
3000  * @code
3001  * const double local_sums[2] = {entropy_integrated, area},
3002  * local_maxima[2] = {-min_entropy, max_entropy};
3003  * double global_sums[2], global_maxima[2];
3004  *
3005  * Utilities::MPI::sum(local_sums, MPI_COMM_WORLD, global_sums);
3006  * Utilities::MPI::max(local_maxima, MPI_COMM_WORLD, global_maxima);
3007  *
3008  * @endcode
3009  *
3010  * Having computed everything this way, we can then compute the average
3011  * entropy and find the @f$L^\infty@f$ norm by taking the larger of the
3012  * deviation of the maximum or minimum from the average:
3013  *
3014  * @code
3015  * const double average_entropy = global_sums[0] / global_sums[1];
3016  * const double entropy_diff = std::max(global_maxima[1] - average_entropy,
3017  * average_entropy - (-global_maxima[0]));
3018  * return entropy_diff;
3019  * }
3020  *
3021  *
3022  *
3023  * @endcode
3024  *
3025  *
3026  * <a name="BoussinesqFlowProblemget_extrapolated_temperature_range"></a>
3027  * <h5>BoussinesqFlowProblem::get_extrapolated_temperature_range</h5>
3028  *
3029 
3030  *
3031  * The next function computes the minimal and maximal value of the
3032  * extrapolated temperature over the entire domain. Again, this is only a
3033  * slightly modified version of the respective function in @ref step_31 "step-31". As in
3034  * the function above, we collect local minima and maxima and then compute
3035  * the global extrema using the same trick as above.
3036  *
3037 
3038  *
3039  * As already discussed in @ref step_31 "step-31", the function needs to distinguish
3040  * between the first and all following time steps because it uses a higher
3041  * order temperature extrapolation scheme when at least two previous time
3042  * steps are available.
3043  *
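 * Written as a formula, the extrapolated temperature evaluated in the
 * loop below is, with the current time step @f$k_n@f$
 * (<code>time_step</code>) and the previous one @f$k_{n-1}@f$
 * (<code>old_time_step</code>),
 * @f[
 *   T^\ast = \left(1+\frac{k_n}{k_{n-1}}\right) T^{n-1}
 *            - \frac{k_n}{k_{n-1}}\, T^{n-2},
 * @f]
 * which reduces to @f$T^\ast = T^{n-1}@f$ in the very first time step.
 *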
3044  * @code
3045  * template <int dim>
3046  * std::pair<double, double>
3047  * BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range() const
3048  * {
3049  * const QIterated<dim> quadrature_formula(QTrapez<1>(),
3050  * parameters.temperature_degree);
3051  * const unsigned int n_q_points = quadrature_formula.size();
3052  *
3053  * FEValues<dim> fe_values(mapping,
3054  * temperature_fe,
3055  * quadrature_formula,
3056  * update_values);
3057  * std::vector<double> old_temperature_values(n_q_points);
3058  * std::vector<double> old_old_temperature_values(n_q_points);
3059  *
3060  * double min_local_temperature = std::numeric_limits<double>::max(),
3061  * max_local_temperature = -std::numeric_limits<double>::max();
3062  *
3063  * if (timestep_number != 0)
3064  * {
3065  * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3066  * if (cell->is_locally_owned())
3067  * {
3068  * fe_values.reinit(cell);
3069  * fe_values.get_function_values(old_temperature_solution,
3070  * old_temperature_values);
3071  * fe_values.get_function_values(old_old_temperature_solution,
3072  * old_old_temperature_values);
3073  *
3074  * for (unsigned int q = 0; q < n_q_points; ++q)
3075  * {
3076  * const double temperature =
3077  * (1. + time_step / old_time_step) *
3078  * old_temperature_values[q] -
3079  * time_step / old_time_step * old_old_temperature_values[q];
3080  *
3081  * min_local_temperature =
3082  * std::min(min_local_temperature, temperature);
3083  * max_local_temperature =
3084  * std::max(max_local_temperature, temperature);
3085  * }
3086  * }
3087  * }
3088  * else
3089  * {
3090  * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3091  * if (cell->is_locally_owned())
3092  * {
3093  * fe_values.reinit(cell);
3094  * fe_values.get_function_values(old_temperature_solution,
3095  * old_temperature_values);
3096  *
3097  * for (unsigned int q = 0; q < n_q_points; ++q)
3098  * {
3099  * const double temperature = old_temperature_values[q];
3100  *
3101  * min_local_temperature =
3102  * std::min(min_local_temperature, temperature);
3103  * max_local_temperature =
3104  * std::max(max_local_temperature, temperature);
3105  * }
3106  * }
3107  * }
3108  *
3109  * double local_extrema[2] = {-min_local_temperature, max_local_temperature};
3110  * double global_extrema[2];
3111  * Utilities::MPI::max(local_extrema, MPI_COMM_WORLD, global_extrema);
3112  *
3113  * return std::make_pair(-global_extrema[0], global_extrema[1]);
3114  * }
3115  *
3116  *
3117  * @endcode
3118  *
3119  *
3120  * <a name="BoussinesqFlowProblemcompute_viscosity"></a>
3121  * <h5>BoussinesqFlowProblem::compute_viscosity</h5>
3122  *
3123 
3124  *
3125  * The function that calculates the viscosity is purely local and so needs
3126  * no communication at all. It is mostly the same as in @ref step_31 "step-31" but with an
3127  * updated formulation of the viscosity if @f$\alpha=2@f$ is chosen.
3128  *
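 * On a cell @f$K@f$ with diameter @f$h_K@f$, the entropy viscosity
 * computed below for @f$\alpha=2@f$ reads
 * @f[
 *   \nu_E|_K = c_R\, h_K^2\,
 *   \frac{\max_{q\in K}\big|R(T)\,(T-T_m)\big|}
 *        {\|E(T)-\bar{E}(T)\|_\infty},
 * @f]
 * where @f$R(T)@f$ is the residual of the temperature equation evaluated
 * at the quadrature points and @f$T_m@f$ the average temperature. Except
 * in the very first time step, the function returns the minimum of this
 * quantity and the first-order viscosity
 * @f$\beta\, h_K \max_{q\in K}\|\mathbf{u}\|@f$; for @f$\alpha\neq 2@f$,
 * the formula already used in @ref step_31 "step-31" is applied instead:
 *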
3129  * @code
3130  * template <int dim>
3131  * double BoussinesqFlowProblem<dim>::compute_viscosity(
3132  * const std::vector<double> & old_temperature,
3133  * const std::vector<double> & old_old_temperature,
3134  * const std::vector<Tensor<1, dim>> & old_temperature_grads,
3135  * const std::vector<Tensor<1, dim>> & old_old_temperature_grads,
3136  * const std::vector<double> & old_temperature_laplacians,
3137  * const std::vector<double> & old_old_temperature_laplacians,
3138  * const std::vector<Tensor<1, dim>> & old_velocity_values,
3139  * const std::vector<Tensor<1, dim>> & old_old_velocity_values,
3140  * const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
3141  * const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
3142  * const double global_u_infty,
3143  * const double global_T_variation,
3144  * const double average_temperature,
3145  * const double global_entropy_variation,
3146  * const double cell_diameter) const
3147  * {
3148  * if (global_u_infty == 0)
3149  * return 5e-3 * cell_diameter;
3150  *
3151  * const unsigned int n_q_points = old_temperature.size();
3152  *
3153  * double max_residual = 0;
3154  * double max_velocity = 0;
3155  *
3156  * for (unsigned int q = 0; q < n_q_points; ++q)
3157  * {
3158  * const Tensor<1, dim> u =
3159  * (old_velocity_values[q] + old_old_velocity_values[q]) / 2;
3160  *
3161  * const SymmetricTensor<2, dim> strain_rate =
3162  * (old_strain_rates[q] + old_old_strain_rates[q]) / 2;
3163  *
3164  * const double T = (old_temperature[q] + old_old_temperature[q]) / 2;
3165  * const double dT_dt =
3166  * (old_temperature[q] - old_old_temperature[q]) / old_time_step;
3167  * const double u_grad_T =
3168  * u * (old_temperature_grads[q] + old_old_temperature_grads[q]) / 2;
3169  *
3170  * const double kappa_Delta_T =
3171  * EquationData::kappa *
3172  * (old_temperature_laplacians[q] + old_old_temperature_laplacians[q]) /
3173  * 2;
3174  * const double gamma =
3175  * ((EquationData::radiogenic_heating * EquationData::density(T) +
3176  * 2 * EquationData::eta * strain_rate * strain_rate) /
3177  * (EquationData::density(T) * EquationData::specific_heat));
3178  *
3179  * double residual = std::abs(dT_dt + u_grad_T - kappa_Delta_T - gamma);
3180  * if (parameters.stabilization_alpha == 2)
3181  * residual *= std::abs(T - average_temperature);
3182  *
3183  * max_residual = std::max(residual, max_residual);
3184  * max_velocity = std::max(std::sqrt(u * u), max_velocity);
3185  * }
3186  *
3187  * const double max_viscosity =
3188  * (parameters.stabilization_beta * max_velocity * cell_diameter);
3189  * if (timestep_number == 0)
3190  * return max_viscosity;
3191  * else
3192  * {
3193  * Assert(old_time_step > 0, ExcInternalError());
3194  *
3195  * double entropy_viscosity;
3196  * if (parameters.stabilization_alpha == 2)
3197  * entropy_viscosity =
3198  * (parameters.stabilization_c_R * cell_diameter * cell_diameter *
3199  * max_residual / global_entropy_variation);
3200  * else
3201  * entropy_viscosity =
3202  * (parameters.stabilization_c_R * cell_diameter *
3203  * global_Omega_diameter * max_velocity * max_residual /
3204  * (global_u_infty * global_T_variation));
3205  *
3206  * return std::min(max_viscosity, entropy_viscosity);
3207  * }
3208  * }
3209  *
3210  *
3211  *
3212  * @endcode
3213  *
3214  *
3215  * <a name="TheBoussinesqFlowProblemsetupfunctions"></a>
3216  * <h4>The BoussinesqFlowProblem setup functions</h4>
3217  *
3218 
3219  *
3220  * The following three functions set up the Stokes matrix, the matrix used
3221  * for the Stokes preconditioner, and the temperature matrix. The code is
3222  * mostly the same as in @ref step_31 "step-31", but it has been broken out into three
3223  * functions of their own for simplicity.
3224  *
3225 
3226  *
3227  * The main functional difference between the code here and that in @ref step_31 "step-31"
3228  * is that the matrices we want to set up are distributed across multiple
3229  * processors. Since we still want to build up the sparsity pattern first
3230  * for efficiency reasons, we could continue to build the <i>entire</i>
3231  * sparsity pattern as a BlockDynamicSparsityPattern, as we did in
3232  * @ref step_31 "step-31". However, that would be inefficient: every processor would build
3233  * the same sparsity pattern, but only initialize a small part of the matrix
3234  * using it. It also violates the principle that every processor should only
3235  * work on those cells it owns (and, if necessary, the layer of ghost cells
3236  * around it).
3237  *
3238 
3239  *
3240  * Rather, we use an object of type TrilinosWrappers::BlockSparsityPattern,
3241  * which is (obviously) a wrapper around a sparsity pattern object provided
3242  * by Trilinos. The advantage is that the Trilinos sparsity pattern class
3243  * can communicate across multiple processors: if this processor fills in
3244  * all the nonzero entries that result from the cells it owns, and every
3245  * other processor does so as well, then at the end after some MPI
3246  * communication initiated by the <code>compress()</code> call, we will have
3247  * the globally assembled sparsity pattern available with which the global
3248  * matrix can be initialized.
3249  *
3250 
3251  *
3252  * There is one important aspect when initializing Trilinos sparsity
3253  * patterns in parallel: In addition to specifying the locally owned rows
3254  * and columns of the matrices via the @p stokes_partitioning index set, we
3255  * also supply information about all the rows we are possibly going to write
3256  * into when assembling on a certain processor. The set of locally relevant
3257  * rows contains all such rows (possibly also a few unnecessary ones, but it
3258  * is difficult to find the exact row indices before actually getting
3259  * indices on all cells and resolving constraints). This additional
3260  * information allows us to exactly determine the structure for the
3261  * off-processor data found during assembly. While Trilinos matrices are
3262  * able to collect this information on the fly as well (when initializing
3263  * them from some other reinit method), it is less efficient and leads to
3264  * problems when assembling matrices with multiple threads. In this program,
3265  * we pessimistically assume that only one processor at a time can write
3266  * into the matrix during assembly (whereas the computation is parallel),
3267  * which is fine for Trilinos matrices. In practice, one can do better by
3268  * hinting WorkStream at cells that do not share vertices, allowing for
3269  * parallelism among those cells (see the graph coloring algorithms and
3270  * WorkStream with colored iterators argument). However, that only works
3271  * when only one MPI processor is present because Trilinos' internal data
3272  * structures for accumulating off-processor data on the fly are not thread
3273  * safe. With the initialization presented here, there is no such problem
3274  * and one could safely introduce graph coloring for this algorithm.
3275  *
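 * To illustrate what such a graph coloring variant could look like (it is
 * not used in this program and, as just explained, would only be safe
 * when running on a single MPI process), one could group the cells into
 * "colors" that share no degrees of freedom and hand those groups to the
 * corresponding WorkStream::run() overload. In the following sketch, the
 * <code>worker</code>, <code>copier</code>, and
 * <code>quadrature_formula</code> objects stand for the ones set up in
 * <code>assemble_stokes_preconditioner()</code> further down, and the
 * code requires the <code>deal.II/base/graph_coloring.h</code> header:
 *
 * @code
 * // Sketch only, not part of this program: graph-colored assembly that
 * // is valid on a single MPI process.
 * auto get_conflict_indices =
 *   [](const typename DoFHandler<dim>::active_cell_iterator &cell)
 *     -> std::vector<types::global_dof_index> {
 *     std::vector<types::global_dof_index> indices(
 *       cell->get_fe().dofs_per_cell);
 *     cell->get_dof_indices(indices);
 *     return indices;
 *   };
 *
 * // Cells within one color do not share any degrees of freedom and can
 * // therefore write into the global matrix concurrently:
 * const std::vector<
 *   std::vector<typename DoFHandler<dim>::active_cell_iterator>>
 *   colored_cells =
 *     GraphColoring::make_graph_coloring(stokes_dof_handler.begin_active(),
 *                                        stokes_dof_handler.end(),
 *                                        get_conflict_indices);
 *
 * WorkStream::run(colored_cells,
 *                 worker,
 *                 copier,
 *                 Assembly::Scratch::StokesPreconditioner<dim>(
 *                   stokes_fe,
 *                   quadrature_formula,
 *                   mapping,
 *                   update_JxW_values | update_values | update_gradients),
 *                 Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));
 * @endcode
 *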
3276 
3277  *
3278  * The only other change we need to make is to tell the
3279  * DoFTools::make_sparsity_pattern() function that it is only supposed to
3280  * work on a subset of cells, namely the ones whose
3281  * <code>subdomain_id</code> equals the number of the current processor, and
3282  * to ignore all other cells.
3283  *
3284 
3285  *
3286  * This strategy is replicated across all three of the following functions.
3287  *
3288 
3289  *
3290  * Note that Trilinos matrices store the information contained in the
3291  * sparsity patterns, so we can safely release the <code>sp</code> variable
3292  * once the matrix has been given the sparsity structure.
3293  *
3294  * @code
3295  * template <int dim>
3296  * void BoussinesqFlowProblem<dim>::setup_stokes_matrix(
3297  * const std::vector<IndexSet> &stokes_partitioning,
3298  * const std::vector<IndexSet> &stokes_relevant_partitioning)
3299  * {
3300  * stokes_matrix.clear();
3301  *
3302  * TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
3303  * stokes_partitioning,
3304  * stokes_relevant_partitioning,
3305  * MPI_COMM_WORLD);
3306  *
3307  * Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
3308  * for (unsigned int c = 0; c < dim + 1; ++c)
3309  * for (unsigned int d = 0; d < dim + 1; ++d)
3310  * if (!((c == dim) && (d == dim)))
3311  * coupling[c][d] = DoFTools::always;
3312  * else
3313  * coupling[c][d] = DoFTools::none;
3314  *
3315  * DoFTools::make_sparsity_pattern(stokes_dof_handler,
3316  * coupling,
3317  * sp,
3318  * stokes_constraints,
3319  * false,
3320  * Utilities::MPI::this_mpi_process(
3321  * MPI_COMM_WORLD));
3322  * sp.compress();
3323  *
3324  * stokes_matrix.reinit(sp);
3325  * }
3326  *
3327  *
3328  *
3329  * template <int dim>
3330  * void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner(
3331  * const std::vector<IndexSet> &stokes_partitioning,
3332  * const std::vector<IndexSet> &stokes_relevant_partitioning)
3333  * {
3334  * Amg_preconditioner.reset();
3335  * Mp_preconditioner.reset();
3336  *
3337  * stokes_preconditioner_matrix.clear();
3338  *
3339  * TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
3340  * stokes_partitioning,
3341  * stokes_relevant_partitioning,
3342  * MPI_COMM_WORLD);
3343  *
3344  * Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
3345  * for (unsigned int c = 0; c < dim + 1; ++c)
3346  * for (unsigned int d = 0; d < dim + 1; ++d)
3347  * if (c == d)
3348  * coupling[c][d] = DoFTools::always;
3349  * else
3350  * coupling[c][d] = DoFTools::none;
3351  *
3352  * DoFTools::make_sparsity_pattern(stokes_dof_handler,
3353  * coupling,
3354  * sp,
3355  * stokes_constraints,
3356  * false,
3357  * Utilities::MPI::this_mpi_process(
3358  * MPI_COMM_WORLD));
3359  * sp.compress();
3360  *
3361  * stokes_preconditioner_matrix.reinit(sp);
3362  * }
3363  *
3364  *
3365  * template <int dim>
3366  * void BoussinesqFlowProblem<dim>::setup_temperature_matrices(
3367  * const IndexSet &temperature_partitioner,
3368  * const IndexSet &temperature_relevant_partitioner)
3369  * {
3370  * T_preconditioner.reset();
3371  * temperature_mass_matrix.clear();
3372  * temperature_stiffness_matrix.clear();
3373  * temperature_matrix.clear();
3374  *
3375  * TrilinosWrappers::SparsityPattern sp(temperature_partitioner,
3376  * temperature_partitioner,
3377  * temperature_relevant_partitioner,
3378  * MPI_COMM_WORLD);
3379  * DoFTools::make_sparsity_pattern(temperature_dof_handler,
3380  * sp,
3381  * temperature_constraints,
3382  * false,
3383  * Utilities::MPI::this_mpi_process(
3384  * MPI_COMM_WORLD));
3385  * sp.compress();
3386  *
3387  * temperature_matrix.reinit(sp);
3388  * temperature_mass_matrix.reinit(sp);
3389  * temperature_stiffness_matrix.reinit(sp);
3390  * }
3391  *
3392  *
3393  *
3394  * @endcode
3395  *
3396  * The remainder of the setup function (after splitting out the three
3397  * functions above) mostly has to deal with the things we need to do for
3398  * parallelization across processors. Because setting all of this up is a
3399  * significant compute time expense of the program, we put everything we do
3400  * here into a timer group so that we can get summary information about the
3401  * fraction of time spent in this part of the program at its end.
3402  *
3403 
3404  *
3405  * At the top as usual we enumerate degrees of freedom and sort them by
3406  * component/block, followed by writing their numbers to the screen from
3407  * processor zero. The DoFHandler::distribute_dofs() function, when applied
3408  * to a parallel::distributed::Triangulation object, sorts degrees of
3409  * freedom in such a way that all degrees of freedom associated with
3410  * subdomain zero come before all those associated with subdomain one,
3411  * etc. For the Stokes part, this entails, however, that velocities and
3412  * pressures become intermixed, but this is trivially solved by sorting
3413  * again by blocks; it is worth noting that this latter operation leaves the
3414  * relative ordering of all velocities and pressures alone, i.e. within the
3415  * velocity block we will still have all those associated with subdomain
3416  * zero before all velocities associated with subdomain one, etc. This is
3417  * important since we store each of the blocks of this matrix distributed
3418  * across all processors and want this to be done in such a way that each
3419  * processor stores that part of the matrix that is roughly equal to the
3420  * degrees of freedom located on those cells that it will actually work on.
3421  *
3422 
3423  *
3424  * When printing the numbers of degrees of freedom, note that these numbers
3425  * are going to be large if we use many processors. Consequently, we let the
3426  * stream put a comma separator in between every three digits. The state of
3427  * the stream, using the locale, is saved from before to after this
3428  * operation. While slightly opaque, the code works because the default
3429  * locale (which we get using the constructor call
3430  * <code>std::locale("")</code>) implies printing numbers with a comma
3431  * separator for every third digit (i.e., thousands, millions, billions).
3432  *
3433 
3434  *
3435  * In this function as well as many below, we measure how much time
3436  * we spend here and collect that in a section called "Setup dof
3437  * systems" across function invocations. This is done using an
3438  * TimerOutput::Scope object that gets a timer going in the section
3439  * with above name of the `computing_timer` object upon construction
3440  * of the local variable; the timer is stopped again when the
3441  * destructor of the `timing_section` variable is called. This, of
3442  * course, happens either at the end of the function, or if we leave
3443  * the function through a `return` statement or when an exception is
3444  * thrown somewhere -- in other words, whenever we leave this
3445  * function in any way. The use of such "scope" objects therefore
3446  * makes sure that we do not have to manually add code that tells
3447  * the timer to stop at every location where this function may be
3448  * left.
3449  *
3450  * @code
3451  * template <int dim>
3452  * void BoussinesqFlowProblem<dim>::setup_dofs()
3453  * {
3454  * TimerOutput::Scope timing_section(computing_timer, "Setup dof systems");
3455  *
3456  * std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
3457  * stokes_sub_blocks[dim] = 1;
3458  * stokes_dof_handler.distribute_dofs(stokes_fe);
3459  * DoFRenumbering::component_wise(stokes_dof_handler, stokes_sub_blocks);
3460  *
3461  * temperature_dof_handler.distribute_dofs(temperature_fe);
3462  *
3463  * const std::vector<types::global_dof_index> stokes_dofs_per_block =
3464  * DoFTools::count_dofs_per_fe_block(stokes_dof_handler, stokes_sub_blocks);
3465  *
3466  * const unsigned int n_u = stokes_dofs_per_block[0],
3467  * n_p = stokes_dofs_per_block[1],
3468  * n_T = temperature_dof_handler.n_dofs();
3469  *
3470  * std::locale s = pcout.get_stream().getloc();
3471  * pcout.get_stream().imbue(std::locale(""));
3472  * pcout << "Number of active cells: " << triangulation.n_global_active_cells()
3473  * << " (on " << triangulation.n_levels() << " levels)" << std::endl
3474  * << "Number of degrees of freedom: " << n_u + n_p + n_T << " (" << n_u
3475  * << '+' << n_p << '+' << n_T << ')' << std::endl
3476  * << std::endl;
3477  * pcout.get_stream().imbue(s);
3478  *
3479  *
3480  * @endcode
3481  *
3482  * After this, we have to set up the various partitioners (of type
3483  * <code>IndexSet</code>, see the introduction) that describe which parts
3484  * of each matrix or vector will be stored where, then call the functions
3485  * that actually set up the matrices, and at the end also resize the
3486  * various vectors we keep around in this program.
3487  *
3488  * @code
3489  * std::vector<IndexSet> stokes_partitioning, stokes_relevant_partitioning;
3490  * IndexSet temperature_partitioning(n_T),
3491  * temperature_relevant_partitioning(n_T);
3492  * IndexSet stokes_relevant_set;
3493  * {
3494  * IndexSet stokes_index_set = stokes_dof_handler.locally_owned_dofs();
3495  * stokes_partitioning.push_back(stokes_index_set.get_view(0, n_u));
3496  * stokes_partitioning.push_back(stokes_index_set.get_view(n_u, n_u + n_p));
3497  *
3498  * DoFTools::extract_locally_relevant_dofs(stokes_dof_handler,
3499  * stokes_relevant_set);
3500  * stokes_relevant_partitioning.push_back(
3501  * stokes_relevant_set.get_view(0, n_u));
3502  * stokes_relevant_partitioning.push_back(
3503  * stokes_relevant_set.get_view(n_u, n_u + n_p));
3504  *
3505  * temperature_partitioning = temperature_dof_handler.locally_owned_dofs();
3506  * DoFTools::extract_locally_relevant_dofs(
3507  * temperature_dof_handler, temperature_relevant_partitioning);
3508  * }
3509  *
3510  * @endcode
3511  *
3512  * Following this, we can compute constraints for the solution vectors,
3513  * including hanging node constraints and homogeneous and inhomogeneous
3514  * boundary values for the Stokes and temperature fields. Note that as for
3515  * everything else, the constraint objects can not hold <i>all</i>
3516  * constraints on every processor. Rather, each processor needs to store
3517  * only those that are actually necessary for correctness given that it
3518  * only assembles linear systems on cells it owns. As discussed in
3519  * @ref distributed_paper "this paper", the set of constraints we need to
3520  * know about is exactly the set of constraints on all locally relevant
3521  * degrees of freedom, so this is what we use to initialize the constraint
3522  * objects.
3523  *
3524  * @code
3525  * {
3526  * stokes_constraints.clear();
3527  * stokes_constraints.reinit(stokes_relevant_set);
3528  *
3529  * DoFTools::make_hanging_node_constraints(stokes_dof_handler,
3530  * stokes_constraints);
3531  *
3532  * FEValuesExtractors::Vector velocity_components(0);
3533  * VectorTools::interpolate_boundary_values(
3534  * stokes_dof_handler,
3535  * 0,
3536  * Functions::ZeroFunction<dim>(dim + 1),
3537  * stokes_constraints,
3538  * stokes_fe.component_mask(velocity_components));
3539  *
3540  * std::set<types::boundary_id> no_normal_flux_boundaries;
3541  * no_normal_flux_boundaries.insert(1);
3542  * VectorTools::compute_no_normal_flux_constraints(stokes_dof_handler,
3543  * 0,
3544  * no_normal_flux_boundaries,
3545  * stokes_constraints,
3546  * mapping);
3547  * stokes_constraints.close();
3548  * }
3549  * {
3550  * temperature_constraints.clear();
3551  * temperature_constraints.reinit(temperature_relevant_partitioning);
3552  *
3553  * DoFTools::make_hanging_node_constraints(temperature_dof_handler,
3554  * temperature_constraints);
3555  * VectorTools::interpolate_boundary_values(
3556  * temperature_dof_handler,
3557  * 0,
3558  * EquationData::TemperatureInitialValues<dim>(),
3559  * temperature_constraints);
3560  * VectorTools::interpolate_boundary_values(
3561  * temperature_dof_handler,
3562  * 1,
3563  * EquationData::TemperatureInitialValues<dim>(),
3564  * temperature_constraints);
3565  * temperature_constraints.close();
3566  * }
3567  *
3568  * @endcode
3569  *
3570  * All this done, we can then initialize the various matrix and vector
3571  * objects to their proper sizes. At the end, we also record that all
3572  * matrices and preconditioners have to be re-computed at the beginning of
3573  * the next time step. Note how we initialize the vectors for the Stokes
3574  * and temperature right hand sides: These are writable vectors (last
3575  * boolean argument set to @p true) that have the correct one-to-one
3576  * partitioning of locally owned elements but are still given the relevant
3577  * partitioning as a means of figuring out the vector entries that are
3578  * going to be set right away. As for matrices, this allows for writing
3579  * local contributions into the vector with multiple threads (always
3580  * assuming that the same vector entry is not accessed by multiple threads
3581  * at the same time). The other vectors only allow for read access of
3582  * individual elements, including ghosts, but are not suitable for
3583  * solvers.
3584  *
3585  * @code
3586  * setup_stokes_matrix(stokes_partitioning, stokes_relevant_partitioning);
3587  * setup_stokes_preconditioner(stokes_partitioning,
3588  * stokes_relevant_partitioning);
3589  * setup_temperature_matrices(temperature_partitioning,
3590  * temperature_relevant_partitioning);
3591  *
3592  * stokes_rhs.reinit(stokes_partitioning,
3593  * stokes_relevant_partitioning,
3594  * MPI_COMM_WORLD,
3595  * true);
3596  * stokes_solution.reinit(stokes_relevant_partitioning, MPI_COMM_WORLD);
3597  * old_stokes_solution.reinit(stokes_solution);
3598  *
3599  * temperature_rhs.reinit(temperature_partitioning,
3600  * temperature_relevant_partitioning,
3601  * MPI_COMM_WORLD,
3602  * true);
3603  * temperature_solution.reinit(temperature_relevant_partitioning,
3604  * MPI_COMM_WORLD);
3605  * old_temperature_solution.reinit(temperature_solution);
3606  * old_old_temperature_solution.reinit(temperature_solution);
3607  *
3608  * rebuild_stokes_matrix = true;
3609  * rebuild_stokes_preconditioner = true;
3610  * rebuild_temperature_matrices = true;
3611  * rebuild_temperature_preconditioner = true;
3612  * }
3613  *
3614  *
3615  *
3616  * @endcode
3617  *
3618  *
3619  * <a name="TheBoussinesqFlowProblemassemblyfunctions"></a>
3620  * <h4>The BoussinesqFlowProblem assembly functions</h4>
3621  *
3622 
3623  *
3624  * Following the discussion in the introduction and in the @ref threads
3625  * module, we split the assembly functions into different parts:
3626  *
3627 
3628  *
3629  * <ul> <li> The local calculations of matrices and right hand sides, given
3630  * a certain cell as input (these functions are named
3631  * <code>local_assemble_*</code> below). The resulting function is, in other
3632  * words, essentially the body of the loop over all cells in @ref step_31 "step-31". Note,
3633  * however, that these functions store the result from the local
3634  * calculations in variables of classes from the CopyData namespace.
3635  *
3636 
3637  *
3638  * <li>These objects are then given to the second step which writes the
3639  * local data into the global data structures (these functions are named
3640  * <code>copy_local_to_global_*</code> below). These functions are pretty
3641  * trivial.
3642  *
3643 
3644  *
3645  * <li>These two subfunctions are then used in the respective assembly
3646  * routine (called <code>assemble_*</code> below), where a WorkStream object
3647  * is set up and runs over all the cells that belong to the processor's
3648  * subdomain. </ul>
3649  *
3650 
3651  *
3652  *
3653  * <a name="Stokespreconditionerassembly"></a>
3654  * <h5>Stokes preconditioner assembly</h5>
3655  *
3656 
3657  *
3658  * Let us start with the functions that build the Stokes
3659  * preconditioner. The first two of these are pretty trivial, given the
3660  * discussion above. Note in particular that the main point in using the
3661  * scratch data object is that we want to avoid allocating any objects on
3662  * the heap each time we visit a new cell. As a consequence, the
3663  * assembly function below only has automatic local variables, and
3664  * everything else is accessed through the scratch data object, which is
3665  * allocated only once before we start the loop over all cells:
3666  *
3667  * @code
3668  * template <int dim>
3669  * void BoussinesqFlowProblem<dim>::local_assemble_stokes_preconditioner(
3670  * const typename DoFHandler<dim>::active_cell_iterator &cell,
3671  * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
3672  * Assembly::CopyData::StokesPreconditioner<dim> & data)
3673  * {
3674  * const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell;
3675  * const unsigned int n_q_points =
3676  * scratch.stokes_fe_values.n_quadrature_points;
3677  *
3678  * const FEValuesExtractors::Vector velocities(0);
3679  * const FEValuesExtractors::Scalar pressure(dim);
3680  *
3681  * scratch.stokes_fe_values.reinit(cell);
3682  * cell->get_dof_indices(data.local_dof_indices);
3683  *
3684  * data.local_matrix = 0;
3685  *
3686  * for (unsigned int q = 0; q < n_q_points; ++q)
3687  * {
3688  * for (unsigned int k = 0; k < dofs_per_cell; ++k)
3689  * {
3690  * scratch.grad_phi_u[k] =
3691  * scratch.stokes_fe_values[velocities].gradient(k, q);
3692  * scratch.phi_p[k] = scratch.stokes_fe_values[pressure].value(k, q);
3693  * }
3694  *
3695  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
3696  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
3697  * data.local_matrix(i, j) +=
3698  * (EquationData::eta *
3699  * scalar_product(scratch.grad_phi_u[i], scratch.grad_phi_u[j]) +
3700  * (1. / EquationData::eta) * EquationData::pressure_scaling *
3701  * EquationData::pressure_scaling *
3702  * (scratch.phi_p[i] * scratch.phi_p[j])) *
3703  * scratch.stokes_fe_values.JxW(q);
3704  * }
3705  * }
3706  *
3707  *
3708  *
3709  * template <int dim>
3710  * void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_preconditioner(
3711  * const Assembly::CopyData::StokesPreconditioner<dim> &data)
3712  * {
3713  * stokes_constraints.distribute_local_to_global(data.local_matrix,
3714  * data.local_dof_indices,
3715  * stokes_preconditioner_matrix);
3716  * }
3717  *
3718  *
3719  * @endcode
3720  *
3721  * Now for the function that actually puts things together, using the
3722  * WorkStream functions. WorkStream::run needs a start and end iterator to
3723  * enumerate the cells it is supposed to work on. Typically, one would use
3724  * DoFHandler::begin_active() and DoFHandler::end() for that but here we
3725  * actually only want the subset of cells that in fact are owned by the
3726  * current processor. This is where the FilteredIterator class comes into
3727  * play: you give it a range of cells and it provides an iterator that only
3728  * iterates over that subset of cells that satisfy a certain predicate (a
3729  * predicate is a function of one argument that either returns true or
3730  * false). The predicate we use here is IteratorFilters::LocallyOwnedCell,
3731  * i.e., it returns true exactly if the cell is owned by the current
3732  * processor. The resulting iterator range is then exactly what we need.
3733  *
3734 
3735  *
3736  * With this obstacle out of the way, we call the WorkStream::run
3737  * function with this set of cells, scratch and copy objects, and
3738  * with pointers to two functions: the local assembly and
3739  * copy-local-to-global function. These functions need to have very
3740  * specific signatures: three arguments in the first and one
3741  * argument in the latter case (see the documentation of the
3742  * WorkStream::run function for the meaning of these arguments).
3743  * Note how we use lambda functions to
3744  * create a function object that satisfies this requirement. It uses
3745  * function arguments for the local assembly function that specify
3746  * cell, scratch data, and copy data, as well as a function argument
3747  * for the copy function that expects the
3748  * data to be written into the global matrix (also see the discussion in
3749  * @ref step_13 "step-13"'s <code>assemble_linear_system()</code> function). On the other
3750  * hand, the implicit zeroth argument of member functions (namely
3751  * the <code>this</code> pointer of the object on which that member
3752  * function is to operate on) is <i>bound</i> to the
3753  * <code>this</code> pointer of the current function and is captured. The
3754  * WorkStream::run function, as a consequence, does not need to know
3755  * anything about the object these functions work on.
3756  *
3757 
3758  *
3759  * When the WorkStream is executed, it will create several local assembly
3760  * routines of the first kind for several cells and let some available
3761  * processors work on them. The function that needs to be synchronized,
3762  * i.e., the write operation into the global matrix, however, is executed by
3763  * only one thread at a time in the prescribed order. Of course, this only
3764  * holds for the parallelization on a single MPI process. Different MPI
3765  * processes will have their own WorkStream objects and do that work
3766  * completely independently (and in different memory spaces). In a
3767  * distributed calculation, some data will accumulate at degrees of freedom
3768  * that are not owned by the respective processor. It would be inefficient
3769  * to send data around every time we encounter such a dof. What happens
3770  * instead is that the Trilinos sparse matrix will keep that data and send
3771  * it to the owner at the end of assembly, by calling the
3772  * <code>compress()</code> command.
3773  *
3774  * @code
3775  * template <int dim>
3776  * void BoussinesqFlowProblem<dim>::assemble_stokes_preconditioner()
3777  * {
3778  * stokes_preconditioner_matrix = 0;
3779  *
3780  * const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);
3781  *
3782  * using CellFilter =
3783  * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
3784  *
3785  * auto worker =
3786  * [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
3787  * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
3788  * Assembly::CopyData::StokesPreconditioner<dim> & data) {
3789  * this->local_assemble_stokes_preconditioner(cell, scratch, data);
3790  * };
3791  *
3792  * auto copier =
3793  * [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
3794  * this->copy_local_to_global_stokes_preconditioner(data);
3795  * };
3796  *
3797  * WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
3798  * stokes_dof_handler.begin_active()),
3799  * CellFilter(IteratorFilters::LocallyOwnedCell(),
3800  * stokes_dof_handler.end()),
3801  * worker,
3802  * copier,
3803  * Assembly::Scratch::StokesPreconditioner<dim>(
3804  * stokes_fe,
3805  * quadrature_formula,
3806  * mapping,
3807  * update_JxW_values | update_values | update_gradients),
3808  * Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));
3809  *
3810  * stokes_preconditioner_matrix.compress(VectorOperation::add);
3811  * }
3812  *
3813  *
3814  *
3815  * @endcode
3816  *
3817  * The final function in this block initiates assembly of the Stokes
3818  * preconditioner matrix and then in fact builds the Stokes
3819  * preconditioner. It is mostly the same as in the serial case. The only
3820  * difference to @ref step_31 "step-31" is that we use a Jacobi preconditioner for the
3821  * pressure mass matrix instead of IC, as discussed in the introduction.
3822  *
3823  * @code
3824  * template <int dim>
3825  * void BoussinesqFlowProblem<dim>::build_stokes_preconditioner()
3826  * {
3827  * if (rebuild_stokes_preconditioner == false)
3828  * return;
3829  *
3830  * TimerOutput::Scope timer_section(computing_timer,
3831  * " Build Stokes preconditioner");
3832  * pcout << " Rebuilding Stokes preconditioner..." << std::flush;
3833  *
3834  * assemble_stokes_preconditioner();
3835  *
3836  * std::vector<std::vector<bool>> constant_modes;
3837  * FEValuesExtractors::Vector velocity_components(0);
3838  * DoFTools::extract_constant_modes(stokes_dof_handler,
3839  * stokes_fe.component_mask(
3840  * velocity_components),
3841  * constant_modes);
3842  *
3843  * Mp_preconditioner =
3844  * std::make_shared<TrilinosWrappers::PreconditionJacobi>();
3845  * Amg_preconditioner = std::make_shared<TrilinosWrappers::PreconditionAMG>();
3846  *
3847  * TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
3848  * Amg_data.constant_modes = constant_modes;
3849  * Amg_data.elliptic = true;
3850  * Amg_data.higher_order_elements = true;
3851  * Amg_data.smoother_sweeps = 2;
3852  * Amg_data.aggregation_threshold = 0.02;
3853  *
3854  * Mp_preconditioner->initialize(stokes_preconditioner_matrix.block(1, 1));
3855  * Amg_preconditioner->initialize(stokes_preconditioner_matrix.block(0, 0),
3856  * Amg_data);
3857  *
3858  * rebuild_stokes_preconditioner = false;
3859  *
3860  * pcout << std::endl;
3861  * }
3862  *
3863  *
3864  * @endcode
3865  *
3866  *
3867  * <a name="Stokessystemassembly"></a>
3868  * <h5>Stokes system assembly</h5>
3869  *
3870 
3871  *
3872  * The next three functions implement the assembly of the Stokes system,
3873  * again split up into a part performing local calculations, one for writing
3874  * the local data into the global matrix and vector, and one for actually
3875  * running the loop over all cells with the help of the WorkStream
3876  * class. Note that the assembly of the Stokes matrix only needs to be done
3877  * if we have changed the mesh. Otherwise, just the
3878  * (temperature-dependent) right hand side needs to be calculated
3879  * here. Since we are working with distributed matrices and vectors, we have
3880  * to call the respective <code>compress()</code> functions in the end of
3881  * the assembly in order to send non-local data to the owner process.
3882  *
3883  * @code
3884  * template <int dim>
3885  * void BoussinesqFlowProblem<dim>::local_assemble_stokes_system(
3886  * const typename DoFHandler<dim>::active_cell_iterator &cell,
3887  * Assembly::Scratch::StokesSystem<dim> & scratch,
3888  * Assembly::CopyData::StokesSystem<dim> & data)
3889  * {
3890  * const unsigned int dofs_per_cell =
3891  * scratch.stokes_fe_values.get_fe().dofs_per_cell;
3892  * const unsigned int n_q_points =
3893  * scratch.stokes_fe_values.n_quadrature_points;
3894  *
3895  * const FEValuesExtractors::Vector velocities(0);
3896  * const FEValuesExtractors::Scalar pressure(dim);
3897  *
3898  * scratch.stokes_fe_values.reinit(cell);
3899  *
3900  * typename DoFHandler<dim>::active_cell_iterator temperature_cell(
3901  * &triangulation, cell->level(), cell->index(), &temperature_dof_handler);
3902  * scratch.temperature_fe_values.reinit(temperature_cell);
3903  *
3904  * if (rebuild_stokes_matrix)
3905  * data.local_matrix = 0;
3906  * data.local_rhs = 0;
3907  *
3908  * scratch.temperature_fe_values.get_function_values(
3909  * old_temperature_solution, scratch.old_temperature_values);
3910  *
3911  * for (unsigned int q = 0; q < n_q_points; ++q)
3912  * {
3913  * const double old_temperature = scratch.old_temperature_values[q];
3914  *
3915  * for (unsigned int k = 0; k < dofs_per_cell; ++k)
3916  * {
3917  * scratch.phi_u[k] = scratch.stokes_fe_values[velocities].value(k, q);
3918  * if (rebuild_stokes_matrix)
3919  * {
3920  * scratch.grads_phi_u[k] =
3921  * scratch.stokes_fe_values[velocities].symmetric_gradient(k, q);
3922  * scratch.div_phi_u[k] =
3923  * scratch.stokes_fe_values[velocities].divergence(k, q);
3924  * scratch.phi_p[k] =
3925  * scratch.stokes_fe_values[pressure].value(k, q);
3926  * }
3927  * }
3928  *
3929  * if (rebuild_stokes_matrix == true)
3930  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
3931  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
3932  * data.local_matrix(i, j) +=
3933  * (EquationData::eta * 2 *
3934  * (scratch.grads_phi_u[i] * scratch.grads_phi_u[j]) -
3935  * (EquationData::pressure_scaling * scratch.div_phi_u[i] *
3936  * scratch.phi_p[j]) -
3937  * (EquationData::pressure_scaling * scratch.phi_p[i] *
3938  * scratch.div_phi_u[j])) *
3939  * scratch.stokes_fe_values.JxW(q);
3940  *
3941  * const Tensor<1, dim> gravity = EquationData::gravity_vector(
3942  * scratch.stokes_fe_values.quadrature_point(q));
3943  *
3944  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
3945  * data.local_rhs(i) += (EquationData::density(old_temperature) *
3946  * gravity * scratch.phi_u[i]) *
3947  * scratch.stokes_fe_values.JxW(q);
3948  * }
3949  *
3950  * cell->get_dof_indices(data.local_dof_indices);
3951  * }
3952  *
3953  *
3954  *
3955  * template <int dim>
3956  * void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_system(
3957  * const Assembly::CopyData::StokesSystem<dim> &data)
3958  * {
3959  * if (rebuild_stokes_matrix == true)
3960  * stokes_constraints.distribute_local_to_global(data.local_matrix,
3961  * data.local_rhs,
3962  * data.local_dof_indices,
3963  * stokes_matrix,
3964  * stokes_rhs);
3965  * else
3966  * stokes_constraints.distribute_local_to_global(data.local_rhs,
3967  * data.local_dof_indices,
3968  * stokes_rhs);
3969  * }
3970  *
3971  *
3972  *
3973  * template <int dim>
3974  * void BoussinesqFlowProblem<dim>::assemble_stokes_system()
3975  * {
3976  * TimerOutput::Scope timer_section(computing_timer,
3977  * " Assemble Stokes system");
3978  *
3979  * if (rebuild_stokes_matrix == true)
3980  * stokes_matrix = 0;
3981  *
3982  * stokes_rhs = 0;
3983  *
3984  * const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);
3985  *
3986  * using CellFilter =
3987  * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
3988  *
3989  * WorkStream::run(
3990  * CellFilter(IteratorFilters::LocallyOwnedCell(),
3991  * stokes_dof_handler.begin_active()),
3992  * CellFilter(IteratorFilters::LocallyOwnedCell(), stokes_dof_handler.end()),
3993  * [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
3994  * Assembly::Scratch::StokesSystem<dim> & scratch,
3995  * Assembly::CopyData::StokesSystem<dim> & data) {
3996  * this->local_assemble_stokes_system(cell, scratch, data);
3997  * },
3998  * [this](const Assembly::CopyData::StokesSystem<dim> &data) {
3999  * this->copy_local_to_global_stokes_system(data);
4000  * },
4001  * Assembly::Scratch::StokesSystem<dim>(
4002  * stokes_fe,
4003  * mapping,
4004  * quadrature_formula,
4005  * (update_values | update_quadrature_points | update_JxW_values |
4006  * (rebuild_stokes_matrix == true ? update_gradients : UpdateFlags(0))),
4007  * temperature_fe,
4008  * update_values),
4009  * Assembly::CopyData::StokesSystem<dim>(stokes_fe));
4010  *
4011  * if (rebuild_stokes_matrix == true)
4012  * stokes_matrix.compress(VectorOperation::add);
4013  * stokes_rhs.compress(VectorOperation::add);
4014  *
4015  * rebuild_stokes_matrix = false;
4016  *
4017  * pcout << std::endl;
4018  * }
4019  *
4020  *
4021  * @endcode
4022  *
4023  *
4024  * <a name="Temperaturematrixassembly"></a>
4025  * <h5>Temperature matrix assembly</h5>
4026  *
4027 
4028  *
4029  * The task to be performed by the next three functions is to calculate a
4030  * mass matrix and a Laplace matrix on the temperature system. These will be
4031  * combined in order to yield the semi-implicit time stepping matrix that
4032  * consists of the mass matrix plus a time step-dependent weight factor
4033  * times the Laplace matrix. This function is again essentially the body of
4034  * the loop over all cells from @ref step_31 "step-31".
4035  *
4036 
4037  *
4038  * The two following functions perform similar services as the ones above.
4039  *
4040  * @code
4041  * template <int dim>
4042  * void BoussinesqFlowProblem<dim>::local_assemble_temperature_matrix(
4043  * const typename DoFHandler<dim>::active_cell_iterator &cell,
4044  * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
4045  * Assembly::CopyData::TemperatureMatrix<dim> & data)
4046  * {
4047  * const unsigned int dofs_per_cell =
4048  * scratch.temperature_fe_values.get_fe().dofs_per_cell;
4049  * const unsigned int n_q_points =
4050  * scratch.temperature_fe_values.n_quadrature_points;
4051  *
4052  * scratch.temperature_fe_values.reinit(cell);
4053  * cell->get_dof_indices(data.local_dof_indices);
4054  *
4055  * data.local_mass_matrix = 0;
4056  * data.local_stiffness_matrix = 0;
4057  *
4058  * for (unsigned int q = 0; q < n_q_points; ++q)
4059  * {
4060  * for (unsigned int k = 0; k < dofs_per_cell; ++k)
4061  * {
4062  * scratch.grad_phi_T[k] =
4063  * scratch.temperature_fe_values.shape_grad(k, q);
4064  * scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
4065  * }
4066  *
4067  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
4068  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
4069  * {
4070  * data.local_mass_matrix(i, j) +=
4071  * (scratch.phi_T[i] * scratch.phi_T[j] *
4072  * scratch.temperature_fe_values.JxW(q));
4073  * data.local_stiffness_matrix(i, j) +=
4074  * (EquationData::kappa * scratch.grad_phi_T[i] *
4075  * scratch.grad_phi_T[j] * scratch.temperature_fe_values.JxW(q));
4076  * }
4077  * }
4078  * }
4079  *
4080  *
4081  *
4082  * template <int dim>
4083  * void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_matrix(
4084  * const Assembly::CopyData::TemperatureMatrix<dim> &data)
4085  * {
4086  * temperature_constraints.distribute_local_to_global(data.local_mass_matrix,
4087  * data.local_dof_indices,
4088  * temperature_mass_matrix);
4089  * temperature_constraints.distribute_local_to_global(
4090  * data.local_stiffness_matrix,
4091  * data.local_dof_indices,
4092  * temperature_stiffness_matrix);
4093  * }
4094  *
4095  *
4096  * template <int dim>
4097  * void BoussinesqFlowProblem<dim>::assemble_temperature_matrix()
4098  * {
4099  * if (rebuild_temperature_matrices == false)
4100  * return;
4101  *
4102  * TimerOutput::Scope timer_section(computing_timer,
4103  * " Assemble temperature matrices");
4104  * temperature_mass_matrix = 0;
4105  * temperature_stiffness_matrix = 0;
4106  *
4107  * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);
4108  *
4109  * using CellFilter =
4110  * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
4111  *
4112  * WorkStream::run(
4113  * CellFilter(IteratorFilters::LocallyOwnedCell(),
4114  * temperature_dof_handler.begin_active()),
4115  * CellFilter(IteratorFilters::LocallyOwnedCell(),
4116  * temperature_dof_handler.end()),
4117  * [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
4118  * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
4119  * Assembly::CopyData::TemperatureMatrix<dim> & data) {
4120  * this->local_assemble_temperature_matrix(cell, scratch, data);
4121  * },
4122  * [this](const Assembly::CopyData::TemperatureMatrix<dim> &data) {
4123  * this->copy_local_to_global_temperature_matrix(data);
4124  * },
4125  * Assembly::Scratch::TemperatureMatrix<dim>(temperature_fe,
4126  * mapping,
4127  * quadrature_formula),
4128  * Assembly::CopyData::TemperatureMatrix<dim>(temperature_fe));
4129  *
4130  * temperature_mass_matrix.compress(VectorOperation::add);
4131  * temperature_stiffness_matrix.compress(VectorOperation::add);
4132  *
4133  * rebuild_temperature_matrices = false;
4134  * rebuild_temperature_preconditioner = true;
4135  * }
4136  *
4137  *
4138  * @endcode
4139  *
4140  *
4141  * <a name="Temperaturerighthandsideassembly"></a>
4142  * <h5>Temperature right hand side assembly</h5>
4143  *
4144 
4145  *
4146  * This is the last assembly function. It calculates the right hand side of
4147  * the temperature system, which includes the convection and the
4148  * stabilization terms. It includes a lot of evaluations of old solutions at
4149  * the quadrature points (which are necessary for calculating the artificial
4150  * viscosity of stabilization), but is otherwise similar to the other
4151  * assembly functions. Notice, once again, how we resolve the dilemma of
4152  * having inhomogeneous boundary conditions, by just making a right hand
4153  * side at this point (compare the comments for the <code>project()</code>
4154  * function above): We create some matrix columns with exactly the values
4155  * that would be entered for the temperature stiffness matrix, in case we
4156  * have inhomogeneously constrained dofs. That will account for the correct
4157  * balance of the right hand side vector with the matrix system of
4158  * temperature.
4159  *
4160  * @code
4161  * template <int dim>
4162  * void BoussinesqFlowProblem<dim>::local_assemble_temperature_rhs(
4163  * const std::pair<double, double> global_T_range,
4164  * const double global_max_velocity,
4165  * const double global_entropy_variation,
4166  * const typename DoFHandler<dim>::active_cell_iterator &cell,
4167  * Assembly::Scratch::TemperatureRHS<dim> & scratch,
4168  * Assembly::CopyData::TemperatureRHS<dim> & data)
4169  * {
4170  * const bool use_bdf2_scheme = (timestep_number != 0);
4171  *
4172  * const unsigned int dofs_per_cell =
4173  * scratch.temperature_fe_values.get_fe().dofs_per_cell;
4174  * const unsigned int n_q_points =
4175  * scratch.temperature_fe_values.n_quadrature_points;
4176  *
4177  * const FEValuesExtractors::Vector velocities(0);
4178  *
4179  * data.local_rhs = 0;
4180  * data.matrix_for_bc = 0;
4181  * cell->get_dof_indices(data.local_dof_indices);
4182  *
4183  * scratch.temperature_fe_values.reinit(cell);
4184  *
4185  * typename DoFHandler<dim>::active_cell_iterator stokes_cell(
4186  * &triangulation, cell->level(), cell->index(), &stokes_dof_handler);
4187  * scratch.stokes_fe_values.reinit(stokes_cell);
4188  *
4189  * scratch.temperature_fe_values.get_function_values(
4190  * old_temperature_solution, scratch.old_temperature_values);
4191  * scratch.temperature_fe_values.get_function_values(
4192  * old_old_temperature_solution, scratch.old_old_temperature_values);
4193  *
4194  * scratch.temperature_fe_values.get_function_gradients(
4195  * old_temperature_solution, scratch.old_temperature_grads);
4196  * scratch.temperature_fe_values.get_function_gradients(
4197  * old_old_temperature_solution, scratch.old_old_temperature_grads);
4198  *
4199  * scratch.temperature_fe_values.get_function_laplacians(
4200  * old_temperature_solution, scratch.old_temperature_laplacians);
4201  * scratch.temperature_fe_values.get_function_laplacians(
4202  * old_old_temperature_solution, scratch.old_old_temperature_laplacians);
4203  *
4204  * scratch.stokes_fe_values[velocities].get_function_values(
4205  * stokes_solution, scratch.old_velocity_values);
4206  * scratch.stokes_fe_values[velocities].get_function_values(
4207  * old_stokes_solution, scratch.old_old_velocity_values);
4208  * scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
4209  * stokes_solution, scratch.old_strain_rates);
4210  * scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
4211  * old_stokes_solution, scratch.old_old_strain_rates);
4212  *
4213  * const double nu =
4214  * compute_viscosity(scratch.old_temperature_values,
4215  * scratch.old_old_temperature_values,
4216  * scratch.old_temperature_grads,
4217  * scratch.old_old_temperature_grads,
4218  * scratch.old_temperature_laplacians,
4219  * scratch.old_old_temperature_laplacians,
4220  * scratch.old_velocity_values,
4221  * scratch.old_old_velocity_values,
4222  * scratch.old_strain_rates,
4223  * scratch.old_old_strain_rates,
4224  * global_max_velocity,
4225  * global_T_range.second - global_T_range.first,
4226  * 0.5 * (global_T_range.second + global_T_range.first),
4227  * global_entropy_variation,
4228  * cell->diameter());
4229  *
4230  * for (unsigned int q = 0; q < n_q_points; ++q)
4231  * {
4232  * for (unsigned int k = 0; k < dofs_per_cell; ++k)
4233  * {
4234  * scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
4235  * scratch.grad_phi_T[k] =
4236  * scratch.temperature_fe_values.shape_grad(k, q);
4237  * }
4238  *
4239  *
4240  * const double T_term_for_rhs =
4241  * (use_bdf2_scheme ?
4242  * (scratch.old_temperature_values[q] *
4243  * (1 + time_step / old_time_step) -
4244  * scratch.old_old_temperature_values[q] * (time_step * time_step) /
4245  * (old_time_step * (time_step + old_time_step))) :
4246  * scratch.old_temperature_values[q]);
4247  *
4248  * const double ext_T =
4249  * (use_bdf2_scheme ? (scratch.old_temperature_values[q] *
4250  * (1 + time_step / old_time_step) -
4251  * scratch.old_old_temperature_values[q] *
4252  * time_step / old_time_step) :
4253  * scratch.old_temperature_values[q]);
4254  *
4255  * const Tensor<1, dim> ext_grad_T =
4256  * (use_bdf2_scheme ? (scratch.old_temperature_grads[q] *
4257  * (1 + time_step / old_time_step) -
4258  * scratch.old_old_temperature_grads[q] * time_step /
4259  * old_time_step) :
4260  * scratch.old_temperature_grads[q]);
4261  *
4262  * const Tensor<1, dim> extrapolated_u =
4263  * (use_bdf2_scheme ?
4264  * (scratch.old_velocity_values[q] * (1 + time_step / old_time_step) -
4265  * scratch.old_old_velocity_values[q] * time_step / old_time_step) :
4266  * scratch.old_velocity_values[q]);
4267  *
4268  * const SymmetricTensor<2, dim> extrapolated_strain_rate =
4269  * (use_bdf2_scheme ?
4270  * (scratch.old_strain_rates[q] * (1 + time_step / old_time_step) -
4271  * scratch.old_old_strain_rates[q] * time_step / old_time_step) :
4272  * scratch.old_strain_rates[q]);
4273  *
4274  * const double gamma =
4275  * ((EquationData::radiogenic_heating * EquationData::density(ext_T) +
4276  * 2 * EquationData::eta * extrapolated_strain_rate *
4277  * extrapolated_strain_rate) /
4278  * (EquationData::density(ext_T) * EquationData::specific_heat));
4279  *
4280  * for (unsigned int i = 0; i < dofs_per_cell; ++i)
4281  * {
4282  * data.local_rhs(i) +=
4283  * (T_term_for_rhs * scratch.phi_T[i] -
4284  * time_step * extrapolated_u * ext_grad_T * scratch.phi_T[i] -
4285  * time_step * nu * ext_grad_T * scratch.grad_phi_T[i] +
4286  * time_step * gamma * scratch.phi_T[i]) *
4287  * scratch.temperature_fe_values.JxW(q);
4288  *
4289  * if (temperature_constraints.is_inhomogeneously_constrained(
4290  * data.local_dof_indices[i]))
4291  * {
4292  * for (unsigned int j = 0; j < dofs_per_cell; ++j)
4293  * data.matrix_for_bc(j, i) +=
4294  * (scratch.phi_T[i] * scratch.phi_T[j] *
4295  * (use_bdf2_scheme ? ((2 * time_step + old_time_step) /
4296  * (time_step + old_time_step)) :
4297  * 1.) +
4298  * scratch.grad_phi_T[i] * scratch.grad_phi_T[j] *
4299  * EquationData::kappa * time_step) *
4300  * scratch.temperature_fe_values.JxW(q);
4301  * }
4302  * }
4303  * }
4304  * }
4305  *
4306  *
4307  * template <int dim>
4308  * void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_rhs(
4309  * const Assembly::CopyData::TemperatureRHS<dim> &data)
4310  * {
4311  * temperature_constraints.distribute_local_to_global(data.local_rhs,
4312  * data.local_dof_indices,
4313  * temperature_rhs,
4314  * data.matrix_for_bc);
4315  * }
4316  *
4317  *
4318  *
4319  * @endcode
4320  *
4321  * In the function that runs the WorkStream for actually calculating the
4322  * right hand side, we also generate the final matrix. As mentioned above,
4323  * it is a sum of the mass matrix and the Laplace matrix, times some time
4324  * step-dependent weight. This weight is specified by the BDF-2 time
4325  * integration scheme, see the introduction in @ref step_31 "step-31". What is new in this
4326  * tutorial program (in addition to the use of MPI parallelization and the
4327  * WorkStream class), is that we now precompute the temperature
4328  * preconditioner as well. The reason is that the setup of the Jacobi
4329  * preconditioner takes a noticeable time compared to the solver because we
4330  * usually only need between 10 and 20 iterations for solving the
4331  * temperature system (this might sound strange, as Jacobi really only
4332  * consists of a diagonal, but in Trilinos it is derived from a more general
4333  * framework for point relaxation preconditioners, which is a bit
4334  * inefficient). Hence, it is more efficient to precompute the
4335  * preconditioner, even though the matrix entries may slightly change
4336  * because the time step might change. This is not too big a problem because
4337  * we remesh every few time steps (and regenerate the preconditioner then).
4338  *
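 * In formulas (a brief restatement of what the code below assembles, writing
 * @f$M@f$ for the temperature mass matrix, @f$A@f$ for the stiffness matrix,
 * and @f$k_n@f$, @f$k_{n-1}@f$ for the current and previous time step), the
 * system matrix for the BDF-2 case is
 * @f{eqnarray*}
 *   \frac{2k_n+k_{n-1}}{k_n+k_{n-1}} \, M + k_n \, A,
 * @f}
 * while the first, backward Euler step simply uses @f$M + k_n A@f$.
 *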
4339  * @code
4340  * template <int dim>
4341  * void BoussinesqFlowProblem<dim>::assemble_temperature_system(
4342  * const double maximal_velocity)
4343  * {
4344  * const bool use_bdf2_scheme = (timestep_number != 0);
4345  *
4346  * if (use_bdf2_scheme == true)
4347  * {
4348  * temperature_matrix.copy_from(temperature_mass_matrix);
4349  * temperature_matrix *=
4350  * (2 * time_step + old_time_step) / (time_step + old_time_step);
4351  * temperature_matrix.add(time_step, temperature_stiffness_matrix);
4352  * }
4353  * else
4354  * {
4355  * temperature_matrix.copy_from(temperature_mass_matrix);
4356  * temperature_matrix.add(time_step, temperature_stiffness_matrix);
4357  * }
4358  *
4359  * if (rebuild_temperature_preconditioner == true)
4360  * {
4361  * T_preconditioner =
4362  * std::make_shared<TrilinosWrappers::PreconditionJacobi>();
4363  * T_preconditioner->initialize(temperature_matrix);
4364  * rebuild_temperature_preconditioner = false;
4365  * }
4366  *
4367  * @endcode
4368  *
4369  * The next part is computing the right hand side vectors. To do so, we
4370  * first compute the average temperature @f$T_m@f$ that we use for evaluating
4371  * the artificial viscosity stabilization through the residual @f$E(T) =
4372  * (T-T_m)^2@f$. We do this by defining the midpoint between maximum and
4373  * minimum temperature as average temperature in the definition of the
4374  * entropy viscosity. An alternative would be to use the integral average,
4375  * but the results are not very sensitive to this choice. The rest then
4376  * only requires calling WorkStream::run again, binding the arguments to
4377  * the <code>local_assemble_temperature_rhs</code> function that are the
4378  * same in every call to the correct values:
4379  *
4380  * @code
4381  * temperature_rhs = 0;
4382  *
4383  * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);
4384  * const std::pair<double, double> global_T_range =
4385  * get_extrapolated_temperature_range();
4386  *
4387  * const double average_temperature =
4388  * 0.5 * (global_T_range.first + global_T_range.second);
4389  * const double global_entropy_variation =
4390  * get_entropy_variation(average_temperature);
4391  *
4392  * using CellFilter =
4393  * FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;
4394  *
4395  * auto worker =
4396  * [=](const typename DoFHandler<dim>::active_cell_iterator &cell,
4397  * Assembly::Scratch::TemperatureRHS<dim> & scratch,
4398  * Assembly::CopyData::TemperatureRHS<dim> & data) {
4399  * this->local_assemble_temperature_rhs(global_T_range,
4400  * maximal_velocity,
4401  * global_entropy_variation,
4402  * cell,
4403  * scratch,
4404  * data);
4405  * };
4406  *
4407  * auto copier = [this](const Assembly::CopyData::TemperatureRHS<dim> &data) {
4408  * this->copy_local_to_global_temperature_rhs(data);
4409  * };
4410  *
4411  * WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
4412  * temperature_dof_handler.begin_active()),
4413  * CellFilter(IteratorFilters::LocallyOwnedCell(),
4414  * temperature_dof_handler.end()),
4415  * worker,
4416  * copier,
4417  * Assembly::Scratch::TemperatureRHS<dim>(
4418  * temperature_fe, stokes_fe, mapping, quadrature_formula),
4419  * Assembly::CopyData::TemperatureRHS<dim>(temperature_fe));
4420  *
4421  * temperature_rhs.compress(VectorOperation::add);
4422  * }
4423  *
4424  *
4425  *
4426  * @endcode
4427  *
4428  *
4429  * <a name="BoussinesqFlowProblemsolve"></a>
4430  * <h4>BoussinesqFlowProblem::solve</h4>
4431  *
4432 
4433  *
4434  * This function solves the linear systems in each time step of the
4435  * Boussinesq problem. First, we work on the Stokes system and then on the
4436  * temperature system. In essence, it does the same things as the respective
4437  * function in @ref step_31 "step-31". However, there are a few changes here.
4438  *
4439 
4440  *
4441  * The first change is related to the way we store our solution: we keep the
4442  * vectors with locally owned degrees of freedom plus ghost nodes on each
4443  * MPI process. When we enter a solver which is supposed to perform
4444  * matrix-vector products with a distributed matrix, this is not the
4445  * appropriate form, though. There, we will want the solution vector
4446  * to be distributed in the same way as the matrix, i.e. without any
4447  * ghosts. So what we do first is to generate a distributed vector called
4448  * <code>distributed_stokes_solution</code> and put only the locally owned
4449  * dofs into that, which is neatly done by the <code>operator=</code> of the
4450  * Trilinos vector.
4451  *
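 * Condensed to its essence (a sketch only, reusing the variable names of the
 * code further down), this first step looks as follows:
 * @code
 * // A block vector with the same parallel partitioning as the right hand
 * // side, i.e., without any ghost entries:
 * TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(stokes_rhs);
 * // operator= copies the locally owned entries of the ghosted solution:
 * distributed_stokes_solution = stokes_solution;
 * @endcode
 *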
4452 
4453  *
4454  * Next, we scale the pressure solution (or rather, the initial guess) for
4455  * the solver so that it matches with the length scales in the matrices, as
4456  * discussed in the introduction. We also immediately scale the pressure
4457  * solution back to the correct units after the solution is completed. We
4458  * also need to set the pressure values at hanging nodes to zero. This we
4459  * also did in @ref step_31 "step-31" in order not to disturb the Schur complement by some
4460  * vector entries that actually are irrelevant during the solve stage. As a
4461  * difference to @ref step_31 "step-31", here we do it only for the locally owned pressure
4462  * dofs. After solving for the Stokes solution, each processor copies the
4463  * distributed solution back into the solution vector that also includes
4464  * ghost elements.
4465  *
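 * In symbols, if @f$s@f$ denotes the factor stored in
 * <code>EquationData::pressure_scaling</code>, then the solver works with the
 * rescaled pressure @f$\hat p = p / s@f$ as its unknown, and we recover the
 * physical pressure as @f$p = s\,\hat p@f$ once the solve is finished. (This
 * is only a restatement of the two lines in the code below that divide and
 * later multiply the pressure block by this factor.)
 *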
4466 
4467  *
4468  * The third and most obvious change is that we have two variants for the
4469  * Stokes solver: A fast solver that sometimes breaks down, and a robust
4470  * solver that is slower. This is what we already discussed in the
4471  * introduction. Here is how we realize it: First, we perform 30 iterations
4472  * with the fast solver, using the simple preconditioner based on the AMG
4473  * V-cycle instead of an approximate solve (this is indicated by the
4474  * <code>false</code> argument to the
4475  * <code>LinearSolvers::BlockSchurPreconditioner</code> object). If we
4476  * converge, everything is fine. If we do not converge, the solver control
4477  * object will throw an exception of type SolverControl::NoConvergence. Usually,
4478  * this would abort the program because we don't catch it in our usual
4479  * <code>solve()</code> functions. This is certainly not what we want to
4480  * happen here. Rather, we want to switch to the strong solver and continue
4481  * the solution process with whatever vector we got so far. Hence, we catch
4482  * the exception with the C++ try/catch mechanism. We then simply go through
4483  * the same solver sequence again in the <code>catch</code> clause, this
4484  * time passing the @p true flag to the preconditioner for the strong
4485  * solver, signaling an approximate CG solve.
4486  *
4487  * @code
4488  * template <int dim>
4489  * void BoussinesqFlowProblem<dim>::solve()
4490  * {
4491  * {
4492  * TimerOutput::Scope timer_section(computing_timer,
4493  * " Solve Stokes system");
4494  *
4495  * pcout << " Solving Stokes system... " << std::flush;
4496  *
4497  * TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(
4498  * stokes_rhs);
4499  * distributed_stokes_solution = stokes_solution;
4500  *
4501  * distributed_stokes_solution.block(1) /= EquationData::pressure_scaling;
4502  *
4503  * const unsigned int
4504  * start = (distributed_stokes_solution.block(0).size() +
4505  * distributed_stokes_solution.block(1).local_range().first),
4506  * end = (distributed_stokes_solution.block(0).size() +
4507  * distributed_stokes_solution.block(1).local_range().second);
4508  * for (unsigned int i = start; i < end; ++i)
4509  * if (stokes_constraints.is_constrained(i))
4510  * distributed_stokes_solution(i) = 0;
4511  *
4512  *
4513  * PrimitiveVectorMemory<TrilinosWrappers::MPI::BlockVector> mem;
4514  *
4515  * unsigned int n_iterations = 0;
4516  * const double solver_tolerance = 1e-8 * stokes_rhs.l2_norm();
4517  * SolverControl solver_control(30, solver_tolerance);
4518  *
4519  * try
4520  * {
4521  * const LinearSolvers::BlockSchurPreconditioner<
4522  * TrilinosWrappers::PreconditionAMG,
4523  * TrilinosWrappers::PreconditionJacobi>
4524  * preconditioner(stokes_matrix,
4525  * stokes_preconditioner_matrix,
4526  * *Mp_preconditioner,
4527  * *Amg_preconditioner,
4528  * false);
4529  *
4530  * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
4531  * solver_control,
4532  * mem,
4533  * SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
4534  * 30));
4535  * solver.solve(stokes_matrix,
4536  * distributed_stokes_solution,
4537  * stokes_rhs,
4538  * preconditioner);
4539  *
4540  * n_iterations = solver_control.last_step();
4541  * }
4542  *
4543  * catch (SolverControl::NoConvergence &)
4544  * {
4545  * const LinearSolvers::BlockSchurPreconditioner<
4546  * TrilinosWrappers::PreconditionAMG,
4547  * TrilinosWrappers::PreconditionJacobi>
4548  * preconditioner(stokes_matrix,
4549  * stokes_preconditioner_matrix,
4550  * *Mp_preconditioner,
4551  * *Amg_preconditioner,
4552  * true);
4553  *
4554  * SolverControl solver_control_refined(stokes_matrix.m(),
4555  * solver_tolerance);
4556  * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
4557  * solver_control_refined,
4558  * mem,
4559  * SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
4560  * 50));
4561  * solver.solve(stokes_matrix,
4562  * distributed_stokes_solution,
4563  * stokes_rhs,
4564  * preconditioner);
4565  *
4566  * n_iterations =
4567  * (solver_control.last_step() + solver_control_refined.last_step());
4568  * }
4569  *
4570  *
4571  * stokes_constraints.distribute(distributed_stokes_solution);
4572  *
4573  * distributed_stokes_solution.block(1) *= EquationData::pressure_scaling;
4574  *
4575  * stokes_solution = distributed_stokes_solution;
4576  * pcout << n_iterations << " iterations." << std::endl;
4577  * }
4578  *
4579  *
4580  * @endcode
4581  *
4582  * Now let's turn to the temperature part: First, we compute the time step
4583  * size. We found that we need smaller time steps for 3d than for 2d for
4584  * the shell geometry. This is because the cells are more distorted in
4585  * that case (it is the smallest edge length that determines the CFL
4586  * number). Instead of computing the time step from maximum velocity and
4587  * minimal mesh size as in @ref step_31 "step-31", we compute local CFL numbers, i.e., on
4588  * each cell we compute the maximum velocity divided by the mesh size, and
4589  * compute the maximum of them. Hence, we need to choose the factor in
4590  * front of the time step slightly smaller.
4591  *
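 * Spelled out (a restatement of the formula evaluated in the code below,
 * where the last factor is what <code>get_cfl_number()</code> returns), the
 * time step is
 * @f{eqnarray*}
 *   k_n = \frac{c}{2.1 \, d \, \sqrt{d} \;\; p_T \;
 *         \max_K \frac{\|\mathbf u\|_{\infty,K}}{h_K}},
 * @f}
 * with @f$c=0.25@f$ in 3d and @f$c=1@f$ in 2d, @f$d@f$ the space dimension,
 * @f$p_T@f$ the polynomial degree of the temperature element,
 * @f$\|\mathbf u\|_{\infty,K}@f$ the maximal velocity on cell @f$K@f$, and
 * @f$h_K@f$ its diameter.
 *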
4592 
4593  *
4594  * After temperature right hand side assembly, we solve the linear system
4595  * for temperature (with fully distributed vectors without any ghosts),
4596  * apply constraints and copy the vector back to one with ghosts.
4597  *
4598 
4599  *
4600  * In the end, we extract the temperature range similarly to @ref step_31 "step-31" to
4601  * produce some output (for example in order to help us choose the
4602  * stabilization constants, as discussed in the introduction). The only
4603  * difference is that we need to exchange maxima over all processors.
4604  *
4605  * @code
4606  * {
4607  * TimerOutput::Scope timer_section(computing_timer,
4608  * " Assemble temperature rhs");
4609  *
4610  * old_time_step = time_step;
4611  *
4612  * const double scaling = (dim == 3 ? 0.25 : 1.0);
4613  * time_step = (scaling / (2.1 * dim * std::sqrt(1. * dim)) /
4614  * (parameters.temperature_degree * get_cfl_number()));
4615  *
4616  * const double maximal_velocity = get_maximal_velocity();
4617  * pcout << " Maximal velocity: "
4618  * << maximal_velocity * EquationData::year_in_seconds * 100
4619  * << " cm/year" << std::endl;
4620  * pcout << " "
4621  * << "Time step: " << time_step / EquationData::year_in_seconds
4622  * << " years" << std::endl;
4623  *
4624  * temperature_solution = old_temperature_solution;
4625  * assemble_temperature_system(maximal_velocity);
4626  * }
4627  *
4628  * {
4629  * TimerOutput::Scope timer_section(computing_timer,
4630  * " Solve temperature system");
4631  *
4632  * SolverControl solver_control(temperature_matrix.m(),
4633  * 1e-12 * temperature_rhs.l2_norm());
4634  * SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);
4635  *
4636  * TrilinosWrappers::MPI::Vector distributed_temperature_solution(
4637  * temperature_rhs);
4638  * distributed_temperature_solution = temperature_solution;
4639  *
4640  * cg.solve(temperature_matrix,
4641  * distributed_temperature_solution,
4642  * temperature_rhs,
4643  * *T_preconditioner);
4644  *
4645  * temperature_constraints.distribute(distributed_temperature_solution);
4646  * temperature_solution = distributed_temperature_solution;
4647  *
4648  * pcout << " " << solver_control.last_step()
4649  * << " CG iterations for temperature" << std::endl;
4650  *
4651  * double temperature[2] = {std::numeric_limits<double>::max(),
4652  * -std::numeric_limits<double>::max()};
4653  * double global_temperature[2];
4654  *
4655  * for (unsigned int i =
4656  * distributed_temperature_solution.local_range().first;
4657  * i < distributed_temperature_solution.local_range().second;
4658  * ++i)
4659  * {
4660  * temperature[0] =
4661  * std::min<double>(temperature[0],
4662  * distributed_temperature_solution(i));
4663  * temperature[1] =
4664  * std::max<double>(temperature[1],
4665  * distributed_temperature_solution(i));
4666  * }
4667  *
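 * // Compute the global minimum and maximum with a single reduction: negate
 * // the local minimum so that the MPI maximum yields (the negative of) the
 * // global minimum, then flip the sign back afterwards: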
4668  * temperature[0] *= -1.0;
4669  * Utilities::MPI::max(temperature, MPI_COMM_WORLD, global_temperature);
4670  * global_temperature[0] *= -1.0;
4671  *
4672  * pcout << " Temperature range: " << global_temperature[0] << ' '
4673  * << global_temperature[1] << std::endl;
4674  * }
4675  * }
4676  *
4677  *
4678  * @endcode
4679  *
4680  *
4681  * <a name="BoussinesqFlowProblemoutput_results"></a>
4682  * <h4>BoussinesqFlowProblem::output_results</h4>
4683  *
4684 
4685  *
4686  * Next comes the function that generates the output. The quantities to
4687  * output could be introduced manually like we did in @ref step_31 "step-31". An
4688  * alternative is to hand this task over to a class PostProcessor that
4689  * inherits from the class DataPostprocessor, which can be attached to
4690  * DataOut. This allows us to output derived quantities from the solution,
4691  * like the friction heating included in this example. It overloads the
4692  * virtual function DataPostprocessor::evaluate_vector_field(),
4693  * which is then internally called from DataOut::build_patches(). We have to
4694  * give it values of the numerical solution, its derivatives, normals to the
4695  * cell, the actual evaluation points and any additional quantities. This
4696  * follows the same procedure as discussed in @ref step_29 "step-29" and other programs.
4697  *
4698  * @code
4699  * template <int dim>
4700  * class BoussinesqFlowProblem<dim>::Postprocessor
4701  * : public DataPostprocessor<dim>
4702  * {
4703  * public:
4704  * Postprocessor(const unsigned int partition, const double minimal_pressure);
4705  *
4706  * virtual void evaluate_vector_field(
4707  * const DataPostprocessorInputs::Vector<dim> &inputs,
4708  * std::vector<Vector<double>> &computed_quantities) const override;
4709  *
4710  * virtual std::vector<std::string> get_names() const override;
4711  *
4712  * virtual std::vector<
4713  * DataComponentInterpretation::DataComponentInterpretation>
4714  * get_data_component_interpretation() const override;
4715  *
4716  * virtual UpdateFlags get_needed_update_flags() const override;
4717  *
4718  * private:
4719  * const unsigned int partition;
4720  * const double minimal_pressure;
4721  * };
4722  *
4723  *
4724  * template <int dim>
4725  * BoussinesqFlowProblem<dim>::Postprocessor::Postprocessor(
4726  * const unsigned int partition,
4727  * const double minimal_pressure)
4728  * : partition(partition)
4729  * , minimal_pressure(minimal_pressure)
4730  * {}
4731  *
4732  *
4733  * @endcode
4734  *
4735  * Here we define the names for the variables we want to output. These are
4736  * the actual solution values for velocity, pressure, and temperature, as
4737  * well as the friction heating and, for each cell, the number of the processor
4738  * that owns it. This allows us to visualize the partitioning of the domain
4739  * among the processors. Except for the velocity, which is vector-valued,
4740  * all other quantities are scalar.
4741  *
4742  * @code
4743  * template <int dim>
4744  * std::vector<std::string>
4745  * BoussinesqFlowProblem<dim>::Postprocessor::get_names() const
4746  * {
4747  * std::vector<std::string> solution_names(dim, "velocity");
4748  * solution_names.emplace_back("p");
4749  * solution_names.emplace_back("T");
4750  * solution_names.emplace_back("friction_heating");
4751  * solution_names.emplace_back("partition");
4752  *
4753  * return solution_names;
4754  * }
4755  *
4756  *
4757  * template <int dim>
4758  * std::vector<DataComponentInterpretation::DataComponentInterpretation>
4759  * BoussinesqFlowProblem<dim>::Postprocessor::get_data_component_interpretation()
4760  * const
4761  * {
4762  * std::vector<DataComponentInterpretation::DataComponentInterpretation>
4763  * interpretation(dim,
4764  * DataComponentInterpretation::component_is_part_of_vector);
4765  *
4766  * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4767  * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4768  * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4769  * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4770  *
4771  * return interpretation;
4772  * }
4773  *
4774  *
4775  * template <int dim>
4776  * UpdateFlags
4777  * BoussinesqFlowProblem<dim>::Postprocessor::get_needed_update_flags() const
4778  * {
4779  * return update_values | update_gradients;
4780  * }
4781  *
4782  *
4783  * @endcode
4784  *
4785  * Now we implement the function that computes the derived quantities. As we
4786  * also did for the output, we rescale the velocity from its SI units to
4787  * something more readable, namely cm/year. Next, the pressure is scaled to
4788  * be between 0 and the maximum pressure. This makes it more easily
4789  * comparable -- in essence making all pressure variables positive or
4790  * zero. Temperature is taken as is, and the friction heating is computed as
4791  * @f$2 \eta \varepsilon(\mathbf{u}) \cdot \varepsilon(\mathbf{u})@f$.
4792  *
4793 
4794  *
4795  * The quantities we output here are more for illustration, rather than for
4796  * actual scientific value. We come back to this briefly in the results
4797  * section of this program and explain what one may in fact be interested in.
4798  *
4799  * @code
4800  * template <int dim>
4801  * void BoussinesqFlowProblem<dim>::Postprocessor::evaluate_vector_field(
4802  * const DataPostprocessorInputs::Vector<dim> &inputs,
4803  * std::vector<Vector<double>> & computed_quantities) const
4804  * {
4805  * const unsigned int n_quadrature_points = inputs.solution_values.size();
4806  * Assert(inputs.solution_gradients.size() == n_quadrature_points,
4807  * ExcInternalError());
4808  * Assert(computed_quantities.size() == n_quadrature_points,
4809  * ExcInternalError());
4810  * Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());
4811  *
4812  * for (unsigned int q = 0; q < n_quadrature_points; ++q)
4813  * {
4814  * for (unsigned int d = 0; d < dim; ++d)
4815  * computed_quantities[q](d) = (inputs.solution_values[q](d) *
4816  * EquationData::year_in_seconds * 100);
4817  *
4818  * const double pressure =
4819  * (inputs.solution_values[q](dim) - minimal_pressure);
4820  * computed_quantities[q](dim) = pressure;
4821  *
4822  * const double temperature = inputs.solution_values[q](dim + 1);
4823  * computed_quantities[q](dim + 1) = temperature;
4824  *
4825  * Tensor<2, dim> grad_u;
4826  * for (unsigned int d = 0; d < dim; ++d)
4827  * grad_u[d] = inputs.solution_gradients[q][d];
4828  * const SymmetricTensor<2, dim> strain_rate = symmetrize(grad_u);
4829  * computed_quantities[q](dim + 2) =
4830  * 2 * EquationData::eta * strain_rate * strain_rate;
4831  *
4832  * computed_quantities[q](dim + 3) = partition;
4833  * }
4834  * }
4835  *
4836  *
4837  * @endcode
4838  *
4839  * The <code>output_results()</code> function has a similar task to the one
4840  * in @ref step_31 "step-31". However, here we are going to demonstrate a different
4841  * technique on how to merge output from different DoFHandler objects. The
4842  * way we're going to achieve this recombination is to create a joint
4843  * DoFHandler that collects both components, the Stokes solution and the
4844  * temperature solution. This can be nicely done by combining the finite
4845  * elements from the two systems to form one FESystem, and let this
4846  * collective system define a new DoFHandler object. To be sure that
4847  * everything was done correctly, we perform a sanity check that ensures
4848  * that we got all the dofs from both Stokes and temperature even in the
4849  * combined system. We then combine the data vectors. Unfortunately, there
4850  * is no straightforward relation that tells us how to sort the Stokes and
4851  * temperature vectors into the joint vector. The way we can get around this
4852  * trouble is to rely on the information collected in the FESystem. For each
4853  * dof on a cell, the joint finite element knows to which equation component
4854  * (velocity component, pressure, or temperature) it belongs – that's the
4855  * information we need! So we step through all cells (with iterators into
4856  * all three DoFHandlers moving in sync), and for each joint cell dof, we
4857  * read out that component using the FiniteElement::system_to_base_index
4858  * function (see there for a description of what the various parts of its
4859  * return value contain). We also need to keep track of whether we're on a
4860  * Stokes dof or a temperature dof, which is contained in
4861  * joint_fe.system_to_base_index(i).first.first. Eventually, the dof_indices
4862  * data structures on each of the three systems tell us what the relation
4863  * between global vector and local dofs looks like on the present cell,
4864  * which concludes this tedious work. We make sure that each processor only
4865  * works on the subdomain it owns locally (and not on ghost or artificial
4866  * cells) when building the joint solution vector. The same will then have
4867  * to be done in DataOut::build_patches(), but that function does so
4868  * automatically.
4869  *
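 * As a brief illustration of the piece of information we rely on below (a
 * sketch only; the loop further down is the authoritative version), the
 * return value of FiniteElement::system_to_base_index for a shape function
 * index <code>i</code> of the joint element decomposes like this:
 * @code
 * const std::pair<std::pair<unsigned int, unsigned int>, unsigned int>
 *   base_index = joint_fe.system_to_base_index(i);
 * // base_index.first.first  : the base element shape function i belongs to
 * //                           (0 = Stokes part, 1 = temperature part)
 * // base_index.first.second : which copy of that base element (always 0 here)
 * // base_index.second       : index of the shape function within that base
 * //                           element, i.e., the index into
 * //                           local_stokes_dof_indices or
 * //                           local_temperature_dof_indices
 * @endcode
 *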
4870 
4871  *
4872  * What we end up with is a set of patches that we can write using the
4873  * functions in DataOutBase in a variety of output formats. Here, we then
4874  * have to pay attention that what each processor writes is really only its
4875  * own part of the domain, i.e. we will want to write each processor's
4876  * contribution into a separate file. This we do by adding an additional
4877  * number to the filename when we write the solution. This is not really
4878  * new; we did it similarly in @ref step_40 "step-40". Note that we write in the compressed
4879  * format @p .vtu instead of plain vtk files, which saves quite some
4880  * storage.
4881  *
4882 
4883  *
4884  * All the rest of the work is done in the PostProcessor class.
4885  *
4886  * @code
4887  * template <int dim>
4888  * void BoussinesqFlowProblem<dim>::output_results()
4889  * {
4890  * TimerOutput::Scope timer_section(computing_timer, "Postprocessing");
4891  *
4892  * const FESystem<dim> joint_fe(stokes_fe, 1, temperature_fe, 1);
4893  *
4894  * DoFHandler<dim> joint_dof_handler(triangulation);
4895  * joint_dof_handler.distribute_dofs(joint_fe);
4896  * Assert(joint_dof_handler.n_dofs() ==
4897  * stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(),
4898  * ExcInternalError());
4899  *
4900  * TrilinosWrappers::MPI::Vector joint_solution;
4901  * joint_solution.reinit(joint_dof_handler.locally_owned_dofs(),
4902  * MPI_COMM_WORLD);
4903  *
4904  * {
4905  * std::vector<types::global_dof_index> local_joint_dof_indices(
4906  * joint_fe.dofs_per_cell);
4907  * std::vector<types::global_dof_index> local_stokes_dof_indices(
4908  * stokes_fe.dofs_per_cell);
4909  * std::vector<types::global_dof_index> local_temperature_dof_indices(
4910  * temperature_fe.dofs_per_cell);
4911  *
4912  * typename DoFHandler<dim>::active_cell_iterator
4913  * joint_cell = joint_dof_handler.begin_active(),
4914  * joint_endc = joint_dof_handler.end(),
4915  * stokes_cell = stokes_dof_handler.begin_active(),
4916  * temperature_cell = temperature_dof_handler.begin_active();
4917  * for (; joint_cell != joint_endc;
4918  * ++joint_cell, ++stokes_cell, ++temperature_cell)
4919  * if (joint_cell->is_locally_owned())
4920  * {
4921  * joint_cell->get_dof_indices(local_joint_dof_indices);
4922  * stokes_cell->get_dof_indices(local_stokes_dof_indices);
4923  * temperature_cell->get_dof_indices(local_temperature_dof_indices);
4924  *
4925  * for (unsigned int i = 0; i < joint_fe.dofs_per_cell; ++i)
4926  * if (joint_fe.system_to_base_index(i).first.first == 0)
4927  * {
4928  * Assert(joint_fe.system_to_base_index(i).second <
4929  * local_stokes_dof_indices.size(),
4930  * ExcInternalError());
4931  *
4932  * joint_solution(local_joint_dof_indices[i]) = stokes_solution(
4933  * local_stokes_dof_indices[joint_fe.system_to_base_index(i)
4934  * .second]);
4935  * }
4936  * else
4937  * {
4938  * Assert(joint_fe.system_to_base_index(i).first.first == 1,
4939  * ExcInternalError());
4940  * Assert(joint_fe.system_to_base_index(i).second <
4941  * local_temperature_dof_indices.size(),
4942  * ExcInternalError());
4943  * joint_solution(local_joint_dof_indices[i]) =
4944  * temperature_solution(
4945  * local_temperature_dof_indices
4946  * [joint_fe.system_to_base_index(i).second]);
4947  * }
4948  * }
4949  * }
4950  *
4951  * joint_solution.compress(VectorOperation::insert);
4952  *
4953  * IndexSet locally_relevant_joint_dofs(joint_dof_handler.n_dofs());
4954  * DoFTools::extract_locally_relevant_dofs(joint_dof_handler,
4955  * locally_relevant_joint_dofs);
4956  * TrilinosWrappers::MPI::Vector locally_relevant_joint_solution;
4957  * locally_relevant_joint_solution.reinit(locally_relevant_joint_dofs,
4958  * MPI_COMM_WORLD);
4959  * locally_relevant_joint_solution = joint_solution;
4960  *
4961  * Postprocessor postprocessor(Utilities::MPI::this_mpi_process(
4962  * MPI_COMM_WORLD),
4963  * stokes_solution.block(1).min());
4964  *
4965  * DataOut<dim> data_out;
4966  * data_out.attach_dof_handler(joint_dof_handler);
4967  * data_out.add_data_vector(locally_relevant_joint_solution, postprocessor);
4968  * data_out.build_patches();
4969  *
4970  * static int out_index = 0;
4971  * data_out.write_vtu_with_pvtu_record(
4972  * "./", "solution", out_index, MPI_COMM_WORLD, 5);
4973  *
4974  * out_index++;
4975  * }
4976  *
4977  *
4978  *
4979  * @endcode
4980  *
4981  *
4982  * <a name="BoussinesqFlowProblemrefine_mesh"></a>
4983  * <h4>BoussinesqFlowProblem::refine_mesh</h4>
4984  *
4985 
4986  *
4987  * This function isn't really new either. Since the <code>setup_dofs</code>
4988  * function that we call in the middle has its own timer section, we split
4989  * timing this function into two sections. It will also allow us to easily
4990  * identify which of the two is more expensive.
4991  *
4992 
4993  *
4994  * One thing of note, however, is that we only want to compute error
4995  * indicators on the locally owned subdomain. In order to achieve this, we
4996  * pass one additional argument to the KellyErrorEstimator::estimate
4997  * function. Note that the vector for error estimates is resized to the
4998  * number of active cells present on the current process, which is less than
4999  * the total number of active cells on all processors (but more than the
5000  * number of locally owned active cells); each processor only has a few
5001  * coarse cells around the locally owned ones, as also explained in @ref step_40 "step-40".
5002  *
5003 
5004  *
5005  * The local error estimates are then handed to a %parallel version of
5006  * GridRefinement (in namespace parallel::distributed::GridRefinement, see
5007  * also @ref step_40 "step-40") which looks at the errors and finds the cells that need
5008  * refinement by comparing the error values across processors. As in
5009  * @ref step_31 "step-31", we want to limit the maximum grid level. So in case some cells
5010  * have been marked that are already at the finest level, we simply clear
5011  * the refine flags.
5012  *
5013  * @code
5014  * template <int dim>
5015  * void
5016  * BoussinesqFlowProblem<dim>::refine_mesh(const unsigned int max_grid_level)
5017  * {
5018  * parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
5019  * temperature_trans(temperature_dof_handler);
5020  * parallel::distributed::SolutionTransfer<dim,
5021  * TrilinosWrappers::MPI::BlockVector>
5022  * stokes_trans(stokes_dof_handler);
5023  *
5024  * {
5025  * TimerOutput::Scope timer_section(computing_timer,
5026  * "Refine mesh structure, part 1");
5027  *
5028  * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
5029  *
5030  * KellyErrorEstimator<dim>::estimate(
5031  * temperature_dof_handler,
5032  * QGauss<dim - 1>(parameters.temperature_degree + 1),
5033  * std::map<types::boundary_id, const Function<dim> *>(),
5034  * temperature_solution,
5035  * estimated_error_per_cell,
5036  * ComponentMask(),
5037  * nullptr,
5038  * 0,
5039  * triangulation.locally_owned_subdomain());
5040  *
5041  * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
5042  * triangulation, estimated_error_per_cell, 0.3, 0.1);
5043  *
5044  * if (triangulation.n_levels() > max_grid_level)
5045  * for (typename Triangulation<dim>::active_cell_iterator cell =
5046  * triangulation.begin_active(max_grid_level);
5047  * cell != triangulation.end();
5048  * ++cell)
5049  * cell->clear_refine_flag();
5050  *
5051  * @endcode
5052  *
5053  * With all flags marked as necessary, we can then tell the
5054  * parallel::distributed::SolutionTransfer objects to get ready to
5055  * transfer data from one mesh to the next, which they will do when
5056  * notified by the
5057  * Triangulation object as part of the @p execute_coarsening_and_refinement() call.
5058  * The syntax is similar to the non-%parallel solution transfer (with the
5059  * exception that here a pointer to the vector entries is enough). The
5060  * remainder of the function further down below is then concerned with
5061  * setting up the data structures again after mesh refinement and
5062  * restoring the solution vectors on the new mesh.
5063  *
5064  * @code
5065  * std::vector<const TrilinosWrappers::MPI::Vector *> x_temperature(2);
5066  * x_temperature[0] = &temperature_solution;
5067  * x_temperature[1] = &old_temperature_solution;
5068  * std::vector<const TrilinosWrappers::MPI::BlockVector *> x_stokes(2);
5069  * x_stokes[0] = &stokes_solution;
5070  * x_stokes[1] = &old_stokes_solution;
5071  *
5072  * triangulation.prepare_coarsening_and_refinement();
5073  *
5074  * temperature_trans.prepare_for_coarsening_and_refinement(x_temperature);
5075  * stokes_trans.prepare_for_coarsening_and_refinement(x_stokes);
5076  *
5077  * triangulation.execute_coarsening_and_refinement();
5078  * }
5079  *
5080  * setup_dofs();
5081  *
5082  * {
5083  * TimerOutput::Scope timer_section(computing_timer,
5084  * "Refine mesh structure, part 2");
5085  *
5086  * {
5087  * TrilinosWrappers::MPI::Vector distributed_temp1(temperature_rhs);
5088  * TrilinosWrappers::MPI::Vector distributed_temp2(temperature_rhs);
5089  *
5090  * std::vector<TrilinosWrappers::MPI::Vector *> tmp(2);
5091  * tmp[0] = &(distributed_temp1);
5092  * tmp[1] = &(distributed_temp2);
5093  * temperature_trans.interpolate(tmp);
5094  *
5095  * @endcode
5096  *
5097  * enforce constraints to make the interpolated solution conforming on
5098  * the new mesh:
5099  *
5100  * @code
5101  * temperature_constraints.distribute(distributed_temp1);
5102  * temperature_constraints.distribute(distributed_temp2);
5103  *
5104  * temperature_solution = distributed_temp1;
5105  * old_temperature_solution = distributed_temp2;
5106  * }
5107  *
5108  * {
5109  * TrilinosWrappers::MPI::BlockVector distributed_stokes(stokes_rhs);
5110  * TrilinosWrappers::MPI::BlockVector old_distributed_stokes(stokes_rhs);
5111  *
5112  * std::vector<TrilinosWrappers::MPI::BlockVector *> stokes_tmp(2);
5113  * stokes_tmp[0] = &(distributed_stokes);
5114  * stokes_tmp[1] = &(old_distributed_stokes);
5115  *
5116  * stokes_trans.interpolate(stokes_tmp);
5117  *
5118  * @endcode
5119  *
5120  * enforce constraints to make the interpolated solution conforming on
5121  * the new mesh:
5122  *
5123  * @code
5124  * stokes_constraints.distribute(distributed_stokes);
5125  * stokes_constraints.distribute(old_distributed_stokes);
5126  *
5127  * stokes_solution = distributed_stokes;
5128  * old_stokes_solution = old_distributed_stokes;
5129  * }
5130  * }
5131  * }
5132  *
5133  *
5134  *
5135  * @endcode
5136  *
5137  *
5138  * <a name="BoussinesqFlowProblemrun"></a>
5139  * <h4>BoussinesqFlowProblem::run</h4>
5140  *
5141 
5142  *
5143  * This is the final and controlling function in this class. It, in fact,
5144  * runs the entire rest of the program and is, once more, very similar to
5145  * @ref step_31 "step-31". The only substantial difference is that we use a different mesh
5146  * now (a GridGenerator::hyper_shell instead of a simple cube geometry).
5147  *
5148  * @code
5149  * template <int dim>
5150  * void BoussinesqFlowProblem<dim>::run()
5151  * {
5152  * GridGenerator::hyper_shell(triangulation,
5153  * Point<dim>(),
5154  * EquationData::R0,
5155  * EquationData::R1,
5156  * (dim == 3) ? 96 : 12,
5157  * true);
5158  *
5159  * global_Omega_diameter = GridTools::diameter(triangulation);
5160  *
5161  * triangulation.refine_global(parameters.initial_global_refinement);
5162  *
5163  * setup_dofs();
5164  *
5165  * unsigned int pre_refinement_step = 0;
5166  *
5167  * start_time_iteration:
5168  *
5169  * {
5170  * TrilinosWrappers::MPI::Vector solution(
5171  * temperature_dof_handler.locally_owned_dofs());
5172  * @endcode
5173  *
5174  * VectorTools::project supports parallel vector classes with most
5175  * standard finite elements via deal.II's own native MatrixFree framework:
5176  * since we use standard Lagrange elements of moderate order this function
5177  * works well here.
5178  *
5179  * @code
5180  * VectorTools::project(temperature_dof_handler,
5181  * temperature_constraints,
5182  * QGauss<dim>(parameters.temperature_degree + 2),
5183  * EquationData::TemperatureInitialValues<dim>(),
5184  * solution);
5185  * @endcode
5186  *
5187  * Having so computed the current temperature field, let us set the member
5188  * variable that holds the temperature nodes. Strictly speaking, we really
5189  * only need to set <code>old_temperature_solution</code> since the first
5190  * thing we will do is to compute the Stokes solution that only requires
5191  * the previous time step's temperature field. That said, nothing good can
5192  * come from not initializing the other vectors as well (especially since
5193  * it's a relatively cheap operation and we only have to do it once at the
5194  * beginning of the program) if we ever want to extend our numerical
5195  * method or physical model, and so we initialize
5196  * <code>old_temperature_solution</code> and
5197  * <code>old_old_temperature_solution</code> as well. The assignment makes
5198  * sure that the vectors on the left hand side (which were initialized to
5199  * contain ghost elements as well) also get the correct ghost elements. In
5200  * other words, the assignment here requires communication between
5201  * processors:
5202  *
5203  * @code
5204  * temperature_solution = solution;
5205  * old_temperature_solution = solution;
5206  * old_old_temperature_solution = solution;
5207  * }
5208  *
5209  * timestep_number = 0;
5210  * time_step = old_time_step = 0;
5211  *
5212  * double time = 0;
5213  *
5214  * do
5215  * {
5216  * pcout << "Timestep " << timestep_number
5217  * << ": t=" << time / EquationData::year_in_seconds << " years"
5218  * << std::endl;
5219  *
5220  * assemble_stokes_system();
5221  * build_stokes_preconditioner();
5222  * assemble_temperature_matrix();
5223  *
5224  * solve();
5225  *
5226  * pcout << std::endl;
5227  *
5228  * if ((timestep_number == 0) &&
5229  * (pre_refinement_step < parameters.initial_adaptive_refinement))
5230  * {
5231  * refine_mesh(parameters.initial_global_refinement +
5232  * parameters.initial_adaptive_refinement);
5233  * ++pre_refinement_step;
5234  * goto start_time_iteration;
5235  * }
5236  * else if ((timestep_number > 0) &&
5237  * (timestep_number % parameters.adaptive_refinement_interval ==
5238  * 0))
5239  * refine_mesh(parameters.initial_global_refinement +
5240  * parameters.initial_adaptive_refinement);
5241  *
5242  * if ((parameters.generate_graphical_output == true) &&
5243  * (timestep_number % parameters.graphical_output_interval == 0))
5244  * output_results();
5245  *
5246  * @endcode
5247  *
5248  * In order to speed up linear solvers, we extrapolate the solutions
5249  * from the old time levels to the new one. This gives a very good
5250  * initial guess, cutting the number of iterations needed in solvers
5251  * by more than one half. We do not need to extrapolate in the last
5252  * iteration, so if we reached the final time, we stop here.
5253  *
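 * Written out (a restatement of what the two <code>sadd</code> calls below
 * compute, with @f$k_n@f$ the current and @f$k_{n-1}@f$ the previous time
 * step), the extrapolated initial guess for both the Stokes and the
 * temperature vector is
 * @f{eqnarray*}
 *   \mathbf x^{\text{guess}} =
 *     \left(1+\frac{k_n}{k_{n-1}}\right) \mathbf x^{n}
 *     - \frac{k_n}{k_{n-1}} \, \mathbf x^{n-1}.
 * @f}
 *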
5254 
5255  *
5256  * As the last thing during a time step (before actually bumping up
5257  * the number of the time step), we check whether the current time
5258  * step number is divisible by 100, and if so we let the computing
5259  * timer print a summary of CPU times spent so far.
5260  *
5261  * @code
5262  * if (time > parameters.end_time * EquationData::year_in_seconds)
5263  * break;
5264  *
5265  * TrilinosWrappers::MPI::BlockVector old_old_stokes_solution;
5266  * old_old_stokes_solution = old_stokes_solution;
5267  * old_stokes_solution = stokes_solution;
5268  * old_old_temperature_solution = old_temperature_solution;
5269  * old_temperature_solution = temperature_solution;
5270  * if (old_time_step > 0)
5271  * {
5272  * @endcode
5273  *
5274  * Trilinos sadd does not like ghost vectors even as input. Copy
5275  * into distributed vectors for now:
5276  *
5277  * @code
5278  * {
5279  * TrilinosWrappers::MPI::BlockVector distr_solution(stokes_rhs);
5280  * distr_solution = stokes_solution;
5281  * TrilinosWrappers::MPI::BlockVector distr_old_solution(stokes_rhs);
5282  * distr_old_solution = old_old_stokes_solution;
5283  * distr_solution.sadd(1. + time_step / old_time_step,
5284  * -time_step / old_time_step,
5285  * distr_old_solution);
5286  * stokes_solution = distr_solution;
5287  * }
5288  * {
5289  * TrilinosWrappers::MPI::Vector distr_solution(temperature_rhs);
5290  * distr_solution = temperature_solution;
5291  * TrilinosWrappers::MPI::Vector distr_old_solution(temperature_rhs);
5292  * distr_old_solution = old_old_temperature_solution;
5293  * distr_solution.sadd(1. + time_step / old_time_step,
5294  * -time_step / old_time_step,
5295  * distr_old_solution);
5296  * temperature_solution = distr_solution;
5297  * }
5298  * }
5299  *
5300  * if ((timestep_number > 0) && (timestep_number % 100 == 0))
5301  * computing_timer.print_summary();
5302  *
5303  * time += time_step;
5304  * ++timestep_number;
5305  * }
5306  * while (true);
5307  *
5308  * @endcode
5309  *
5310  * If we are generating graphical output, do so also for the last time
5311  * step unless we had just done so before we left the do-while loop.
5312  *
5313  * @code
5314  * if ((parameters.generate_graphical_output == true) &&
5315  * !((timestep_number - 1) % parameters.graphical_output_interval == 0))
5316  * output_results();
5317  * }
5318  * } // namespace Step32
5319  *
5320  *
5321  *
5322  * @endcode
5323  *
5324  *
5325  * <a name="Thecodemaincodefunction"></a>
5326  * <h3>The <code>main</code> function</h3>
5327  *
5328 
5329  *
5330  * The main function is short as usual and very similar to the one in
5331  * @ref step_31 "step-31". Since we use a parameter file which is specified as an argument in
5332  * the command line, we have to read it in here and pass it on to the
5333  * Parameters class for parsing. If no filename is given in the command line,
5334  * we simply use the <code>step-32.prm</code> file which is distributed
5335  * together with the program.
5336  *
5337 
5338  *
5339  * Because 3d computations are simply very slow unless you throw a lot of
5340  * processors at them, the program defaults to 2d. You can get the 3d version
5341  * by changing the constant dimension below to 3.
5342  *
5343  * @code
5344  * int main(int argc, char *argv[])
5345  * {
5346  * try
5347  * {
5348  * using namespace Step32;
5349  * using namespace dealii;
5350  *
5351  * Utilities::MPI::MPI_InitFinalize mpi_initialization(
5352  * argc, argv, numbers::invalid_unsigned_int);
5353  *
5354  * std::string parameter_filename;
5355  * if (argc >= 2)
5356  * parameter_filename = argv[1];
5357  * else
5358  * parameter_filename = "step-32.prm";
5359  *
5360  * const int dim = 2;
5361  * BoussinesqFlowProblem<dim>::Parameters parameters(parameter_filename);
5362  * BoussinesqFlowProblem<dim> flow_problem(parameters);
5363  * flow_problem.run();
5364  * }
5365  * catch (std::exception &exc)
5366  * {
5367  * std::cerr << std::endl
5368  * << std::endl
5369  * << "----------------------------------------------------"
5370  * << std::endl;
5371  * std::cerr << "Exception on processing: " << std::endl
5372  * << exc.what() << std::endl
5373  * << "Aborting!" << std::endl
5374  * << "----------------------------------------------------"
5375  * << std::endl;
5376  *
5377  * return 1;
5378  * }
5379  * catch (...)
5380  * {
5381  * std::cerr << std::endl
5382  * << std::endl
5383  * << "----------------------------------------------------"
5384  * << std::endl;
5385  * std::cerr << "Unknown exception!" << std::endl
5386  * << "Aborting!" << std::endl
5387  * << "----------------------------------------------------"
5388  * << std::endl;
5389  * return 1;
5390  * }
5391  *
5392  * return 0;
5393  * }
5394  * @endcode
5395 <a name="Results"></a><h1>Results</h1>
5396 
5397 
5398 When run, the program simulates convection in 3d in much the same way
5399 as @ref step_31 "step-31" did, though with an entirely different testcase.
5400 
5401 
5402 <a name="Comparisonofresultswithstep31"></a><h3>Comparison of results with step-31</h3>
5403 
5404 
5405 Before we go to this testcase, however, let us show a few results from a
5406 slightly earlier version of this program that was solving exactly the
5407 testcase we used in @ref step_31 "step-31", except that we now solve it in parallel and with
5408 much higher resolution. We show these results mainly for comparison.
5409 
5410 Here are two images that show this higher resolution if we choose a 3d
5411 computation in <code>main()</code> and if we set
5412 <code>initial_refinement=3</code> and
5413 <code>n_pre_refinement_steps=4</code>. At the time steps shown, the
5414 meshes had around 72,000 and 236,000 cells, for a total of 2,680,000
5415 and 8,250,000 degrees of freedom, respectively, more than an order of
5416 magnitude more than we had available in @ref step_31 "step-31":
5417 
5418 <table align="center" class="doxtable">
5419  <tr>
5420  <td>
5421  <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.0.png" alt="">
5422  </td>
5423  </tr>
5424  <tr>
5425  <td>
5426  <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.1.png" alt="">
5427  </td>
5428  </tr>
5429 </table>
5430 
5431 The computation was done on a subset of 50 processors of the Brazos
5432 cluster at Texas A&amp;M University.
5433 
5434 
5435 <a name="Resultsfora2dcircularshelltestcase"></a><h3>Results for a 2d circular shell testcase</h3>
5436 
5437 
5438 Next, we will run @ref step_32 "step-32" with the parameter file in the directory with one
5439 change: we increase the final time to 1e9. Here we are using 16 processors. The
5440 command to launch is (note that @ref step_32 "step-32".prm is the default):
5441 
5442 <code>
5443 <pre>
5444 $ mpirun -np 16 ./step-32
5445 </pre>
5446 </code>
5447 
5448 Note that running a job on a cluster typically requires going through a job
5449 scheduler, which we won't discuss here. The output will look roughly like
5450 this:
5451 
5452 <code>
5453 <pre>
5454 $ mpirun -np 16 ./step-32
5455 Number of active cells: 12,288 (on 6 levels)
5456 Number of degrees of freedom: 186,624 (99,840+36,864+49,920)
5457 
5458 Timestep 0: t=0 years
5459 
5460  Rebuilding Stokes preconditioner...
5461  Solving Stokes system... 41 iterations.
5462  Maximal velocity: 60.4935 cm/year
5463  Time step: 18166.9 years
5464  17 CG iterations for temperature
5465  Temperature range: 973 4273.16
5466 
5467 Number of active cells: 15,921 (on 7 levels)
5468 Number of degrees of freedom: 252,723 (136,640+47,763+68,320)
5469 
5470 Timestep 0: t=0 years
5471 
5472  Rebuilding Stokes preconditioner...
5473  Solving Stokes system... 50 iterations.
5474  Maximal velocity: 60.3223 cm/year
5475  Time step: 10557.6 years
5476  19 CG iterations for temperature
5477  Temperature range: 973 4273.16
5478 
5479 Number of active cells: 19,926 (on 8 levels)
5480 Number of degrees of freedom: 321,246 (174,312+59,778+87,156)
5481 
5482 Timestep 0: t=0 years
5483 
5484  Rebuilding Stokes preconditioner...
5485  Solving Stokes system... 50 iterations.
5486  Maximal velocity: 57.8396 cm/year
5487  Time step: 5453.78 years
5488  18 CG iterations for temperature
5489  Temperature range: 973 4273.16
5490 
5491 Timestep 1: t=5453.78 years
5492 
5493  Solving Stokes system... 49 iterations.
5494  Maximal velocity: 59.0231 cm/year
5495  Time step: 5345.86 years
5496  18 CG iterations for temperature
5497  Temperature range: 973 4273.16
5498 
5499 Timestep 2: t=10799.6 years
5500 
5501  Solving Stokes system... 24 iterations.
5502  Maximal velocity: 60.2139 cm/year
5503  Time step: 5241.51 years
5504  17 CG iterations for temperature
5505  Temperature range: 973 4273.16
5506 
5507 [...]
5508 
5509 Timestep 100: t=272151 years
5510 
5511  Solving Stokes system... 21 iterations.
5512  Maximal velocity: 161.546 cm/year
5513  Time step: 1672.96 years
5514  17 CG iterations for temperature
5515  Temperature range: 973 4282.57
5516 
5517 Number of active cells: 56,085 (on 8 levels)
5518 Number of degrees of freedom: 903,408 (490,102+168,255+245,051)
5519 
5520 
5521 
5522 +---------------------------------------------+------------+------------+
5523 | Total wallclock time elapsed since start    |       115s |            |
5524 |                                             |            |            |
5525 | Section                         | no. calls |  wall time | % of total |
5526 +---------------------------------+-----------+------------+------------+
5527 | Assemble Stokes system          |       103 |      2.82s |       2.5% |
5528 | Assemble temperature matrices   |        12 |     0.452s |      0.39% |
5529 | Assemble temperature rhs        |       103 |      11.5s |        10% |
5530 | Build Stokes preconditioner     |        12 |      2.09s |       1.8% |
5531 | Solve Stokes system             |       103 |      90.4s |        79% |
5532 | Solve temperature system        |       103 |      1.53s |       1.3% |
5533 | Postprocessing                  |         3 |     0.532s |      0.46% |
5534 | Refine mesh structure, part 1   |        12 |      0.93s |      0.81% |
5535 | Refine mesh structure, part 2   |        12 |     0.384s |      0.33% |
5536 | Setup dof systems               |        13 |      2.96s |       2.6% |
5537 +---------------------------------+-----------+------------+------------+
5538 
5539 [...]
5540 
5541 +---------------------------------------------+------------+------------+
5542 | Total wallclock time elapsed since start    |  9.14e+04s |            |
5543 |                                             |            |            |
5544 | Section                         | no. calls |  wall time | % of total |
5545 +---------------------------------+-----------+------------+------------+
5546 | Assemble Stokes system          |     47045 |  2.05e+03s |       2.2% |
5547 | Assemble temperature matrices   |      4707 |       310s |      0.34% |
5548 | Assemble temperature rhs        |     47045 |   8.7e+03s |       9.5% |
5549 | Build Stokes preconditioner     |      4707 |  1.48e+03s |       1.6% |
5550 | Solve Stokes system             |     47045 |  7.34e+04s |        80% |
5551 | Solve temperature system        |     47045 |  1.46e+03s |       1.6% |
5552 | Postprocessing                  |      1883 |       222s |      0.24% |
5553 | Refine mesh structure, part 1   |      4706 |       641s |       0.7% |
5554 | Refine mesh structure, part 2   |      4706 |       259s |      0.28% |
5555 | Setup dof systems               |      4707 |  1.86e+03s |         2% |
5556 +---------------------------------+-----------+------------+------------+
5557 </pre>
5558 </code>
5559 
5560 The simulation terminates when the time reaches the 1 billion years
5561 selected in the input file. You can extrapolate from this how long a
5562 simulation would take for a different final time (the time step size
5563 ultimately settles on somewhere around 20,000 years, so computing for
5564 two billion years will take 100,000 time steps, give or take 20%). As
5565 can be seen here, we spend most of the compute time in assembling
5566 linear systems and &mdash; above all &mdash; in solving Stokes
5567 systems.
5568 
5569 
5570 To demonstrate the output, we show the solution from every 1250th time step here:
5571 <table>
5572  <tr>
5573  <td>
5574  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-000.png" alt="">
5575  </td>
5576  <td>
5577  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-050.png" alt="">
5578  </td>
5579  <td>
5580  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-100.png" alt="">
5581  </td>
5582  </tr>
5583  <tr>
5584  <td>
5585  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-150.png" alt="">
5586  </td>
5587  <td>
5588  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-200.png" alt="">
5589  </td>
5590  <td>
5591  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-250.png" alt="">
5592  </td>
5593  </tr>
5594  <tr>
5595  <td>
5596  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-300.png" alt="">
5597  </td>
5598  <td>
5599  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-350.png" alt="">
5600  </td>
5601  <td>
5602  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-400.png" alt="">
5603  </td>
5604  </tr>
5605  <tr>
5606  <td>
5607  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-450.png" alt="">
5608  </td>
5609  <td>
5610  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-500.png" alt="">
5611  </td>
5612  <td>
5613  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-550.png" alt="">
5614  </td>
5615  </tr>
5616  <tr>
5617  <td>
5618  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-600.png" alt="">
5619  </td>
5620  <td>
5621  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-cells.png" alt="">
5622  </td>
5623  <td>
5624  <img src="https://www.dealii.org/images/steps/developer/step-32-2d-partition.png" alt="">
5625  </td>
5626  </tr>
5627 </table>
5628 
5629 The last two images show the grid as well as the partitioning of the mesh for
5630 the same computation with 16 subdomains and 16 processors. The full dynamics of
5631 this simulation are really only visible by looking at an animation, for example
5632 the one <a
5633 href="https://www.dealii.org/images/steps/developer/step-32-2d-temperature.webm">shown
5634 on this site</a>. This animation is well worth watching due to its artistic quality
5635 and entrancing depiction of the evolution of the magma plumes.
5636 
5637 If you watch the movie, you'll see that the convection pattern goes
5638 through several stages: First, it gets rid of the unstable temperature
5639 layering with the hot material overlain by the dense cold
5640 material. After this great driver is removed and we have a sort of
5641 stable situation, a few blobs start to separate from the hot boundary
5642 layer at the inner ring and rise up, with a few cold fingers also
5643 dropping down from the outer boundary layer. During this phase, the solution
5644 remains mostly symmetric, reflecting the 12-fold symmetry of the
5645 original mesh. In a final phase, the fluid enters vigorous chaotic
5646 stirring in which all symmetries are lost. This is a pattern that then
5647 continues to dominate the flow.
5648 
5649 These different phases can also be identified if we look at the
5650 maximal velocity as a function of time in the simulation:
5651 
5652 <img src="https://www.dealii.org/images/steps/developer/step-32.2d.t_vs_vmax.png" alt="">
5653 
5654 Here, the velocity (shown in centimeters per year) becomes very large,
5655 on the order of several meters per year, at the beginning when the
5656 temperature layering is unstable. It then calms down to relatively
5657 small values before picking up again in the chaotic stirring
5658 regime. There, it remains in the range of 10-40 centimeters per year,
5659 well within the physically expected range.
5660 
5661 
5662 <a name="Resultsfora3dsphericalshelltestcase"></a><h3>Results for a 3d spherical shell testcase</h3>
5663 
5664 
5665 3d computations are very expensive. Furthermore, as
5666 seen above, interesting behavior only starts after quite a long time,
5667 requiring more CPU hours than are available on a typical
5668 cluster. Consequently, rather than showing a complete simulation here,
5669 let us simply show a couple of pictures we have obtained using the
5670 successor to this program, called <i>ASPECT</i> (short for <i>Advanced
5671 %Solver for Problems in Earth's ConvecTion</i>), that is being
5672 developed independently of deal.II and that already incorporates some
5673 of the extensions discussed below. The following two pictures show
5674 isocontours of the temperature and the partition of the domain (along
5675 with the mesh) onto 512 processors:
5676 
5677 <p align="center">
5678 <img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.solution.png" alt="">
5679 
5680 <img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.partition.png" alt="">
5681 </p>
5682 
5683 
5684 <a name="extensions"></a>
5685 <a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
5686 
5687 
5688 There are many directions in which this program could be extended. As
5689 mentioned at the end of the introduction, most of these are under active
5690 development in the <i>ASPECT</i> (short for <i>Advanced %Solver for Problems
5691 in Earth's ConvecTion</i>) code at the time this tutorial program is being
5692 finished. Specifically, the following are certainly topics that one should
5693 address to make the program more useful:
5694 
5695 <ul>
5696  <li> <b>Adiabatic heating/cooling:</b>
5697  The temperature field we get in our simulations after a while
5698  is mostly constant with boundary layers at the inner and outer
5699  boundary, and streamers of cold and hot material mixing
5700  everything. Yet, this doesn't match our expectation that things
5701  closer to the earth core should be hotter than closer to the
5702  surface. The reason is that the energy equation we have used does
5703  not include a term that describes adiabatic cooling and heating:
5704  rock, like gas, heats up as you compress it. Consequently, material
5705  that rises up cools adiabatically, and cold material that sinks down
5706  heats adiabatically. The correct temperature equation would
5707  therefore look somewhat like this:
5708  @f{eqnarray*}
5709  \frac{D T}{Dt}
5710  -
5711  \nabla \cdot \kappa \nabla T &=& \gamma + \tau\frac{Dp}{Dt},
5712  @f}
5713  or, expanding the advected derivative @f$\frac{D}{Dt} =
5714  \frac{\partial}{\partial t} + \mathbf u \cdot \nabla@f$:
5715  @f{eqnarray*}
5716  \frac{\partial T}{\partial t}
5717  +
5718  {\mathbf u} \cdot \nabla T
5719  -
5720  \nabla \cdot \kappa \nabla T &=& \gamma +
5721  \tau\left\{\frac{\partial
5722  p}{\partial t} + \mathbf u \cdot \nabla p \right\}.
5723  @f}
5724  In other words, as pressure increases in a rock volume
5725  (@f$\frac{Dp}{Dt}>0@f$) we get an additional heat source, and vice
5726  versa.
5727 
5728  The time derivative of the pressure is a bit awkward to
5729  implement. If necessary, one could approximate it using the fact,
5730  outlined in the introduction, that the pressure can be decomposed
5731  into a dynamic component due to temperature differences and the
5732  resulting flow, and a static component that results solely from the
5733  static pressure of the overlying rock. Since the latter is much
5734  bigger, one may approximate @f$p\approx p_{\text{static}}=-\rho_{\text{ref}}
5735  [1+\beta T_{\text{ref}}] \varphi@f$, and consequently
5736  @f$\frac{Dp}{Dt} \approx \left\{- \mathbf u \cdot \nabla \rho_{\text{ref}}
5737  [1+\beta T_{\text{ref}}]\varphi\right\} = \rho_{\text{ref}}
5738  [1+\beta T_{\text{ref}}] \mathbf u \cdot \mathbf g@f$.
5739  In other words, if the fluid is moving in the direction of gravity
5740  (downward) it will be compressed, and because in that case @f$\mathbf u
5741  \cdot \mathbf g > 0@f$ we get a positive heat source. Conversely, the
5742  fluid will cool down if it moves against the direction of gravity. (A small sketch of how such a source term could be evaluated is shown after this list.)
5743 
5744 <li> <b>Compressibility:</b>
5745  As already hinted at in the temperature model above,
5746  mantle rocks are not incompressible. Rather, given the enormous pressures in
5747  the earth mantle (at the core-mantle boundary, the pressure is approximately
5748  140 GPa, equivalent to 1,400,000 times atmospheric pressure), rock actually
5749  does compress to something around 1.5 times the density it would have
5750  at surface pressure. Modeling this presents any number of
5751  difficulties. Primarily, the mass conservation equation is no longer
5752  @f$\textrm{div}\;\mathbf u=0@f$ but should read
5753  @f$\textrm{div}(\rho\mathbf u)=0@f$ where the density @f$\rho@f$ is now no longer
5754  spatially constant but depends on temperature and pressure. A consequence is
5755  that the model is now no longer linear; a linearized version of the Stokes
5756  equation is also no longer symmetric, requiring us to rethink preconditioners
5757  and, possibly, even the discretization. We won't go into detail here as to
5758  how this can be resolved.
5759 
5760 <li> <b>Nonlinear material models:</b> As already hinted at in various places,
5761  material parameters such as the density, the viscosity, and the various
5762  thermal parameters are not constant throughout the earth mantle. Rather,
5763  they nonlinearly depend on the pressure and temperature, and in the case of
5764  the viscosity on the strain rate @f$\varepsilon(\mathbf u)@f$. For complicated
5765  models, the only way to solve them accurately may be to actually
5766  iterate this dependence out in each time step, rather than simply freezing
5767  coefficients at values extrapolated from the previous time step(s). (A schematic of such a nonlinear iteration is sketched after this list.)
5768 
5769 <li> <b>Checkpoint/restart:</b> Running this program in 2d on a number of
5770  processors allows solving realistic models in a day or two. However, in 3d,
5771  compute times are so large that one runs into two typical problems: (i) on
5772  most compute clusters, the queuing system limits run times for individual
5773  jobs to 2 or 3 days; (ii) losing the results of a computation due to
5774  hardware failures, misconfigurations, or power outages is a shame when
5775  running on hundreds of processors for a couple of days. Both of these
5776  problems can be addressed by periodically saving the state of the program
5777  and, if necessary, restarting the program at this point. This technique is
5778  commonly called <i>checkpoint/restart</i> and it requires that the entire
5779  state of the program is written to a permanent storage location (e.g. a hard
5780  drive). Given the complexity of the data structures of this program, this is
5781  not entirely trivial (it may also involve writing gigabytes or more of
5782  data), but it can be made easier by realizing that one can save the state
5783  between two time steps where it essentially only consists of the mesh and
5784  solution vectors; during restart one would then first re-enumerate degrees
5785  of freedom in the same way as done before and then re-assemble
5786  matrices. Nevertheless, given the distributed nature of the data structures
5787  involved here, saving and restoring the state of a program is not
5788  trivial. An additional complexity is introduced by the fact that one may
5789  want to change the number of processors between runs, for example because
5790  one may wish to continue computing on a mesh that is finer than the one used
5791  to precompute a starting temperature field at an intermediate time. (A minimal checkpointing sketch based on deal.II's serialization facilities is shown after this list.)
5792 
5793 <li> <b>Predictive postprocessing:</b> The point of computations like this is
5794  not simply to solve the equations. Rather, it is typically the exploration
5795  of different physical models and their comparison with things that we can
5796  measure at the earth surface, in order to find which models are realistic
5797  and which are contradicted by reality. To this end, we need to compute
5798  quantities from our solution vectors that are related to what we can
5799  observe. Among these are, for example, heat fluxes at the surface of the
5800  earth, as well as seismic velocities throughout the mantle, as these affect
5801  earthquake waves that are recorded by seismographs. (A sketch of how the heat flux through the outer boundary could be computed is shown after this list.)
5802 
5803 <li> <b>Better refinement criteria:</b> As can be seen above for the
5804 3d case, the mesh in 3d is primarily refined along the inner
5805 boundary. This is because the boundary layer there is stronger than
5806 any other transition in the domain, leading us to refine there almost
5807 exclusively and basically not at all following the plumes. One
5808 certainly needs refinement criteria that track the parts of the
5809 solution we are really interested in better than the criterion used
5810 here (the KellyErrorEstimator applied to the temperature) is
5811 able to.
5812 </ul>
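
To make the adiabatic heating formula above a bit more concrete, the following
is a minimal sketch (not part of the actual tutorial program) of how the
approximated source term could be evaluated at a single quadrature point when
assembling the temperature right hand side. It assumes that the material
constants and the gravity_vector() function defined at the top of this program
are in scope (they may need to be qualified with their namespace), and the
coefficient @f$\tau@f$ is simply passed in as a number:

@code
// Hypothetical helper: evaluate tau * Dp/Dt, using the approximation
//   Dp/Dt ~= rho_ref [1 + beta T_ref] u . g
// derived above.
template <int dim>
double adiabatic_heating(const Tensor<1, dim> &velocity,
                         const Point<dim> &    point,
                         const double          tau)
{
  const Tensor<1, dim> g = gravity_vector(point);

  const double Dp_Dt = reference_density *
                       (1 + expansion_coefficient * reference_temperature) *
                       (velocity * g);

  return tau * Dp_Dt;
}
@endcode

This value would then be multiplied by the test function and the JxW weight
and added to the local right hand side of the temperature system.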
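
For the nonlinear material models mentioned above, the structure of a single
time step in which the coefficient dependence is iterated out could,
schematically, look as follows. The helper functions and the two constants are
hypothetical placeholders and do not exist in this program:

@code
// Schematic Picard (fixed point) iteration within one time step:
// re-evaluate the temperature-, pressure-, and strain-rate-dependent
// coefficients, re-solve the Stokes system, and repeat until the
// nonlinear residual is small enough.
for (unsigned int iteration = 0; iteration < max_nonlinear_iterations;
     ++iteration)
  {
    update_material_model(temperature_solution, stokes_solution);
    const double nonlinear_residual = assemble_and_solve_stokes();

    if (nonlinear_residual < nonlinear_tolerance)
      break;
  }
@endcode

More sophisticated schemes (Newton's method, for example) converge faster but
also require linearizing the coefficient dependencies themselves.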
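
A simple form of checkpointing could build on the serialization support of the
parallel::distributed::Triangulation and parallel::distributed::SolutionTransfer
classes that this program already uses during mesh refinement. The sketch below
is restricted to the temperature field; the Stokes vectors and scalar state
such as the current time and time step number would have to be stored as well,
and the function name create_snapshot() is made up for the purpose of
illustration:

@code
#include <deal.II/distributed/solution_transfer.h>

template <int dim>
void BoussinesqFlowProblem<dim>::create_snapshot()
{
  // Attach the temperature vector to the triangulation ...
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    temperature_transfer(temperature_dof_handler);
  temperature_transfer.prepare_for_serialization(temperature_solution);

  // ... and write the mesh together with the attached data to disk. On
  // restart, one would call triangulation.load(), redistribute degrees of
  // freedom exactly as during setup, and then call
  // temperature_transfer.deserialize() on a correctly sized vector.
  triangulation.save("restart.mesh");
}
@endcode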
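
Finally, as an example of predictive postprocessing, the heat flux through the
outer boundary can be obtained by integrating @f$-k \nabla T \cdot \mathbf n@f$
over all faces on that boundary, where @f$k=\rho_{\text{ref}} c_p \kappa@f$ is
the thermal conductivity corresponding to the diffusivity @f$\kappa@f$ used in
this program. The following hypothetical member function
compute_outer_heat_flux() sketches this; it reuses the member variables of this
program, assumes the material constants from the top of the program are visible
(possibly requiring namespace qualification), and assumes that the outer
boundary carries the boundary indicator 1 assigned by GridGenerator::hyper_shell:

@code
template <int dim>
double BoussinesqFlowProblem<dim>::compute_outer_heat_flux() const
{
  const QGauss<dim - 1> face_quadrature(temperature_fe.degree + 1);
  FEFaceValues<dim>     fe_face_values(temperature_fe,
                                   face_quadrature,
                                   update_gradients | update_normal_vectors |
                                     update_JxW_values);
  std::vector<Tensor<1, dim>> temperature_gradients(face_quadrature.size());

  // Thermal conductivity k = rho_ref c_p kappa (kappa is a diffusivity):
  const double conductivity = reference_density * specific_heat * kappa;

  double local_flux = 0;
  for (const auto &cell : temperature_dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      for (unsigned int f = 0; f < GeometryInfo<dim>::faces_per_cell; ++f)
        if (cell->face(f)->at_boundary() &&
            (cell->face(f)->boundary_id() == 1))
          {
            fe_face_values.reinit(cell, f);
            fe_face_values.get_function_gradients(temperature_solution,
                                                  temperature_gradients);

            for (unsigned int q = 0; q < face_quadrature.size(); ++q)
              local_flux += -conductivity * temperature_gradients[q] *
                            fe_face_values.normal_vector(q) *
                            fe_face_values.JxW(q);
          }

  // Each processor has only integrated over its own cells:
  return Utilities::MPI::sum(local_flux, MPI_COMM_WORLD);
}
@endcode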
5813 
5814 
5815 There are many other ways to extend the current program. However, rather than
5816 discussing them here, let us point to the much larger open
5817 source code ASPECT (see https://aspect.geodynamics.org/ ) that constitutes the
5818 further development of @ref step_32 "step-32" and that already includes many such possible
5819 extensions.
5820  *
5821  *
5822 <a name="PlainProg"></a>
5823 <h1> The plain program</h1>
5824 @include "step-32.cc"
5825 */