1508 * constexpr double kappa = 1e-6;
1516 * constexpr double R0 = 6371000. - 2890000.;
1517 * constexpr double R1 = 6371000. - 35000.;
1519 * constexpr double T0 = 4000 + 273;
1520 * constexpr double T1 = 700 + 273;
1539 * template <int dim>
1542 * const double r = p.norm();
1543 * return -(1.245e-6 * r + 7.714e13 / r / r) * p / r;
1548 * template <int dim>
1557 * const unsigned int component = 0) const override;
1559 * virtual void vector_value(const Point<dim> &p,
1565 * template <int dim>
1567 * const unsigned int) const
1569 * const double r = p.norm();
1570 * const double h = R1 - R0;
1572 * const double s = (r - R0) / h;
1575 * const double phi = std::atan2(p[0], p[1]);
1582 * template <int dim>
1587 * for (unsigned int c = 0; c < this->n_components; ++c)
1610 * so we convert to years using the factor defined here:
1613 * const double year_in_seconds = 60 * 60 * 24 * 365.2425;
1615 * } // namespace EquationData
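As a small, purely illustrative example (the variable names below are made up and not part of the program), a velocity computed in SI units of meters per second could be rescaled to centimeters per year for output using this factor:
@code
  // Hypothetical conversion from m/s to cm/year using the factor above:
  const double velocity_m_per_s      = 1.5e-9;
  const double velocity_cm_per_year  =
    velocity_m_per_s * EquationData::year_in_seconds * 100.;
@endcode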
1622 * <a name="step_32-PreconditioningtheStokessystem"></a>
1623 * <h3>Preconditioning the Stokes system</h3>
1627 * This namespace implements the preconditioner. As discussed in the
1628 * introduction, this preconditioner differs in a number of key portions
1629 * from the one used in @ref step_31 "step-31". Specifically, it is a right preconditioner,
1630 * implementing the matrix
1632 * \left(\begin{array}{cc}A^{-1} & -A^{-1}B^TS^{-1} \\ 0 & S^{-1}
1634 * \end{array}\right)
1636 * where the two inverse matrix operations
1637 * are approximated by linear solvers or, if the right flag is given to the
1638 * constructor of this class, by a single AMG V-cycle for the velocity
1639 * block. The three code blocks of the <code>vmult</code> function implement
1640 * the multiplications with the three blocks of this preconditioner matrix
1641 * and should be self explanatory if you have read through @ref step_31 "step-31" or the
1642 * discussion of composing solvers in @ref step_20 "step-20".
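To put this class into context, a right preconditioner of this kind is typically handed to a flexible Krylov solver. The following is only a rough usage sketch with assumed solver choice, tolerance, and variable names, not the program's actual solver code:
@code
  // Sketch: use the block preconditioner with FGMRES, a flexible method,
  // because the preconditioner itself contains an inner iterative solve.
  SolverControl solver_control(1000, 1e-8 * stokes_rhs.l2_norm());
  SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(solver_control);
  solver.solve(stokes_matrix,
               distributed_stokes_solution,
               stokes_rhs,
               preconditioner);
@endcode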
1645 * namespace LinearSolvers
1647 * template <class PreconditionerTypeA, class PreconditionerTypeMp>
1648 * class BlockSchurPreconditioner : public EnableObserverPointer
1651 * BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
1652 * const TrilinosWrappers::BlockSparseMatrix &Spre,
1653 * const PreconditionerTypeMp &Mppreconditioner,
1654 * const PreconditionerTypeA &Apreconditioner,
1655 * const bool do_solve_A)
1656 * : stokes_matrix(&S)
1657 * , stokes_preconditioner_matrix(&Spre)
1658 * , mp_preconditioner(Mppreconditioner)
1659 * , a_preconditioner(Apreconditioner)
1660 * , do_solve_A(do_solve_A)
1663 * void vmult(TrilinosWrappers::MPI::BlockVector &dst,
1664 * const TrilinosWrappers::MPI::BlockVector &src) const
1666 * TrilinosWrappers::MPI::Vector utmp(src.block(0));
1669 * SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
1671 * SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);
1673 * solver.solve(stokes_preconditioner_matrix->block(1, 1),
1676 * mp_preconditioner);
1678 * dst.block(1) *= -1.0;
1682 * stokes_matrix->block(0, 1).vmult(utmp, dst.block(1));
1684 * utmp.add(src.block(0));
1687 * if (do_solve_A == true)
1689 * SolverControl solver_control(5000, utmp.l2_norm() * 1e-2);
1690 * TrilinosWrappers::SolverCG solver(solver_control);
1691 * solver.solve(stokes_matrix->block(0, 0),
1694 * a_preconditioner);
1697 * a_preconditioner.vmult(dst.block(0), utmp);
1701 * const ObserverPointer<const TrilinosWrappers::BlockSparseMatrix>
1703 * const ObserverPointer<const TrilinosWrappers::BlockSparseMatrix>
1704 * stokes_preconditioner_matrix;
1705 * const PreconditionerTypeMp &mp_preconditioner;
1706 * const PreconditionerTypeA &a_preconditioner;
1707 * const bool do_solve_A;
1709 * } // namespace LinearSolvers
1716 * <a name="step_32-Definitionofassemblydatastructures"></a>
1717 * <h3>Definition of assembly data structures</h3>
1721 * As described in the introduction, we will use the WorkStream mechanism
1722 * discussed in the @ref threads topic to parallelize operations among the
1723 * processors of a single machine. The WorkStream class requires that data
1724 * is passed around in two kinds of data structures, one for scratch data
1725 * and one to pass data from the assembly function to the function that
1726 * copies local contributions into global objects.
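In schematic form, and with the type names ScratchData and CopyData used only as placeholders, such a WorkStream-driven assembly loop looks roughly like this:
@code
  // Schematic WorkStream loop: the worker computes cell contributions into
  // a CopyData object, using the ScratchData object for temporaries; the
  // copier then writes the result into the global objects.
  WorkStream::run(
    dof_handler.begin_active(),
    dof_handler.end(),
    [&](const typename DoFHandler<dim>::active_cell_iterator &cell,
        ScratchData &scratch,
        CopyData    &copy) { /* local assembly on 'cell' */ },
    [&](const CopyData &copy) { /* copy into the global matrix/vector */ },
    ScratchData(/* ... */),
    CopyData(/* ... */));
@endcode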
1730 * The following namespace (and the two sub-namespaces) contains a
1731 * collection of data structures that serve this purpose, one pair for each
1732 * of the four operations discussed in the introduction that we will want to
1733 * parallelize. Each assembly routine gets two sets of data: a Scratch array
1734 * that collects all the classes and arrays that are used for the
1735 * calculation of the cell contribution, and a CopyData array that keeps
1736 * local matrices and vectors which will be written into the global
1737 * matrix. Whereas CopyData is a container for the final data that is
1738 * written into the global matrices and vector (and, thus, absolutely
1739 * necessary), the Scratch arrays are merely there for performance reasons
1740 * — it would be much more expensive to set up an FEValues object on
1741 * each cell than to create it only once and update some derivative data.
1745 * @ref step_31 "step-31" had four assembly routines: One for the preconditioner matrix of
1746 * the Stokes system, one for the Stokes matrix and right hand side, one for
1747 * the temperature matrices and one for the right hand side of the
1748 * temperature equation. We here organize the scratch arrays and CopyData
1749 * objects for each of those four assembly components using a
1750 * <code>struct</code> environment (since we consider these as temporary
1751 * objects we pass around, rather than classes that implement functionality
1752 * of their own, though this is a more subjective point of view to
1753 * distinguish between <code>struct</code>s and <code>class</code>es).
1757 * Regarding the Scratch objects, each struct is equipped with a constructor
1758 * that creates an @ref FEValues object using the @ref FiniteElement,
1759 * Quadrature, @ref Mapping (which describes the interpolation of curved
1760 * boundaries), and @ref UpdateFlags instances. Moreover, we manually
1761 * implement a copy constructor (since the FEValues class is not copyable by
1762 * itself), and provide some additional vector fields that are used to hold
1763 * intermediate data during the computation of local contributions.
1767 * Let us start with the scratch arrays and, specifically, the one used for
1768 * assembly of the Stokes preconditioner:
1771 * namespace Assembly
1775 * template <int dim>
1776 * struct StokesPreconditioner
1778 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe,
1779 * const Quadrature<dim> &stokes_quadrature,
1780 * const Mapping<dim> &mapping,
1781 * const UpdateFlags update_flags);
1783 * StokesPreconditioner(const StokesPreconditioner &data);
1786 * FEValues<dim> stokes_fe_values;
1788 * std::vector<Tensor<2, dim>> grad_phi_u;
1789 * std::vector<double> phi_p;
1792 * template <int dim>
1793 * StokesPreconditioner<dim>::StokesPreconditioner(
1794 * const FiniteElement<dim> &stokes_fe,
1795 * const Quadrature<dim> &stokes_quadrature,
1796 * const Mapping<dim> &mapping,
1797 * const UpdateFlags update_flags)
1798 * : stokes_fe_values(mapping, stokes_fe, stokes_quadrature, update_flags)
1799 * , grad_phi_u(stokes_fe.n_dofs_per_cell())
1800 * , phi_p(stokes_fe.n_dofs_per_cell())
1805 * template <int dim>
1806 * StokesPreconditioner<dim>::StokesPreconditioner(
1807 * const StokesPreconditioner &scratch)
1808 * : stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
1809 * scratch.stokes_fe_values.get_fe(),
1810 * scratch.stokes_fe_values.get_quadrature(),
1811 * scratch.stokes_fe_values.get_update_flags())
1812 * , grad_phi_u(scratch.grad_phi_u)
1813 * , phi_p(scratch.phi_p)
1820 * The next one is the scratch object used for the assembly of the full
1821 * Stokes system. Observe that we derive the StokesSystem scratch class
1822 * from the StokesPreconditioner class above. We do this because all the
1823 * objects that are necessary for the assembly of the preconditioner are
1824 * also needed for the actual matrix system and right hand side, plus
1825 * some extra data. This makes the program more compact. Note also that
1826 * the assembly of the Stokes system and the temperature right hand side
1827 * further down requires data from temperature and velocity,
1828 * respectively, so we actually need two FEValues objects for those two cases.
1832 * template <int dim>
1833 * struct StokesSystem : public StokesPreconditioner<dim>
1835 * StokesSystem(const FiniteElement<dim> &stokes_fe,
1836 * const Mapping<dim> &mapping,
1837 * const Quadrature<dim> &stokes_quadrature,
1838 * const UpdateFlags stokes_update_flags,
1839 * const FiniteElement<dim> &temperature_fe,
1840 * const UpdateFlags temperature_update_flags);
1842 * StokesSystem(const StokesSystem<dim> &data);
1845 * FEValues<dim> temperature_fe_values;
1847 * std::vector<Tensor<1, dim>> phi_u;
1848 * std::vector<SymmetricTensor<2, dim>> grads_phi_u;
1849 * std::vector<double> div_phi_u;
1851 * std::vector<double> old_temperature_values;
1855 * template <int dim>
1856 * StokesSystem<dim>::StokesSystem(
1857 * const FiniteElement<dim> &stokes_fe,
1858 * const Mapping<dim> &mapping,
1859 * const Quadrature<dim> &stokes_quadrature,
1860 * const UpdateFlags stokes_update_flags,
1861 * const FiniteElement<dim> &temperature_fe,
1862 * const UpdateFlags temperature_update_flags)
1863 * : StokesPreconditioner<dim>(stokes_fe,
1864 * stokes_quadrature,
1866 * stokes_update_flags)
1867 * , temperature_fe_values(mapping,
1869 * stokes_quadrature,
1870 * temperature_update_flags)
1871 * , phi_u(stokes_fe.n_dofs_per_cell())
1872 * , grads_phi_u(stokes_fe.n_dofs_per_cell())
1873 * , div_phi_u(stokes_fe.n_dofs_per_cell())
1874 * , old_temperature_values(stokes_quadrature.size())
1878 * template <int dim>
1879 * StokesSystem<dim>::StokesSystem(const StokesSystem<dim> &scratch)
1880 * : StokesPreconditioner<dim>(scratch)
1881 * , temperature_fe_values(
1882 * scratch.temperature_fe_values.get_mapping(),
1883 * scratch.temperature_fe_values.get_fe(),
1884 * scratch.temperature_fe_values.get_quadrature(),
1885 * scratch.temperature_fe_values.get_update_flags())
1886 * , phi_u(scratch.phi_u)
1887 * , grads_phi_u(scratch.grads_phi_u)
1888 * , div_phi_u(scratch.div_phi_u)
1889 * , old_temperature_values(scratch.old_temperature_values)
1895 * After defining the objects used in the assembly of the Stokes system,
1896 * we do the same for the assembly of the matrices necessary for the
1897 * temperature system. The general structure is very similar:
1900 * template <int dim>
1901 * struct TemperatureMatrix
1903 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe,
1904 * const Mapping<dim> &mapping,
1905 * const Quadrature<dim> &temperature_quadrature);
1907 * TemperatureMatrix(const TemperatureMatrix &data);
1910 * FEValues<dim> temperature_fe_values;
1912 * std::vector<double> phi_T;
1913 * std::vector<Tensor<1, dim>> grad_phi_T;
1917 * template <int dim>
1918 * TemperatureMatrix<dim>::TemperatureMatrix(
1919 * const FiniteElement<dim> &temperature_fe,
1920 * const Mapping<dim> &mapping,
1921 * const Quadrature<dim> &temperature_quadrature)
1922 * : temperature_fe_values(mapping,
1924 * temperature_quadrature,
1925 * update_values | update_gradients |
1926 * update_JxW_values)
1927 * , phi_T(temperature_fe.n_dofs_per_cell())
1928 * , grad_phi_T(temperature_fe.n_dofs_per_cell())
1932 * template <int dim>
1933 * TemperatureMatrix<dim>::TemperatureMatrix(
1934 * const TemperatureMatrix &scratch)
1935 * : temperature_fe_values(
1936 * scratch.temperature_fe_values.get_mapping(),
1937 * scratch.temperature_fe_values.get_fe(),
1938 * scratch.temperature_fe_values.get_quadrature(),
1939 * scratch.temperature_fe_values.get_update_flags())
1940 * , phi_T(scratch.phi_T)
1941 * , grad_phi_T(scratch.grad_phi_T)
1947 * The final scratch object is used in the assembly of the right hand
1948 * side of the temperature system. This object is significantly larger
1949 * than the ones above because a lot more quantities enter the
1950 * computation of the right hand side of the temperature equation. In
1951 * particular, the temperature values and gradients of the previous two
1952 * time steps need to be evaluated at the quadrature points, as well as
1953 * the velocities and the strain rates (i.e. the symmetric gradients of
1954 * the velocity) that enter the right hand side as friction heating
1955 * terms. Despite the number of terms, the following should be rather self-explanatory.
1959 * template <int dim>
1960 * struct TemperatureRHS
1962 * TemperatureRHS(const FiniteElement<dim> &temperature_fe,
1963 * const FiniteElement<dim> &stokes_fe,
1964 * const Mapping<dim> &mapping,
1965 * const Quadrature<dim> &quadrature);
1967 * TemperatureRHS(const TemperatureRHS &data);
1970 * FEValues<dim> temperature_fe_values;
1971 * FEValues<dim> stokes_fe_values;
1973 * std::vector<double> phi_T;
1974 * std::vector<Tensor<1, dim>> grad_phi_T;
1976 * std::vector<Tensor<1, dim>> old_velocity_values;
1977 * std::vector<Tensor<1, dim>> old_old_velocity_values;
1979 * std::vector<SymmetricTensor<2, dim>> old_strain_rates;
1980 * std::vector<SymmetricTensor<2, dim>> old_old_strain_rates;
1982 * std::vector<double> old_temperature_values;
1983 * std::vector<double> old_old_temperature_values;
1984 * std::vector<Tensor<1, dim>> old_temperature_grads;
1985 * std::vector<Tensor<1, dim>> old_old_temperature_grads;
1986 * std::vector<double> old_temperature_laplacians;
1987 * std::vector<double> old_old_temperature_laplacians;
1991 * template <int dim>
1992 * TemperatureRHS<dim>::TemperatureRHS(
1993 * const FiniteElement<dim> &temperature_fe,
1994 * const FiniteElement<dim> &stokes_fe,
1995 * const Mapping<dim> &mapping,
1996 * const Quadrature<dim> &quadrature)
1997 * : temperature_fe_values(mapping,
2000 * update_values | update_gradients |
2001 * update_hessians | update_quadrature_points |
2002 * update_JxW_values)
2003 * , stokes_fe_values(mapping,
2006 * update_values | update_gradients)
2007 * , phi_T(temperature_fe.n_dofs_per_cell())
2008 * , grad_phi_T(temperature_fe.n_dofs_per_cell())
2011 * old_velocity_values(quadrature.size())
2012 * , old_old_velocity_values(quadrature.size())
2013 * , old_strain_rates(quadrature.size())
2014 * , old_old_strain_rates(quadrature.size())
2017 * old_temperature_values(quadrature.size())
2018 * , old_old_temperature_values(quadrature.size())
2019 * , old_temperature_grads(quadrature.size())
2020 * , old_old_temperature_grads(quadrature.size())
2021 * , old_temperature_laplacians(quadrature.size())
2022 * , old_old_temperature_laplacians(quadrature.size())
2026 * template <int dim>
2027 * TemperatureRHS<dim>::TemperatureRHS(const TemperatureRHS &scratch)
2028 * : temperature_fe_values(
2029 * scratch.temperature_fe_values.get_mapping(),
2030 * scratch.temperature_fe_values.get_fe(),
2031 * scratch.temperature_fe_values.get_quadrature(),
2032 * scratch.temperature_fe_values.get_update_flags())
2033 * , stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
2034 * scratch.stokes_fe_values.get_fe(),
2035 * scratch.stokes_fe_values.get_quadrature(),
2036 * scratch.stokes_fe_values.get_update_flags())
2037 * , phi_T(scratch.phi_T)
2038 * , grad_phi_T(scratch.grad_phi_T)
2041 * old_velocity_values(scratch.old_velocity_values)
2042 * , old_old_velocity_values(scratch.old_old_velocity_values)
2043 * , old_strain_rates(scratch.old_strain_rates)
2044 * , old_old_strain_rates(scratch.old_old_strain_rates)
2047 * old_temperature_values(scratch.old_temperature_values)
2048 * , old_old_temperature_values(scratch.old_old_temperature_values)
2049 * , old_temperature_grads(scratch.old_temperature_grads)
2050 * , old_old_temperature_grads(scratch.old_old_temperature_grads)
2051 * , old_temperature_laplacians(scratch.old_temperature_laplacians)
2052 * , old_old_temperature_laplacians(scratch.old_old_temperature_laplacians)
2054 * } // namespace Scratch
2059 * The CopyData objects are even simpler than the Scratch objects as all
2060 * they have to do is to store the results of local computations until
2061 * they can be copied into the global matrix or vector objects. These
2062 * structures therefore only need to provide a constructor, a copy
2063 * operation, and some arrays for local matrix, local vectors and the
2064 * relation between local and global degrees of freedom (a.k.a.
2065 * <code>local_dof_indices</code>). Again, we have one such structure for
2066 * each of the four operations we will parallelize using the WorkStream class.
2070 * namespace CopyData
2072 * template <int dim>
2073 * struct StokesPreconditioner
2075 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe);
2076 * StokesPreconditioner(const StokesPreconditioner &data);
2077 * StokesPreconditioner &operator=(const StokesPreconditioner &) = default;
2079 * FullMatrix<double> local_matrix;
2080 * std::vector<types::global_dof_index> local_dof_indices;
2083 * template <int dim>
2084 * StokesPreconditioner<dim>::StokesPreconditioner(
2085 * const FiniteElement<dim> &stokes_fe)
2086 * : local_matrix(stokes_fe.n_dofs_per_cell(), stokes_fe.n_dofs_per_cell())
2087 * , local_dof_indices(stokes_fe.n_dofs_per_cell())
2090 * template <int dim>
2091 * StokesPreconditioner<dim>::StokesPreconditioner(
2092 * const StokesPreconditioner &data)
2093 * : local_matrix(data.local_matrix)
2094 * , local_dof_indices(data.local_dof_indices)
2099 * template <int dim>
2100 * struct StokesSystem : public StokesPreconditioner<dim>
2102 * StokesSystem(const FiniteElement<dim> &stokes_fe);
2104 * Vector<double> local_rhs;
2107 * template <int dim>
2108 * StokesSystem<dim>::StokesSystem(const FiniteElement<dim> &stokes_fe)
2109 * : StokesPreconditioner<dim>(stokes_fe)
2110 * , local_rhs(stokes_fe.n_dofs_per_cell())
2115 * template <int dim>
2116 * struct TemperatureMatrix
2118 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe);
2120 * FullMatrix<double> local_mass_matrix;
2121 * FullMatrix<double> local_stiffness_matrix;
2122 * std::vector<types::global_dof_index> local_dof_indices;
2125 * template <int dim>
2126 * TemperatureMatrix<dim>::TemperatureMatrix(
2127 * const FiniteElement<dim> &temperature_fe)
2128 * : local_mass_matrix(temperature_fe.n_dofs_per_cell(),
2129 * temperature_fe.n_dofs_per_cell())
2130 * , local_stiffness_matrix(temperature_fe.n_dofs_per_cell(),
2131 * temperature_fe.n_dofs_per_cell())
2132 * , local_dof_indices(temperature_fe.n_dofs_per_cell())
2137 * template <int dim>
2138 * struct TemperatureRHS
2140 * TemperatureRHS(const FiniteElement<dim> &temperature_fe);
2142 * Vector<double> local_rhs;
2143 * std::vector<types::global_dof_index> local_dof_indices;
2144 * FullMatrix<double> matrix_for_bc;
2147 * template <int dim>
2148 * TemperatureRHS<dim>::TemperatureRHS(
2149 * const FiniteElement<dim> &temperature_fe)
2150 * : local_rhs(temperature_fe.n_dofs_per_cell())
2151 * , local_dof_indices(temperature_fe.n_dofs_per_cell())
2152 * , matrix_for_bc(temperature_fe.n_dofs_per_cell(),
2153 * temperature_fe.n_dofs_per_cell())
2155 * } // namespace CopyData
2156 * } // namespace Assembly
2163 * <a name="step_32-ThecodeBoussinesqFlowProblemcodeclasstemplate"></a>
2164 * <h3>The <code>BoussinesqFlowProblem</code> class template</h3>
2168 * This is the declaration of the main class. It is very similar to @ref step_31 "step-31"
2169 * but there are a number of differences that we will comment on below.
2173 * The top of the class is essentially the same as in @ref step_31 "step-31", listing the
2174 * public methods and a set of private functions that do the heavy
2175 * lifting. Compared to @ref step_31 "step-31" there are only two additions to this
2176 * section: the function <code>get_cfl_number()</code> that computes the
2177 * maximum CFL number over all cells which we then compute the global time
2178 * step from, and the function <code>get_entropy_variation()</code> that is
2179 * used in the computation of the entropy stabilization. It is akin to the
2180 * <code>get_extrapolated_temperature_range()</code> we have used in @ref step_31 "step-31"
2181 * for this purpose, but works on the entropy instead of the temperature
2185 * template <int dim>
2186 * class BoussinesqFlowProblem
2189 * struct Parameters;
2190 * BoussinesqFlowProblem(Parameters &parameters);
2194 * void setup_dofs();
2195 * void assemble_stokes_preconditioner();
2196 * void build_stokes_preconditioner();
2197 * void assemble_stokes_system();
2198 * void assemble_temperature_matrix();
2199 * void assemble_temperature_system(const double maximal_velocity);
2200 * double get_maximal_velocity() const;
2201 * double get_cfl_number() const;
2202 * double get_entropy_variation(const double average_temperature) const;
2203 * std::pair<double, double> get_extrapolated_temperature_range() const;
2205 * void output_results();
2206 * void refine_mesh(const unsigned int max_grid_level);
2208 * double compute_viscosity(
2209 * const std::vector<double> &old_temperature,
2210 * const std::vector<double> &old_old_temperature,
2211 * const std::vector<Tensor<1, dim>> &old_temperature_grads,
2212 * const std::vector<Tensor<1, dim>> &old_old_temperature_grads,
2213 * const std::vector<double> &old_temperature_laplacians,
2214 * const std::vector<double> &old_old_temperature_laplacians,
2215 * const std::vector<Tensor<1, dim>> &old_velocity_values,
2216 * const std::vector<Tensor<1, dim>> &old_old_velocity_values,
2217 * const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
2218 * const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
2219 * const double global_u_infty,
2220 * const double global_T_variation,
2221 * const double average_temperature,
2222 * const double global_entropy_variation,
2223 * const double cell_diameter) const;
2228 * The first significant new component is the definition of a struct for
2229 * the parameters according to the discussion in the introduction. This
2230 * structure is initialized by reading from a parameter file during
2231 * construction of this object.
2236 * Parameters(const std::string &parameter_filename);
2238 * static void declare_parameters(ParameterHandler &prm);
2239 * void parse_parameters(ParameterHandler &prm);
2243 * unsigned int initial_global_refinement;
2244 * unsigned int initial_adaptive_refinement;
2246 * bool generate_graphical_output;
2247 * unsigned int graphical_output_interval;
2249 * unsigned int adaptive_refinement_interval;
2251 * double stabilization_alpha;
2252 * double stabilization_c_R;
2253 * double stabilization_beta;
2255 * unsigned int stokes_velocity_degree;
2256 * bool use_locally_conservative_discretization;
2258 * unsigned int temperature_degree;
2262 * Parameters &parameters;
2266 * The <code>pcout</code> (for <i>%parallel <code>std::cout</code></i>)
2267 * object is used to simplify writing output: each MPI process can use
2268 * this to generate output as usual, but since each of these processes
2269 * will (hopefully) produce the same output it will just be replicated
2270 * many times over; with the ConditionalOStream class, only the output
2271 * generated by one MPI process will actually be printed to screen,
2272 * whereas the output by all the other threads will simply be forgotten.
2275 * ConditionalOStream pcout;
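For reference, such an object is commonly constructed so that only the root MPI process actually writes anything; a minimal sketch of this pattern:
@code
  // Only the process with rank 0 writes to std::cout:
  ConditionalOStream pcout(std::cout,
                           Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) ==
                             0);
  pcout << "This appears exactly once, not once per process." << std::endl;
@endcode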
2279 * The following member variables will then again be similar to those in
2280 * @ref step_31 "step-31" (and to other tutorial programs). As mentioned in the
2281 * introduction, we fully distribute computations, so we will have to use
2282 * the parallel::distributed::Triangulation class (see @ref step_40 "step-40") but the
2283 * remainder of these variables is rather standard with two exceptions:
2287 * - The <code>mapping</code> variable is used to denote a higher-order
2288 * polynomial mapping. As mentioned in the introduction, we use this
2289 * mapping when forming integrals through quadrature for all cells.
2293 * - In a bit of naming confusion, you will notice below that some of the
2294 * variables from namespace TrilinosWrappers are taken from namespace
2295 * TrilinosWrappers::MPI (such as the right hand side vectors) whereas
2296 * others are not (such as the various matrices). This is due to legacy
2297 * reasons. We will frequently have to query velocities
2298 * and temperatures at arbitrary quadrature points; consequently, rather
2299 * than importing ghost information of a vector whenever we need access
2300 * to degrees of freedom that are relevant locally but owned by another
2301 * processor, we solve linear systems in %parallel but then immediately
2302 * initialize a vector including ghost entries of the solution for further
2303 * processing. The various <code>*_solution</code> vectors are therefore
2304 * filled immediately after solving their respective linear system in
2305 * %parallel and will always contain values for all
2306 * @ref GlossLocallyRelevantDof "locally relevant degrees of freedom";
2307 * the fully distributed vectors that we obtain from the solution process
2308 * and that only ever contain the
2309 * @ref GlossLocallyOwnedDof "locally owned degrees of freedom" are
2310 * destroyed immediately after the solution process and after we have
2311 * copied the relevant values into the member variable vectors.
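A minimal sketch of this pattern, with index set names assumed for illustration: solve into a vector that stores only locally owned entries, then copy it into a ghosted vector for later read access.
@code
  // Sketch only: owned-only vector for the solver, ghosted vector for access.
  TrilinosWrappers::MPI::Vector distributed_solution(locally_owned_dofs,
                                                     MPI_COMM_WORLD);
  // ... run the linear solver on distributed_solution ...

  TrilinosWrappers::MPI::Vector ghosted_solution(locally_owned_dofs,
                                                 locally_relevant_dofs,
                                                 MPI_COMM_WORLD);
  ghosted_solution = distributed_solution; // also imports the ghost values
@endcode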
2314 * parallel::distributed::Triangulation<dim> triangulation;
2315 * double global_Omega_diameter;
2317 * const MappingQ<dim> mapping;
2319 * const FESystem<dim> stokes_fe;
2320 * DoFHandler<dim> stokes_dof_handler;
2321 * AffineConstraints<double> stokes_constraints;
2323 * TrilinosWrappers::BlockSparseMatrix stokes_matrix;
2324 * TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
2326 * TrilinosWrappers::MPI::BlockVector stokes_solution;
2327 * TrilinosWrappers::MPI::BlockVector old_stokes_solution;
2328 * TrilinosWrappers::MPI::BlockVector stokes_rhs;
2331 * const FE_Q<dim> temperature_fe;
2332 * DoFHandler<dim> temperature_dof_handler;
2333 * AffineConstraints<double> temperature_constraints;
2335 * TrilinosWrappers::SparseMatrix temperature_mass_matrix;
2336 * TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
2337 * TrilinosWrappers::SparseMatrix temperature_matrix;
2339 * TrilinosWrappers::MPI::Vector temperature_solution;
2340 * TrilinosWrappers::MPI::Vector old_temperature_solution;
2341 * TrilinosWrappers::MPI::Vector old_old_temperature_solution;
2342 * TrilinosWrappers::MPI::Vector temperature_rhs;
2346 * double old_time_step;
2347 * unsigned int timestep_number;
2349 * std::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
2350 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
2351 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;
2353 * bool rebuild_stokes_matrix;
2354 * bool rebuild_stokes_preconditioner;
2355 * bool rebuild_temperature_matrices;
2356 * bool rebuild_temperature_preconditioner;
2360 * The next member variable, <code>computing_timer</code> is used to
2361 * conveniently account for compute time spent in certain "sections" of
2362 * the code that are repeatedly entered. For example, we will enter (and
2363 * leave) sections for Stokes matrix assembly and would like to accumulate
2364 * the run time spent in this section over all time steps. Every so many
2365 * time steps as well as at the end of the program (through the destructor
2366 * of the TimerOutput class) we will then produce a nice summary of the
2367 * times spent in the different sections into which we categorize the
2368 * run-time of this program.
2371 * TimerOutput computing_timer;
2375 * After these member variables we have a number of auxiliary functions
2376 * that have been broken out of the ones listed above. Specifically, there
2377 * are first three functions that we call from <code>setup_dofs</code> and
2378 * then the ones that do the assembling of linear systems:
2381 * void setup_stokes_matrix(
2382 * const std::vector<IndexSet> &stokes_partitioning,
2383 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2384 * void setup_stokes_preconditioner(
2385 * const std::vector<IndexSet> &stokes_partitioning,
2386 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2387 * void setup_temperature_matrices(
2388 * const IndexSet &temperature_partitioning,
2389 * const IndexSet &temperature_relevant_partitioning);
2394 * Following the @ref MTWorkStream "task-based parallelization" paradigm,
2395 * we split all the assembly routines into two parts: a first part that
2396 * can do all the calculations on a certain cell without taking care of
2397 * other threads, and a second part (which is writing the local data into
2398 * the global matrices and vectors) which can be entered by only one
2399 * thread at a time. In order to implement that, we provide functions for
2400 * each of those two steps for all the four assembly routines that we use
2401 * in this program. The following eight functions do exactly this:
2404 * void local_assemble_stokes_preconditioner(
2405 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2406 * Assembly::Scratch::StokesPreconditioner<dim> &scratch,
2407 * Assembly::CopyData::StokesPreconditioner<dim> &data);
2409 * void copy_local_to_global_stokes_preconditioner(
2410 * const Assembly::CopyData::StokesPreconditioner<dim> &data);
2413 * void local_assemble_stokes_system(
2414 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2415 * Assembly::Scratch::StokesSystem<dim> &scratch,
2416 * Assembly::CopyData::StokesSystem<dim> &data);
2418 * void copy_local_to_global_stokes_system(
2419 * const Assembly::CopyData::StokesSystem<dim> &data);
2422 * void local_assemble_temperature_matrix(
2423 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2424 * Assembly::Scratch::TemperatureMatrix<dim> &scratch,
2425 * Assembly::CopyData::TemperatureMatrix<dim> &data);
2427 * void copy_local_to_global_temperature_matrix(
2428 * const Assembly::CopyData::TemperatureMatrix<dim> &data);
2432 * void local_assemble_temperature_rhs(
2433 * const std::pair<double, double> global_T_range,
2434 * const double global_max_velocity,
2435 * const double global_entropy_variation,
2436 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2437 * Assembly::Scratch::TemperatureRHS<dim> &scratch,
2438 * Assembly::CopyData::TemperatureRHS<dim> &data);
2440 * void copy_local_to_global_temperature_rhs(
2441 * const Assembly::CopyData::TemperatureRHS<dim> &data);
2445 * Finally, we forward declare a member class that we will define later on
2446 * and that will be used to compute a number of quantities from our
2458 * <a name="step_32-BoussinesqFlowProblemclassimplementation"></a>
2464 * <a name="step_32-BoussinesqFlowProblemParameters"></a>
2490 * template <int dim>
2520 * "> not found. Creating a template file of the same name."));
2524 * parse_parameters(prm);
2536 * template <int dim>
2540 * prm.declare_entry("End time",
2543 * "The end time of the simulation in years.");
2544 * prm.declare_entry("Initial global refinement",
2547 * "The number of global refinement steps performed on "
2548 * "the initial coarse mesh, before the problem is first "
2550 * prm.declare_entry("Initial adaptive refinement",
2553 * "The number of adaptive refinement steps performed after "
2554 * "initial global refinement.");
2555 * prm.declare_entry("Time steps between mesh refinement",
2558 * "The number of time steps after which the mesh is to be "
2559 * "adapted based on computed error indicators.");
2560 * prm.declare_entry("Generate graphical output",
2563 * "Whether graphical output is to be generated or not. "
2564 * "You may not want to get graphical output if the number "
2565 * "of processors is large.");
2566 * prm.declare_entry("Time steps between graphical output",
2569 * "The number of time steps between each generation of "
2570 * "graphical output files.");
2572 * prm.enter_subsection("Stabilization parameters");
2574 * prm.declare_entry("alpha",
2577 * "The exponent in the entropy viscosity stabilization.");
2578 * prm.declare_entry("c_R",
2581 * "The c_R factor in the entropy viscosity "
2582 * "stabilization.");
2583 * prm.declare_entry("beta",
2586 * "The beta factor in the artificial viscosity "
2587 * "stabilization. An appropriate value for 2d is 0.052 "
2588 * "and 0.078 for 3d.");
2590 * prm.leave_subsection();
2592 * prm.enter_subsection("Discretization");
2594 * prm.declare_entry(
2595 * "Stokes velocity polynomial degree",
2598 * "The polynomial degree to use for the velocity variables "
2599 * "in the Stokes system.");
2600 * prm.declare_entry(
2601 * "Temperature polynomial degree",
2604 * "The polynomial degree to use for the temperature variable.");
2605 * prm.declare_entry(
2606 * "Use locally conservative discretization",
2609 * "Whether to use a Stokes discretization that is locally "
2610 * "conservative at the expense of a larger number of degrees "
2611 * "of freedom, or to go with a cheaper discretization "
2612 * "that does not locally conserve mass (although it is "
2613 * "globally conservative).");
2615 * prm.leave_subsection();
2628 * template <int dim>
2632 * end_time = prm.get_double("End time");
2635 * prm.get_integer("Initial adaptive refinement");
2638 * prm.get_integer("Time steps between mesh refinement");
2642 * prm.get_integer("Time steps between graphical output");
2644 * prm.enter_subsection("Stabilization parameters");
2650 * prm.leave_subsection();
2652 * prm.enter_subsection("Discretization");
2655 * prm.get_integer("Stokes velocity polynomial degree");
2658 * prm.get_bool("Use locally conservative discretization");
2660 * prm.leave_subsection();
2668 * <a name="step_32-BoussinesqFlowProblemBoussinesqFlowProblem"></a>
2700 * template <int dim>
2719 * (parameters.use_locally_conservative_discretization ?
2721 * FE_DGP<dim>(parameters.stokes_velocity_degree - 1)) :
2753 * <a name="step_32-TheBoussinesqFlowProblemhelperfunctions"></a>
2756 * <a name="step_32-BoussinesqFlowProblemget_maximal_velocity"></a>
2774 * <code>cell-@>is_locally_owned()</code>.
2786 * call, but it's even simpler to use the respective function in namespace
2787 * Utilities::MPI using the MPI communicator object since that will do the
2788 * right thing even if we work without MPI and on a single machine only. The
2789 * call to <code>Utilities::MPI::max</code> needs two arguments, namely the
2790 * local maximum (input) and the MPI communicator, which is MPI_COMM_WORLD
2794 * template <int dim>
2795 * double BoussinesqFlowProblem<dim>::get_maximal_velocity() const
2797 * const QIterated<dim> quadrature_formula(QTrapezoid<1>(),
2798 * parameters.stokes_velocity_degree);
2799 * const unsigned int n_q_points = quadrature_formula.size();
2801 * FEValues<dim> fe_values(mapping,
2803 * quadrature_formula,
2805 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2807 * const FEValuesExtractors::Vector velocities(0);
2809 * double max_local_velocity = 0;
2811 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2812 * if (cell->is_locally_owned())
2814 * fe_values.reinit(cell);
2815 * fe_values[velocities].get_function_values(stokes_solution,
2818 * for (unsigned int q = 0; q < n_q_points; ++q)
2819 * max_local_velocity =
2820 * std::max(max_local_velocity, velocity_values[q].norm());
2823 * return Utilities::MPI::max(max_local_velocity, MPI_COMM_WORLD);
2830 * <a name="step_32-BoussinesqFlowProblemget_cfl_number"></a>
2831 * <h5>BoussinesqFlowProblem::get_cfl_number</h5>
2835 * The next function does something similar, but we now compute the CFL
2836 * number, i.e., maximal velocity on a cell divided by the cell
2837 * diameter. This number is necessary to determine the time step size, as we
2838 * use a semi-explicit time stepping scheme for the temperature equation
2839 * (see @ref step_31 "step-31" for a discussion). We compute it in the same way as above:
2840 * Compute the local maximum over all locally owned cells, then exchange it
2841 * via MPI to find the global maximum.
2844 * template <int dim>
2845 * double BoussinesqFlowProblem<dim>::get_cfl_number() const
2847 * const QIterated<dim> quadrature_formula(QTrapezoid<1>(),
2848 * parameters.stokes_velocity_degree);
2849 * const unsigned int n_q_points = quadrature_formula.size();
2851 * FEValues<dim> fe_values(mapping,
2853 * quadrature_formula,
2855 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2857 * const FEValuesExtractors::Vector velocities(0);
2859 * double max_local_cfl = 0;
2861 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2862 * if (cell->is_locally_owned())
2864 * fe_values.reinit(cell);
2865 * fe_values[velocities].get_function_values(stokes_solution,
2868 * double max_local_velocity = 1e-10;
2869 * for (unsigned int q = 0; q < n_q_points; ++q)
2870 * max_local_velocity =
2871 * std::max(max_local_velocity, velocity_values[q].norm());
2873 * std::max(max_local_cfl, max_local_velocity / cell->diameter());
2876 * return Utilities::MPI::max(max_local_cfl, MPI_COMM_WORLD);
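As an illustration of how this value would typically be used (the constant and variable names here are hypothetical, not the program's actual formula), the time step is chosen so that a target CFL constant is respected on every cell:
@code
  // Illustration only: scale the time step by the inverse of the CFL number.
  const double c_cfl     = 1.0; // hypothetical target CFL constant
  const double time_step = c_cfl / get_cfl_number();
@endcode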
2883 * <a name="step_32-BoussinesqFlowProblemget_entropy_variation"></a>
2884 * <h5>BoussinesqFlowProblem::get_entropy_variation</h5>
2888 * Next comes the computation of the global entropy variation
2889 * @f$\|E(T)-\bar{E}(T)\|_\infty@f$ where the entropy @f$E@f$ is defined as
2890 * discussed in the introduction. This is needed for the evaluation of the
2891 * stabilization in the temperature equation as explained in the
2892 * introduction. The entropy variation is actually only needed if we use
2893 * @f$\alpha=2@f$ as a power in the residual computation. The infinity norm is
2894 * computed by the maxima over quadrature points, as usual in discrete computations.
2899 * In order to compute this quantity, we first have to find the
2900 * space-average @f$\bar{E}(T)@f$ and then evaluate the maximum. However, that
2901 * means that we would need to perform two loops. We can avoid the overhead
2902 * by noting that @f$\|E(T)-\bar{E}(T)\|_\infty =
2903 * \max\big(E_{\textrm{max}}(T)-\bar{E}(T),
2904 * \bar{E}(T)-E_{\textrm{min}}(T)\big)@f$, i.e., the maximum out of the
2905 * deviation from the average entropy in positive and negative
2906 * directions. The four quantities we need for the latter formula (maximum
2907 * entropy, minimum entropy, average entropy, area) can all be evaluated in
2908 * the same loop over all cells, so we choose this simpler variant.
2911 * template <int dim>
2912 * double BoussinesqFlowProblem<dim>::get_entropy_variation(
2913 * const double average_temperature) const
2915 * if (parameters.stabilization_alpha != 2)
2918 * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 1);
2919 * const unsigned int n_q_points = quadrature_formula.size();
2921 * FEValues<dim> fe_values(temperature_fe,
2922 * quadrature_formula,
2923 * update_values | update_JxW_values);
2924 * std::vector<double> old_temperature_values(n_q_points);
2925 * std::vector<double> old_old_temperature_values(n_q_points);
2929 * In the two functions above we computed the maximum of numbers that were
2930 * all non-negative, so we knew that zero was certainly a lower bound. On
2931 * the other hand, here we need to find the maximum deviation from the
2932 * average value, i.e., we will need to know the maximal and minimal entropy.
2953 * if (cell->is_locally_owned())
2955 * fe_values.reinit(cell);
2960 * for (unsigned int q = 0; q < n_q_points; ++q)
2969 * area += fe_values.JxW(q);
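The locally accumulated quantities then still have to be combined across all MPI processes. A minimal sketch of that reduction, assuming local variables named entropy_integrated, area, max_entropy, and min_entropy accumulated in the loop above:
@code
  // Sketch (variable names assumed): reduce local integrals and extrema to
  // global ones, then return the maximal deviation from the average entropy.
  const double local_sums[2] = {entropy_integrated, area};
  double       global_sums[2];
  Utilities::MPI::sum(local_sums, MPI_COMM_WORLD, global_sums);

  const double average_entropy = global_sums[0] / global_sums[1];

  const double global_max = Utilities::MPI::max(max_entropy, MPI_COMM_WORLD);
  const double global_min = Utilities::MPI::min(min_entropy, MPI_COMM_WORLD);

  return std::max(global_max - average_entropy,
                  average_entropy - global_min);
@endcode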
3013 * <a name="step_32-BoussinesqFlowProblemget_extrapolated_temperature_range"></a>
3032 * template <int dim>
3033 * std::pair<double, double>
3037 * parameters.temperature_degree);
3053 * if (cell->is_locally_owned())
3055 * fe_values.reinit(cell);
3061 * for (unsigned int q = 0; q < n_q_points; ++q)
3078 * if (cell->is_locally_owned())
3080 * fe_values.reinit(cell);
3084 * for (unsigned int q = 0; q < n_q_points; ++q)
3107 * <a name="step_32-BoussinesqFlowProblemcompute_viscosity"></a>
3117 * template <int dim>
3133 * const double cell_diameter) const
3136 * return 5e-3 * cell_diameter;
3143 * for (unsigned int q = 0; q < n_q_points; ++q)
3152 * const double dT_dt =
3161 * const double gamma =
3167 * if (parameters.stabilization_alpha == 2)
3175 * (parameters.stabilization_beta * max_velocity * cell_diameter);
3183 * if (parameters.stabilization_alpha == 2)
3185 * (parameters.stabilization_c_R * cell_diameter * cell_diameter *
3189 * (parameters.stabilization_c_R * cell_diameter *
3202 * <a name="step_32-TheBoussinesqFlowProblemsetupfunctions"></a>
3259 * structures for accumulating off-processor data on the fly are not thread
3260 * safe. With the initialization presented here, there is no such problem
3261 * and one could safely introduce graph coloring for this algorithm.
3265 * The only other change we need to make is to tell the
3266 * DoFTools::make_sparsity_pattern() function that it is only supposed to
3267 * work on a subset of cells, namely the ones whose
3268 * <code>subdomain_id</code> equals the number of the current processor, and
3269 * to ignore all other cells.
3273 * This strategy is replicated across all three of the following functions.
3277 * Note that Trilinos matrices store the information contained in the
3278 * sparsity patterns, so we can safely release the <code>sp</code> variable
3279 * once the matrix has been given the sparsity structure.
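In sketch form, and noting that the exact argument list in the program may differ slightly, the restricted sparsity pattern call looks roughly as follows:
@code
  // Sketch: build the sparsity pattern only for locally owned cells by
  // passing this processor's id as the subdomain argument.
  DoFTools::make_sparsity_pattern(stokes_dof_handler,
                                  coupling,
                                  sp,
                                  stokes_constraints,
                                  false,
                                  Utilities::MPI::this_mpi_process(
                                    MPI_COMM_WORLD));
  sp.compress();
@endcode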
3282 * template <int dim>
3283 * void BoussinesqFlowProblem<dim>::setup_stokes_matrix(
3284 * const std::vector<IndexSet> &stokes_partitioning,
3285 * const std::vector<IndexSet> &stokes_relevant_partitioning)
3287 * stokes_matrix.clear();
3289 * TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
3290 * stokes_partitioning,
3291 * stokes_relevant_partitioning,
3294 * Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
3295 * for (unsigned int c = 0; c < dim + 1; ++c)
3296 * for (unsigned int d = 0; d < dim + 1; ++d)
3297 * if (!((c == dim) && (d == dim)))
3298 * coupling[c][d] = DoFTools::always;
3300 * coupling[c][d] = DoFTools::none;
3302 * DoFTools::make_sparsity_pattern(stokes_dof_handler,
3305 * stokes_constraints,
3307 * Utilities::MPI::this_mpi_process(
3311 * stokes_matrix.reinit(sp);
3316 * template <int dim>
3317 * void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner(
3318 * const std::vector<IndexSet> &stokes_partitioning,
3319 * const std::vector<IndexSet> &stokes_relevant_partitioning)
3321 * Amg_preconditioner.reset();
3322 * Mp_preconditioner.reset();
3324 * stokes_preconditioner_matrix.clear();
3326 * TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
3327 * stokes_partitioning,
3328 * stokes_relevant_partitioning,
3331 * Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
3332 * for (unsigned int c = 0; c < dim + 1; ++c)
3333 * for (unsigned int d = 0; d < dim + 1; ++d)
3335 * coupling[c][d] = DoFTools::always;
3337 * coupling[c][d] = DoFTools::none;
3339 * DoFTools::make_sparsity_pattern(stokes_dof_handler,
3342 * stokes_constraints,
3344 * Utilities::MPI::this_mpi_process(
3348 * stokes_preconditioner_matrix.reinit(sp);
3352 * template <int dim>
3353 * void BoussinesqFlowProblem<dim>::setup_temperature_matrices(
3354 * const IndexSet &temperature_partitioner,
3355 * const IndexSet &temperature_relevant_partitioner)
3357 * T_preconditioner.reset();
3358 * temperature_mass_matrix.clear();
3359 * temperature_stiffness_matrix.clear();
3360 * temperature_matrix.clear();
3362 * TrilinosWrappers::SparsityPattern sp(temperature_partitioner,
3363 * temperature_partitioner,
3364 * temperature_relevant_partitioner,
3366 * DoFTools::make_sparsity_pattern(temperature_dof_handler,
3368 * temperature_constraints,
3370 * Utilities::MPI::this_mpi_process(
3374 * temperature_matrix.reinit(sp);
3375 * temperature_mass_matrix.reinit(sp);
3376 * temperature_stiffness_matrix.reinit(sp);
3383 * The remainder of the setup function (after splitting out the three
3384 * functions above) mostly has to deal with the things we need to do for
3385 * parallelization across processors. Because setting all of this up is a
3386 * significant compute time expense of the program, we put everything we do
3387 * here into a timer group so that we can get summary information about the
3388 * fraction of time spent in this part of the program at its end.
3392 * At the top as usual we enumerate degrees of freedom and sort them by
3393 * component/block, followed by writing their numbers to the screen from
3394 * processor zero. The DoFHandler::distribute_dofs() function, when applied
3395 * to a parallel::distributed::Triangulation object, sorts degrees of
3396 * freedom in such a way that all degrees of freedom associated with
3397 * subdomain zero come before all those associated with subdomain one,
3398 * etc. For the Stokes part, this entails, however, that velocities and
3399 * pressures become intermixed, but this is trivially solved by sorting
3400 * again by blocks; it is worth noting that this latter operation leaves the
3401 * relative ordering of all velocities and pressures alone, i.e. within the
3402 * velocity block we will still have all those associated with subdomain
3403 * zero before all velocities associated with subdomain one, etc. This is
3404 * important since we store each of the blocks of this matrix distributed
3405 * across all processors and want this to be done in such a way that each
3406 * processor stores that part of the matrix that is roughly equal to the
3407 * degrees of freedom located on those cells that it will actually work on.
3411 * When printing the numbers of degrees of freedom, note that these numbers
3412 * are going to be large if we use many processors. Consequently, we let the
3413 * stream put a comma separator in between every three digits. The state of
3414 * the stream, using the locale, is saved from before to after this
3415 * operation. While slightly opaque, the code works because the default
3416 * locale (which we get using the constructor call
3417 * <code>std::locale("")</code>) implies printing numbers with a comma
3418 * separator for every third digit (i.e., thousands, millions, billions).
3422 * In this function as well as many below, we measure how much time
3423 * we spend here and collect that in a section called "Setup dof
3424 * systems" across function invocations. This is done using an
3425 * TimerOutput::Scope object that gets a timer going in the section
3426 * with above name of the `computing_timer` object upon construction
3427 * of the local variable; the timer is stopped again when the
3428 * destructor of the `timing_section` variable is called. This, of
3429 * course, happens either at the end of the function, or if we leave
3430 * the function through a `return` statement or when an exception is
3431 * thrown somewhere -- in other words, whenever we leave this
3432 * function in any way. The use of such "scope" objects therefore
3433 * makes sure that we do not have to manually add code that tells
3434 * the timer to stop at every location where this function may be
3438 * template <int dim>
3439 * void BoussinesqFlowProblem<dim>::setup_dofs()
3441 * TimerOutput::Scope timing_section(computing_timer, "Setup dof systems");
3443 * stokes_dof_handler.distribute_dofs(stokes_fe);
3445 * std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
3446 * stokes_sub_blocks[dim] = 1;
3447 * DoFRenumbering::component_wise(stokes_dof_handler, stokes_sub_blocks);
3449 * temperature_dof_handler.distribute_dofs(temperature_fe);
3451 * const std::vector<types::global_dof_index> stokes_dofs_per_block =
3452 * DoFTools::count_dofs_per_fe_block(stokes_dof_handler, stokes_sub_blocks);
3454 * const types::global_dof_index n_u = stokes_dofs_per_block[0],
3455 * n_p = stokes_dofs_per_block[1],
3456 * n_T = temperature_dof_handler.n_dofs();
3458 * std::locale s = pcout.get_stream().getloc();
3459 * pcout.get_stream().imbue(std::locale(""));
3460 * pcout << "Number of active cells: " << triangulation.n_global_active_cells()
3461 * << " (on " << triangulation.n_levels() << " levels)" << std::endl
3462 * << "Number of degrees of freedom: " << n_u + n_p + n_T << " (" << n_u
3463 * << '+' << n_p << '+' << n_T << ')' << std::endl
3465 * pcout.get_stream().imbue(s);
3470 * After this, we have to set up the various partitioners (of type
3471 * <code>IndexSet</code>, see the introduction) that describe which parts
3472 * of each matrix or vector will be stored where, then call the functions
3473 * that actually set up the matrices, and at the end also resize the
3474 * various vectors we keep around in this program.
3480 * const IndexSet &stokes_locally_owned_index_set =
3481 * stokes_dof_handler.locally_owned_dofs();
3482 * const IndexSet stokes_locally_relevant_set =
3483 * DoFTools::extract_locally_relevant_dofs(stokes_dof_handler);
3485 * std::vector<IndexSet> stokes_partitioning;
3486 * stokes_partitioning.push_back(
3487 * stokes_locally_owned_index_set.get_view(0, n_u));
3488 * stokes_partitioning.push_back(
3489 * stokes_locally_owned_index_set.get_view(n_u, n_u + n_p));
3491 * std::vector<IndexSet> stokes_relevant_partitioning;
3492 * stokes_relevant_partitioning.push_back(
3493 * stokes_locally_relevant_set.get_view(0, n_u));
3494 * stokes_relevant_partitioning.push_back(
3495 * stokes_locally_relevant_set.get_view(n_u, n_u + n_p));
3497 * const IndexSet temperature_partitioning =
3498 * temperature_dof_handler.locally_owned_dofs();
3499 * const IndexSet temperature_relevant_partitioning =
3500 * DoFTools::extract_locally_relevant_dofs(temperature_dof_handler);
3504 * Following this, we can compute constraints for the solution vectors,
3505 * including hanging node constraints and homogeneous and inhomogeneous
3506 * boundary values for the Stokes and temperature fields. Note that as for
3507 * everything else, the constraint objects can not hold <i>all</i>
3508 * constraints on every processor. Rather, each processor needs to store
3509 * only those that are actually necessary for correctness given that it
3510 * only assembles linear systems on cells it owns. As discussed in the
3511 * @ref distributed_paper "this paper", the set of constraints we need to
3512 * know about is exactly the set of constraints on all locally relevant
3513 * degrees of freedom, so this is what we use to initialize the constraint
3518 * stokes_constraints.clear();
3519 * stokes_constraints.reinit(stokes_locally_owned_index_set,
3520 * stokes_locally_relevant_set);
3522 * DoFTools::make_hanging_node_constraints(stokes_dof_handler,
3523 * stokes_constraints);
3525 * const FEValuesExtractors::Vector velocity_components(0);
3526 * VectorTools::interpolate_boundary_values(
3527 * stokes_dof_handler,
3529 * Functions::ZeroFunction<dim>(dim + 1),
3530 * stokes_constraints,
3531 * stokes_fe.component_mask(velocity_components));
3533 * std::set<types::boundary_id> no_normal_flux_boundaries;
3534 * no_normal_flux_boundaries.insert(1);
3535 * VectorTools::compute_no_normal_flux_constraints(stokes_dof_handler,
3537 * no_normal_flux_boundaries,
3538 * stokes_constraints,
3540 * stokes_constraints.close();
3543 * temperature_constraints.clear();
3544 * temperature_constraints.reinit(temperature_partitioning,
3545 * temperature_relevant_partitioning);
3547 * DoFTools::make_hanging_node_constraints(temperature_dof_handler,
3548 * temperature_constraints);
3549 * VectorTools::interpolate_boundary_values(
3550 * temperature_dof_handler,
3552 * EquationData::TemperatureInitialValues<dim>(),
3553 * temperature_constraints);
3554 * VectorTools::interpolate_boundary_values(
3555 * temperature_dof_handler,
3557 * EquationData::TemperatureInitialValues<dim>(),
3558 * temperature_constraints);
3559 * temperature_constraints.close();
3564 * All this done, we can then initialize the various matrix and vector
3565 * objects to their proper sizes. At the end, we also record that all
3566 * matrices and preconditioners have to be re-computed at the beginning of
3567 * the next time step. Note how we initialize the vectors for the Stokes
3568 * and temperature right hand sides: These are writable vectors (last
3569 * boolean argument set to @p true) that have the correct one-to-one
3570 * partitioning of locally owned elements but are still given the relevant
3571 * partitioning for means of figuring out the vector entries that are
3572 * going to be set right away. As for matrices, this allows for writing
3573 * local contributions into the vector with multiple threads (always
3574 * assuming that the same vector entry is not accessed by multiple threads
3575 * at the same time). The other vectors only allow for read access of
3576 * individual elements, including ghosts, but are not suitable for
3580 * setup_stokes_matrix(stokes_partitioning, stokes_relevant_partitioning);
3581 * setup_stokes_preconditioner(stokes_partitioning,
3582 * stokes_relevant_partitioning);
3583 * setup_temperature_matrices(temperature_partitioning,
3584 * temperature_relevant_partitioning);
3586 * stokes_rhs.reinit(stokes_partitioning,
3587 * stokes_relevant_partitioning,
3590 * stokes_solution.reinit(stokes_relevant_partitioning, MPI_COMM_WORLD);
3591 * old_stokes_solution.reinit(stokes_solution);
3593 * temperature_rhs.reinit(temperature_partitioning,
3594 * temperature_relevant_partitioning,
3597 * temperature_solution.reinit(temperature_relevant_partitioning,
3599 * old_temperature_solution.reinit(temperature_solution);
3600 * old_old_temperature_solution.reinit(temperature_solution);
3602 * rebuild_stokes_matrix = true;
3603 * rebuild_stokes_preconditioner = true;
3604 * rebuild_temperature_matrices = true;
3605 * rebuild_temperature_preconditioner = true;
3613 * <a name="step_32-TheBoussinesqFlowProblemassemblyfunctions"></a>
3614 * <h4>The BoussinesqFlowProblem assembly functions</h4>
3618 * Following the discussion in the introduction and in the @ref threads
3619 * topic, we split the assembly functions into different parts:
3623 * <ul> <li> The local calculations of matrices and right hand sides, given
3624 * a certain cell as input (these functions are named
3625 * <code>local_assemble_*</code> below). The resulting function is, in other
3626 * words, essentially the body of the loop over all cells in @ref step_31 "step-31". Note,
3627 * however, that these functions store the result from the local
3628 * calculations in variables of classes from the CopyData namespace.
3632 * <li>These objects are then given to the second step which writes the
3633 * local data into the global data structures (these functions are named
3634 * <code>copy_local_to_global_*</code> below). These functions are pretty
3639 * <li>These two subfunctions are then used in the respective assembly
3640 * routine (called <code>assemble_*</code> below), where a WorkStream object
3641 * is set up and runs over all the cells that belong to the processor's subdomain.
3647 * <a name="step_32-Stokespreconditionerassembly"></a>
3669 * const unsigned int n_q_points =
3670 * scratch.stokes_fe_values.n_quadrature_points;
3675 * scratch.stokes_fe_values.reinit(cell);
3676 * cell->get_dof_indices(data.local_dof_indices);
3678 * data.local_matrix = 0;
3680 * for (unsigned int q = 0; q < n_q_points; ++q)
3682 * for (unsigned int k = 0; k < dofs_per_cell; ++k)
3684 * scratch.grad_phi_u[k] =
3686 * scratch.phi_p[k] = scratch.stokes_fe_values[pressure].value(k, q);
3689 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
3690 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
3691 * data.local_matrix(i, j) +=
3693 * scalar_product(scratch.grad_phi_u[i], scratch.grad_phi_u[j]) +
3696 * (scratch.phi_p[i] * scratch.phi_p[j])) *
3697 * scratch.stokes_fe_values.JxW(q);
3703 * template <int dim>
3705 * const Assembly::CopyData::StokesPreconditioner<dim> &data)
3708 * data.local_dof_indices,
3776 * using CellFilter =
3781 * Assembly::Scratch::StokesPreconditioner<dim> &scratch,
3782 * Assembly::CopyData::StokesPreconditioner<dim> &data) {
3787 * [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
3797 * Assembly::Scratch::StokesPreconditioner<dim>(
3802 * Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));
3818 * template <int dim>
3825 * " Build Stokes preconditioner");
3826 * pcout << " Rebuilding Stokes preconditioner..." << std::flush;
3831 * const std::vector<std::vector<bool>> constant_modes =
3836 * std::make_shared<TrilinosWrappers::PreconditionJacobi>();
3840 * Amg_data.constant_modes = constant_modes;
3842 * Amg_data.higher_order_elements = true;
3844 * Amg_data.aggregation_threshold = 0.02;
3852 * pcout << std::endl;
3859 * <a name="step_32-Stokessystemassembly"></a>
3876 *   template <int dim>
3879 *     Assembly::Scratch::StokesSystem<dim> &scratch,
3880 *     Assembly::CopyData::StokesSystem<dim> &data)
3882 *     const unsigned int dofs_per_cell =
3883 *       scratch.stokes_fe_values.get_fe().n_dofs_per_cell();
3884 *     const unsigned int n_q_points =
3885 *       scratch.stokes_fe_values.n_quadrature_points;
3890 *     scratch.stokes_fe_values.reinit(cell);
3897 *     data.local_matrix = 0;
3898 *     data.local_rhs = 0;
3900 *     scratch.temperature_fe_values.get_function_values(
3903 *     for (unsigned int q = 0; q < n_q_points; ++q)
3907 *         for (unsigned int k = 0; k < dofs_per_cell; ++k)
3909 *             scratch.phi_u[k] = scratch.stokes_fe_values[velocities].value(k, q);
3912 *                 scratch.grads_phi_u[k] =
3913 *                   scratch.stokes_fe_values[velocities].symmetric_gradient(k, q);
3914 *                 scratch.div_phi_u[k] =
3915 *                   scratch.stokes_fe_values[velocities].divergence(k, q);
3916 *                 scratch.phi_p[k] =
3917 *                   scratch.stokes_fe_values[pressure].value(k, q);
3922 *         for (unsigned int i = 0; i < dofs_per_cell; ++i)
3923 *           for (unsigned int j = 0; j < dofs_per_cell; ++j)
3924 *             data.local_matrix(i, j) +=
3926 *                (scratch.grads_phi_u[i] * scratch.grads_phi_u[j]) -
3928 *                 scratch.phi_p[j]) -
3930 *                 scratch.div_phi_u[j])) *
3931 *               scratch.stokes_fe_values.JxW(q);
3934 *           scratch.stokes_fe_values.quadrature_point(q));
3936 *       for (unsigned int i = 0; i < dofs_per_cell; ++i)
3938 *            gravity * scratch.phi_u[i]) *
3939 *           scratch.stokes_fe_values.JxW(q);
3942 *     cell->get_dof_indices(data.local_dof_indices);
3947 *   template <int dim>
3949 *     const Assembly::CopyData::StokesSystem<dim> &data)
3954 *         data.local_dof_indices,
3959 *         data.local_dof_indices,
3965 *   template <int dim>
3969 *                                     " Assemble Stokes system");
3978 *     using CellFilter =
3986 *           Assembly::Scratch::StokesSystem<dim> &scratch,
3987 *           Assembly::CopyData::StokesSystem<dim> &data) {
3988 * this->local_assemble_stokes_system(cell, scratch, data);
3990 *       [this](const Assembly::CopyData::StokesSystem<dim> &data) {
3991 * this->copy_local_to_global_stokes_system(data);
3993 *       Assembly::Scratch::StokesSystem<dim>(
4009 *     pcout << std::endl;
4016 * <a name="step_32-Temperaturematrixassembly"></a>
4033 *   template <int dim>
4036 *     Assembly::Scratch::TemperatureMatrix<dim> &scratch,
4037 *     Assembly::CopyData::TemperatureMatrix<dim> &data)
4039 *     const unsigned int dofs_per_cell =
4040 *       scratch.temperature_fe_values.get_fe().n_dofs_per_cell();
4041 *     const unsigned int n_q_points =
4042 *       scratch.temperature_fe_values.n_quadrature_points;
4044 *     scratch.temperature_fe_values.reinit(cell);
4045 *     cell->get_dof_indices(data.local_dof_indices);
4047 *     data.local_mass_matrix = 0;
4048 *     data.local_stiffness_matrix = 0;
4050 *     for (unsigned int q = 0; q < n_q_points; ++q)
4052 *         for (unsigned int k = 0; k < dofs_per_cell; ++k)
4054 *             scratch.grad_phi_T[k] =
4055 *               scratch.temperature_fe_values.shape_grad(k, q);
4056 *             scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
4059 *         for (unsigned int i = 0; i < dofs_per_cell; ++i)
4060 *           for (unsigned int j = 0; j < dofs_per_cell; ++j)
4062 *               data.local_mass_matrix(i, j) +=
4063 *                 (scratch.phi_T[i] * scratch.phi_T[j] *
4064 *                  scratch.temperature_fe_values.JxW(q));
4065 *               data.local_stiffness_matrix(i, j) +=
4067 *                  scratch.grad_phi_T[j] * scratch.temperature_fe_values.JxW(q));
4074 *   template <int dim>
4076 *     const Assembly::CopyData::TemperatureMatrix<dim> &data)
4079 *       data.local_dof_indices,
4082 *       data.local_stiffness_matrix,
4083 *       data.local_dof_indices,
4088 *   template <int dim>
4095 *                                     " Assemble temperature matrices");
4101 *     using CellFilter =
4110 *           Assembly::Scratch::TemperatureMatrix<dim> &scratch,
4111 *           Assembly::CopyData::TemperatureMatrix<dim> &data) {
4112 * this->local_assemble_temperature_matrix(cell, scratch, data);
4114 *       [this](const Assembly::CopyData::TemperatureMatrix<dim> &data) {
4115 * this->copy_local_to_global_temperature_matrix(data);
4133 * <a name="step_32-Temperaturerighthandsideassembly"></a>
4164 *     const unsigned int dofs_per_cell =
4165 *       scratch.temperature_fe_values.get_fe().n_dofs_per_cell();
4166 *     const unsigned int n_q_points =
4167 *       scratch.temperature_fe_values.n_quadrature_points;
4171 *     data.local_rhs = 0;
4172 *     data.matrix_for_bc = 0;
4173 *     cell->get_dof_indices(data.local_dof_indices);
4175 *     scratch.temperature_fe_values.reinit(cell);
4181 *     scratch.temperature_fe_values.get_function_values(
4183 *     scratch.temperature_fe_values.get_function_values(
4186 *     scratch.temperature_fe_values.get_function_gradients(
4188 *     scratch.temperature_fe_values.get_function_gradients(
4191 *     scratch.temperature_fe_values.get_function_laplacians(
4193 *     scratch.temperature_fe_values.get_function_laplacians(
4196 *     scratch.stokes_fe_values[velocities].get_function_values(
4198 *     scratch.stokes_fe_values[velocities].get_function_values(
4200 *     scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
4202 *     scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
4207 *       scratch.old_old_temperature_values,
4208 *       scratch.old_temperature_grads,
4209 *       scratch.old_old_temperature_grads,
4210 *       scratch.old_temperature_laplacians,
4211 *       scratch.old_old_temperature_laplacians,
4212 *       scratch.old_velocity_values,
4213 *       scratch.old_old_velocity_values,
4214 *       scratch.old_strain_rates,
4215 *       scratch.old_old_strain_rates,
4220 *       cell->diameter());
4222 *     for (unsigned int q = 0; q < n_q_points; ++q)
4224 *         for (unsigned int k = 0; k < dofs_per_cell; ++k)
4226 *             scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
4227 *             scratch.grad_phi_T[k] =
4228 *               scratch.temperature_fe_values.shape_grad(k, q);
4234 *            (scratch.old_temperature_values[q] *
4240 *         const double ext_T =
4243 *            scratch.old_old_temperature_values[q] *
4250 *            scratch.old_old_temperature_grads[q] * time_step /
4266 *         const double gamma =
4272 *         for (unsigned int i = 0; i < dofs_per_cell; ++i)
4274 *             data.local_rhs(i) +=
4278 *                time_step * gamma * scratch.phi_T[i]) *
4279 *               scratch.temperature_fe_values.JxW(q);
4282 *                   data.local_dof_indices[i]))
4284 *               for (unsigned int j = 0; j < dofs_per_cell; ++j)
4285 *                 data.matrix_for_bc(j, i) +=
4286 *                   (scratch.phi_T[i] * scratch.phi_T[j] *
4299 *   template <int dim>
4301 *     const Assembly::CopyData::TemperatureRHS<dim> &data)
4304 *       data.local_dof_indices,
4306 *       data.matrix_for_bc);
4332 *   template <int dim>
4354 *         std::make_shared<TrilinosWrappers::PreconditionJacobi>();
4384 *     using CellFilter =
4390 *           Assembly::Scratch::TemperatureRHS<dim> &scratch,
4391 *           Assembly::CopyData::TemperatureRHS<dim> &data) {
4400 *     auto copier = [this](const Assembly::CopyData::TemperatureRHS<dim> &data) {
4410 *       Assembly::Scratch::TemperatureRHS<dim>(
4422 * <a name="step_32-BoussinesqFlowProblemsolve"></a>
4423 * <h4>BoussinesqFlowProblem::solve</h4>
4472 * <code>solve()</code> functions. This is certainly not what we want to
4473 * happen here. Rather, we want to switch to the strong solver and continue
4474 * the solution process with whatever vector we got so far. Hence, we catch
4475 * the exception with the C++ try/catch mechanism. We then simply go through
4476 * the same solver sequence again in the <code>catch</code> clause, this
4477 * time passing the @p true flag to the preconditioner for the strong
4478 * solver, signaling an approximate CG solve.
4481 * template <int dim>
4482 * void BoussinesqFlowProblem<dim>::solve()
4485 * TimerOutput::Scope timer_section(computing_timer,
4486 * " Solve Stokes system");
4488 * pcout << " Solving Stokes system... " << std::flush;
4490 * TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(
4492 * distributed_stokes_solution = stokes_solution;
4494 * distributed_stokes_solution.block(1) /= EquationData::pressure_scaling;
4496 * const unsigned int
4497 * start = (distributed_stokes_solution.block(0).size() +
4498 * distributed_stokes_solution.block(1).local_range().first),
4499 * end = (distributed_stokes_solution.block(0).size() +
4500 * distributed_stokes_solution.block(1).local_range().second);
4501 * for (unsigned int i = start; i < end; ++i)
4502 * if (stokes_constraints.is_constrained(i))
4503 * distributed_stokes_solution(i) = 0;
4506 * PrimitiveVectorMemory<TrilinosWrappers::MPI::BlockVector> mem;
4508 * unsigned int n_iterations = 0;
4509 * const double solver_tolerance = 1e-8 * stokes_rhs.l2_norm();
4510 * SolverControl solver_control(30, solver_tolerance);
4514 * const LinearSolvers::BlockSchurPreconditioner<
4515 * TrilinosWrappers::PreconditionAMG,
4516 * TrilinosWrappers::PreconditionJacobi>
4517 * preconditioner(stokes_matrix,
4518 * stokes_preconditioner_matrix,
4519 * *Mp_preconditioner,
4520 * *Amg_preconditioner,
4523 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
4526 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
4528 * solver.solve(stokes_matrix,
4529 * distributed_stokes_solution,
4533 * n_iterations = solver_control.last_step();
4536 * catch (SolverControl::NoConvergence &)
4538 * const LinearSolvers::BlockSchurPreconditioner<
4539 * TrilinosWrappers::PreconditionAMG,
4540 * TrilinosWrappers::PreconditionJacobi>
4541 * preconditioner(stokes_matrix,
4542 * stokes_preconditioner_matrix,
4543 * *Mp_preconditioner,
4544 * *Amg_preconditioner,
4547 * SolverControl solver_control_refined(stokes_matrix.m(),
4548 * solver_tolerance);
4549 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
4550 * solver_control_refined,
4552 * SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
4554 * solver.solve(stokes_matrix,
4555 * distributed_stokes_solution,
4560 * (solver_control.last_step() + solver_control_refined.last_step());
4564 * stokes_constraints.distribute(distributed_stokes_solution);
4566 * distributed_stokes_solution.block(1) *= EquationData::pressure_scaling;
4568 * stokes_solution = distributed_stokes_solution;
4569 * pcout << n_iterations << " iterations." << std::endl;
4585 * want to read the time stepping section in @cite HDGB17 .)
4589 * After temperature right hand side assembly, we solve the linear
4590 * system for temperature (with fully distributed vectors without
4591 * ghost elements and using the solution from the last timestep as
4592 * our initial guess for the iterative solver), apply constraints,
4593 * and copy the vector back to one with ghosts.
4597 * In the end, we extract the temperature range similarly to @ref step_31 "step-31" to
4598 * produce some output (for example in order to help us choose the
4599 * stabilization constants, as discussed in the introduction). The only
4600 * difference is that we need to exchange maxima over all processors.
4604 * TimerOutput::Scope timer_section(computing_timer,
4605 * " Assemble temperature rhs");
4607 * old_time_step = time_step;
4609 * const double scaling = (dim == 3 ? 0.25 : 1.0);
4610 * time_step = (scaling / (2.1 * dim * std::sqrt(1. * dim)) /
4611 * (parameters.temperature_degree * get_cfl_number()));
4613 * const double maximal_velocity = get_maximal_velocity();
4614 * pcout << " Maximal velocity: "
4615 * << maximal_velocity * EquationData::year_in_seconds * 100
4616 * << " cm/year" << std::endl;
4618 * << "Time step: " << time_step / EquationData::year_in_seconds
4619 * << " years" << std::endl;
4621 * assemble_temperature_system(maximal_velocity);
4625 * TimerOutput::Scope timer_section(computing_timer,
4626 * " Solve temperature system");
4628 * SolverControl solver_control(temperature_matrix.m(),
4629 * 1e-12 * temperature_rhs.l2_norm());
4630 * SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);
4632 * TrilinosWrappers::MPI::Vector distributed_temperature_solution(
4634 * distributed_temperature_solution = old_temperature_solution;
4636 * cg.solve(temperature_matrix,
4637 * distributed_temperature_solution,
4639 * *T_preconditioner);
4641 * temperature_constraints.distribute(distributed_temperature_solution);
4642 * temperature_solution = distributed_temperature_solution;
4644 * pcout << " " << solver_control.last_step()
4645 * << " CG iterations for temperature" << std::endl;
4647 * double temperature[2] = {std::numeric_limits<double>::max(),
4648 * -std::numeric_limits<double>::max()};
4649 * double global_temperature[2];
4651 * for (unsigned int i =
4652 * distributed_temperature_solution.local_range().first;
4653 * i < distributed_temperature_solution.local_range().second;
4657 * std::min<double>(temperature[0],
4658 * distributed_temperature_solution(i));
4660 * std::max<double>(temperature[1],
4661 * distributed_temperature_solution(i));
4664 * temperature[0] *= -1.0;
4665 * Utilities::MPI::max(temperature, MPI_COMM_WORLD, global_temperature);
4666 * global_temperature[0] *= -1.0;
4668 * pcout << " Temperature range: " << global_temperature[0] << ' '
4669 * << global_temperature[1] << std::endl;
4677 * <a name="step_32-BoussinesqFlowProblemoutput_results"></a>
4678 * <h4>BoussinesqFlowProblem::output_results</h4>
4682 * Next comes the function that generates the output. The quantities to
4683 * output could be introduced manually like we did in @ref step_31 "step-31". An
4684 * alternative is to hand this task over to a class PostProcessor that
4685 * inherits from the class DataPostprocessor, which can be attached to
4686 * DataOut. This allows us to output derived quantities from the solution,
4687 * like the friction heating included in this example. It overloads the
4688 * virtual function DataPostprocessor::evaluate_vector_field(),
4689 * which is then internally called from DataOut::build_patches(). We have to
4690 * give it values of the numerical solution, its derivatives, normals to the
4691 * cell, the actual evaluation points and any additional quantities. This
4692 * follows the same procedure as discussed in @ref step_29 "step-29" and other programs.
4695 * template <int dim>
4696 * class BoussinesqFlowProblem<dim>::Postprocessor
4697 * : public DataPostprocessor<dim>
4700 * Postprocessor(const unsigned int partition, const double minimal_pressure);
4702 * virtual void evaluate_vector_field(
4703 * const DataPostprocessorInputs::Vector<dim> &inputs,
4704 * std::vector<Vector<double>> &computed_quantities) const override;
4706 * virtual std::vector<std::string> get_names() const override;
4708 * virtual std::vector<
4709 * DataComponentInterpretation::DataComponentInterpretation>
4710 * get_data_component_interpretation() const override;
4712 * virtual UpdateFlags get_needed_update_flags() const override;
4715 * const unsigned int partition;
4716 * const double minimal_pressure;
4720 * template <int dim>
4721 * BoussinesqFlowProblem<dim>::Postprocessor::Postprocessor(
4722 * const unsigned int partition,
4723 * const double minimal_pressure)
4724 * : partition(partition)
4725 * , minimal_pressure(minimal_pressure)
4731 * Here we define the names for the variables we want to output. These are
4732 * the actual solution values for velocity, pressure, and temperature, as
4733 * well as the friction heating and to each cell the number of the processor
4734 * that owns it. This allows us to visualize the partitioning of the domain
4735 * among the processors. Except for the velocity, which is vector-valued,
4736 * all other quantities are scalar.
4739 * template <int dim>
4740 * std::vector<std::string>
4741 * BoussinesqFlowProblem<dim>::Postprocessor::get_names() const
4743 * std::vector<std::string> solution_names(dim, "velocity");
4744 * solution_names.emplace_back("p");
4745 * solution_names.emplace_back("T");
4746 * solution_names.emplace_back("friction_heating");
4747 * solution_names.emplace_back("partition");
4749 * return solution_names;
4753 * template <int dim>
4754 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
4755 * BoussinesqFlowProblem<dim>::Postprocessor::get_data_component_interpretation()
4758 * std::vector<DataComponentInterpretation::DataComponentInterpretation>
4759 * interpretation(dim,
4760 * DataComponentInterpretation::component_is_part_of_vector);
4762 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4763 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4764 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4765 * interpretation.push_back(DataComponentInterpretation::component_is_scalar);
4767 * return interpretation;
4771 * template <int dim>
4773 * BoussinesqFlowProblem<dim>::Postprocessor::get_needed_update_flags() const
4775 * return update_values | update_gradients | update_quadrature_points;
4781 * Now we implement the function that computes the derived quantities. As we
4782 * also did for the output, we rescale the velocity from its SI units to
4783 * something more readable, namely cm/year. Next, the pressure is scaled to
4784 * be between 0 and the maximum pressure. This makes it more easily
4785 * comparable -- in essence making all pressure variables positive or
4786 * zero. Temperature is taken as is, and the friction heating is computed as
4787 * @f$2 \eta \varepsilon(\mathbf{u}) \cdot \varepsilon(\mathbf{u})@f$.
4791 * The quantities we output here are more for illustration, rather than for
4792 * actual scientific value. We come back to this briefly in the results
4793 * section of this program and explain what one may in fact be interested in.
4796 * template <int dim>
4797 * void BoussinesqFlowProblem<dim>::Postprocessor::evaluate_vector_field(
4798 * const DataPostprocessorInputs::Vector<dim> &inputs,
4799 * std::vector<Vector<double>> &computed_quantities) const
4801 * const unsigned int n_evaluation_points = inputs.solution_values.size();
4802 * Assert(inputs.solution_gradients.size() == n_evaluation_points,
4803 * ExcInternalError());
4804 * Assert(computed_quantities.size() == n_evaluation_points,
4805 * ExcInternalError());
4806 * Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());
4808 * for (unsigned int p = 0; p < n_evaluation_points; ++p)
4810 * for (unsigned int d = 0; d < dim; ++d)
4811 * computed_quantities[p](d) = (inputs.solution_values[p](d) *
4812 * EquationData::year_in_seconds * 100);
4814 * const double pressure =
4815 * (inputs.solution_values[p](dim) - minimal_pressure);
4816 * computed_quantities[p](dim) = pressure;
4818 * const double temperature = inputs.solution_values[p](dim + 1);
4819 * computed_quantities[p](dim + 1) = temperature;
4821 * Tensor<2, dim> grad_u;
4822 * for (unsigned int d = 0; d < dim; ++d)
4823 * grad_u[d] = inputs.solution_gradients[p][d];
4824 * const SymmetricTensor<2, dim> strain_rate = symmetrize(grad_u);
4825 * computed_quantities[p](dim + 2) =
4826 * 2 * EquationData::eta * strain_rate * strain_rate;
4828 * computed_quantities[p](dim + 3) = partition;
4835 * The <code>output_results()</code> function has a similar task to the one
4836 * in @ref step_31 "step-31". However, here we are going to demonstrate a different
4837 * technique on how to merge output from different DoFHandler objects. The
4851 * information we need! So we step through all cells (with iterators into
4852 * all three DoFHandlers moving in sync), and for each joint cell dof, we
4853 * read out that component using the FiniteElement::system_to_base_index
4854 * function (see there for a description of what the various parts of its
4855 * return value contain). We also need to keep track whether we're on a
4872 * contribution into a separate file. This we do by adding an additional
4873 * number to the filename when we write the solution. This is not really
4874 * new, we did it similarly in @ref step_40 "step-40". Note that we write in the compressed
4875 * format @p .vtu instead of plain vtk files, which saves quite some
4880 * All the rest of the work is done in the PostProcessor class.
4883 * template <int dim>
4884 * void BoussinesqFlowProblem<dim>::output_results()
4886 * TimerOutput::Scope timer_section(computing_timer, "Postprocessing");
4888 * const FESystem<dim> joint_fe(stokes_fe, 1, temperature_fe, 1);
4890 * DoFHandler<dim> joint_dof_handler(triangulation);
4891 * joint_dof_handler.distribute_dofs(joint_fe);
4892 * Assert(joint_dof_handler.n_dofs() ==
4893 * stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(),
4894 * ExcInternalError());
4896 * TrilinosWrappers::MPI::Vector joint_solution;
4897 * joint_solution.reinit(joint_dof_handler.locally_owned_dofs(),
4901 * std::vector<types::global_dof_index> local_joint_dof_indices(
4902 * joint_fe.n_dofs_per_cell());
4903 * std::vector<types::global_dof_index> local_stokes_dof_indices(
4904 * stokes_fe.n_dofs_per_cell());
4905 * std::vector<types::global_dof_index> local_temperature_dof_indices(
4906 * temperature_fe.n_dofs_per_cell());
4908 * typename DoFHandler<dim>::active_cell_iterator
4909 * joint_cell = joint_dof_handler.begin_active(),
4910 * joint_endc = joint_dof_handler.end(),
4911 * stokes_cell = stokes_dof_handler.begin_active(),
4912 * temperature_cell = temperature_dof_handler.begin_active();
4913 * for (; joint_cell != joint_endc;
4914 * ++joint_cell, ++stokes_cell, ++temperature_cell)
4915 * if (joint_cell->is_locally_owned())
4917 * joint_cell->get_dof_indices(local_joint_dof_indices);
4918 * stokes_cell->get_dof_indices(local_stokes_dof_indices);
4919 * temperature_cell->get_dof_indices(local_temperature_dof_indices);
4921 * for (unsigned int i = 0; i < joint_fe.n_dofs_per_cell(); ++i)
4922 * if (joint_fe.system_to_base_index(i).first.first == 0)
4924 * Assert(joint_fe.system_to_base_index(i).second <
4925 * local_stokes_dof_indices.size(),
4926 * ExcInternalError());
4928 * joint_solution(local_joint_dof_indices[i]) = stokes_solution(
4929 * local_stokes_dof_indices[joint_fe.system_to_base_index(i)
4934 * Assert(joint_fe.system_to_base_index(i).first.first == 1,
4935 * ExcInternalError());
4936 * Assert(joint_fe.system_to_base_index(i).second <
4937 * local_temperature_dof_indices.size(),
4938 * ExcInternalError());
4939 * joint_solution(local_joint_dof_indices[i]) =
4940 * temperature_solution(
4941 * local_temperature_dof_indices
4942 * [joint_fe.system_to_base_index(i).second]);
4947 * joint_solution.compress(VectorOperation::insert);
4949 * const IndexSet locally_relevant_joint_dofs =
4950 * DoFTools::extract_locally_relevant_dofs(joint_dof_handler);
4951 * TrilinosWrappers::MPI::Vector locally_relevant_joint_solution;
4952 * locally_relevant_joint_solution.reinit(locally_relevant_joint_dofs,
4954 * locally_relevant_joint_solution = joint_solution;
4956 * Postprocessor postprocessor(Utilities::MPI::this_mpi_process(
4958 * stokes_solution.block(1).min());
4960 * DataOut<dim> data_out;
4961 * data_out.attach_dof_handler(joint_dof_handler);
4962 * data_out.add_data_vector(locally_relevant_joint_solution, postprocessor);
4963 * data_out.build_patches();
4965 * static int out_index = 0;
4966 * data_out.write_vtu_with_pvtu_record(
4967 * "./", "solution", out_index, MPI_COMM_WORLD, 5);
4977 * <a name="step_32-BoussinesqFlowProblemrefine_mesh"></a>
4978 * <h4>BoussinesqFlowProblem::refine_mesh</h4>
5009 *   template <int dim>
5021 *                                     "Refine mesh structure, part 1");
5044 *         cell->clear_refine_flag();
5062 *     const std::vector<const TrilinosWrappers::MPI::BlockVector *> x_stokes = {
5077 *                                     "Refine mesh structure, part 2");
5104 *     std::vector<TrilinosWrappers::MPI::BlockVector *> stokes_tmp = {
5129 * <a name="step_32-BoussinesqFlowProblemrun"></a>
5130 * <h4>BoussinesqFlowProblem::run</h4>
5140 *   template <int dim>
5147 *       (dim == 3) ? 96 : 12,
5152 *     triangulation.refine_global(parameters.initial_global_refinement);
5167 * since we use standard Lagrange elements of moderate order this function
5171 * VectorTools::project(temperature_dof_handler,
5172 * temperature_constraints,
5173 * QGauss<dim>(parameters.temperature_degree + 2),
5174 * EquationData::TemperatureInitialValues<dim>(),
5178 * Having so computed the current temperature field, let us set the member
5179 * variable that holds the temperature nodes. Strictly speaking, we really
5180 * only need to set <code>old_temperature_solution</code> since the first
5181 * thing we will do is to compute the Stokes solution that only requires
5184 * it's a relatively cheap operation and we only have to do it once at the
5185 * beginning of the program) if we ever want to extend our numerical
5186 * method or physical model, and so we initialize
5187 * <code>old_temperature_solution</code> and
5188 * <code>old_old_temperature_solution</code> as well. The assignment makes
5189 * sure that the vectors on the left hand side (which were initialized to
5190 * contain ghost elements as well) also get the correct ghost elements. In
5191 * other words, the assignment here requires communication between
5195 * temperature_solution = solution;
5196 * old_temperature_solution = solution;
5197 * old_old_temperature_solution = solution;
5200 * timestep_number = 0;
5201 * time_step = old_time_step = 0;
5207 * pcout << "Timestep " << timestep_number
5208 * << ": t=" << time / EquationData::year_in_seconds << " years"
5211 * assemble_stokes_system();
5212 * build_stokes_preconditioner();
5213 * assemble_temperature_matrix();
5217 * pcout << std::endl;
5219 * if ((timestep_number == 0) &&
5220 * (pre_refinement_step < parameters.initial_adaptive_refinement))
5222 * refine_mesh(parameters.initial_global_refinement +
5223 * parameters.initial_adaptive_refinement);
5224 * ++pre_refinement_step;
5225 * goto start_time_iteration;
5227 * else if ((timestep_number > 0) &&
5228 * (timestep_number % parameters.adaptive_refinement_interval ==
5230 * refine_mesh(parameters.initial_global_refinement +
5231 * parameters.initial_adaptive_refinement);
5233 * if ((parameters.generate_graphical_output == true) &&
5234 * (timestep_number % parameters.graphical_output_interval == 0))
5239 * In order to speed up linear solvers, we extrapolate the solutions
5240 * from the old time levels to the new one. This gives a very good
5241 * initial guess, cutting the number of iterations needed in solvers
5242 * by more than one half. We do not need to extrapolate in the last
5243 * iteration, so if we reached the final time, we stop here.
5247 * As the last thing during a time step (before actually bumping up
5248 * the number of the time step), we check whether the current time
5249 * step number is divisible by 100, and if so we let the computing
5250 * timer print a summary of CPU times spent so far.
5253 * if (time > parameters.end_time * EquationData::year_in_seconds)
5256 * TrilinosWrappers::MPI::BlockVector old_old_stokes_solution;
5257 * old_old_stokes_solution = old_stokes_solution;
5258 * old_stokes_solution = stokes_solution;
5259 * old_old_temperature_solution = old_temperature_solution;
5260 * old_temperature_solution = temperature_solution;
5261 * if (old_time_step > 0)
5265 * Trilinos sadd does not like ghost vectors even as input. Copy
5266 * into distributed vectors for now:
5270 * TrilinosWrappers::MPI::BlockVector distr_solution(stokes_rhs);
5271 * distr_solution = stokes_solution;
5272 * TrilinosWrappers::MPI::BlockVector distr_old_solution(stokes_rhs);
5273 * distr_old_solution = old_old_stokes_solution;
5274 * distr_solution.sadd(1. + time_step / old_time_step,
5275 * -time_step / old_time_step,
5276 * distr_old_solution);
5277 * stokes_solution = distr_solution;
5280 * TrilinosWrappers::MPI::Vector distr_solution(temperature_rhs);
5281 * distr_solution = temperature_solution;
5282 * TrilinosWrappers::MPI::Vector distr_old_solution(temperature_rhs);
5283 * distr_old_solution = old_old_temperature_solution;
5284 * distr_solution.sadd(1. + time_step / old_time_step,
5285 * -time_step / old_time_step,
5286 * distr_old_solution);
5287 * temperature_solution = distr_solution;
5291 * if ((timestep_number > 0) && (timestep_number % 100 == 0))
5292 * computing_timer.print_summary();
5294 * time += time_step;
5295 * ++timestep_number;
5301 * If we are generating graphical output, do so also for the last time
5302 * step unless we had just done so before we left the do-while loop
5305 * if ((parameters.generate_graphical_output == true) &&
5306 * !((timestep_number - 1) % parameters.graphical_output_interval == 0))
5309 * } // namespace Step32
5316 * <a name="step_32-Thecodemaincodefunction"></a>
5317 * <h3>The <code>main</code> function</h3>
5321 * The main function is short as usual and very similar to the one in
5322 * @ref step_31 "step-31". Since we use a parameter file which is specified as an argument in
5323 * the command line, we have to read it in here and pass it on to the
5324 * Parameters class for parsing. If no filename is given in the command line,
5325 * we simply use the <code>step-32.prm</code> file which is distributed
5326 * together with the program.
5330 * Because 3d computations are simply very slow unless you throw a lot of
5331 * processors at them, the program defaults to 2d. You can get the 3d version
5332 * by changing the constant dimension below to 3.
5335 * int main(int argc, char *argv[])
5339 * using namespace Step32;
5340 * using namespace dealii;
5342 * Utilities::MPI::MPI_InitFinalize mpi_initialization(
5343 * argc, argv, numbers::invalid_unsigned_int);
5345 * std::string parameter_filename;
5347 * parameter_filename = argv[1];
5349 * parameter_filename = "step-32.prm";
5351 * const int dim = 2;
5352 * BoussinesqFlowProblem<dim>::Parameters parameters(parameter_filename);
5353 * BoussinesqFlowProblem<dim> flow_problem(parameters);
5354 * flow_problem.run();
5356 * catch (std::exception &exc)
5358 * std::cerr << std::endl
5360 * << "----------------------------------------------------"
5362 * std::cerr << "Exception on processing: " << std::endl
5363 * << exc.what() << std::endl
5364 * << "Aborting!" << std::endl
5365 * << "----------------------------------------------------"
5372 * std::cerr << std::endl
5374 * << "----------------------------------------------------"
5376 * std::cerr << "Unknown exception!" << std::endl
5377 * << "Aborting!" << std::endl
5378 * << "----------------------------------------------------"
5386<a name="step_32-Results"></a><h1>Results</h1>
5389When run, the program simulates convection in 3d in much the same way
5390as @ref step_31 "step-31" did, though with an entirely different testcase.
5393<a name="step_32-Comparisonofresultswithstep31"></a><h3>Comparison of results with step-31</h3>
5396Before we go to this testcase, however, let us show a few results from a
5397slightly earlier version of this program that was solving exactly the
5398testcase we used in @ref step_31 "step-31", just that we now solve it in parallel and with
5399much higher resolution. We show these results mainly for comparison.
5401Here are two images that show this higher resolution if we choose a 3d
5402computation in <code>main()</code> and if we set
5403<code>initial_refinement=3</code> and
5404<code>n_pre_refinement_steps=4</code>. At the time steps shown, the
5405meshes had around 72,000 and 236,000 cells, for a total of 2,680,000
5406and 8,250,000 degrees of freedom, respectively, more than an order of
5407magnitude more than we had available in @ref step_31 "step-31":
5409<table align="center" class="doxtable">
5412 <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.0.png" alt="">
5417 <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.1.png" alt="">
5422The computation was done on a subset of 50 processors of the Brazos
5423cluster at Texas A&M University.
5426<a name="step_32-Resultsfora2dcircularshelltestcase"></a><h3>Results for a 2d circular shell testcase</h3>
5429Next, we will run @ref step_32 "step-32" with the parameter file in the directory with one
5430change: we increase the final time to 1e9. Here we are using 16 processors. The
5431command to launch is (note that @ref step_32 "step-32".prm is the default):
5435\$ mpirun -np 16 ./step-32
5439Note that running a job on a cluster typically requires going through a job
5447Number of degrees of freedom: 186,624 (99,840+36,864+49,920)
5459Number of degrees of freedom: 252,723 (136,640+47,763+68,320)
5471Number of degrees of freedom: 321,246 (174,312+59,778+87,156)
5509Number of degrees of freedom: 903,408 (490,102+168,255+245,051)
5513+---------------------------------------------+------------+------------+
5517+---------------------------------+-----------+------------+------------+
5521| Build Stokes preconditioner     |        12 |      2.09s |       1.8% |
5528+---------------------------------+-----------+------------+------------+
5532+---------------------------------------------+------------+------------+
5536+---------------------------------+-----------+------------+------------+
5540| Build Stokes preconditioner     |      4707 |  1.48e+03s |       1.6% |
5547+---------------------------------+-----------+------------+------------+
5565 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-000.png" alt="">
5568 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-050.png" alt="">
5571 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-100.png" alt="">
5576 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-150.png" alt="">
5579 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-200.png" alt="">
5582 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-250.png" alt="">
5587 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-300.png" alt="">
5590 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-350.png" alt="">
5593 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-400.png" alt="">
5598 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-450.png" alt="">
5601 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-500.png" alt="">
5604 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-550.png" alt="">
5609 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-600.png" alt="">
5612 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-cells.png" alt="">
5615 <img src="https://www.dealii.org/images/steps/developer/step-32-2d-partition.png" alt="">
5624href="https://www.dealii.org/images/steps/developer/step-32-2d-temperature.webm">shown
5629through several stages: First, it gets rid of the unstable temperature
5630layering with the hot material overlain by the dense cold
5631material. After this great driver is removed and we have a sort of
5632stable situation, a few blobs start to separate from the hot boundary
5633layer at the inner ring and rise up, with a few cold fingers also
5634dropping down from the outer boundary layer. During this phase, the solution
5635remains mostly symmetric, reflecting the 12-fold symmetry of the
5636original mesh. In a final phase, the fluid enters vigorous chaotic
5637stirring in which all symmetries are lost. This is a pattern that then
5638continues to dominate flow.
5640These different phases can also be identified if we look at the
5641maximal velocity as a function of time in the simulation:
5643<img src="https://www.dealii.org/images/steps/developer/step-32.2d.t_vs_vmax.png" alt="">
5645Here, the velocity (shown in centimeters per year) becomes very large,
5646on the order of several meters per year, at the beginning when the
5647temperature layering is unstable. It then calms down to relatively
5648small values before picking up again in the chaotic stirring
5649regime. There, it remains in the range of 10-40 centimeters per year,
5650quite within the physically expected region.
5653<a name="step_32-Resultsfora3dsphericalshelltestcase"></a><h3>Results for a 3d spherical shell testcase</h3>
56563d computations are very expensive computationally. Furthermore, as
5657seen above, interesting behavior only starts after quite a long time
5658requiring more CPU hours than is available on a typical
5659cluster. Consequently, rather than showing a complete simulation here,
5660let us simply show a couple of pictures we have obtained using the
5661successor to this program, called <i>ASPECT</i> (short for <i>Advanced
5669<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.solution.png" alt="">
5671<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.partition.png" alt="">
5675<a name="step-32-extensions"></a>
5676<a name="step_32-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
5682in Earth's ConvecTion</i>) code at the time this tutorial program is being
5683finished. Specifically, the following are certainly topics that one should
5684address to make the program more useful:
5687 <li> <b>Adiabatic heating/cooling:</b>
5688 The temperature field we get in our simulations after a while
5689 is mostly constant with boundary layers at the inner and outer
5690 boundary, and streamers of cold and hot material mixing
5749 how this can be resolved.
5751<li> <b>Nonlinear material models:</b> As already hinted at in various places,
5752 material parameters such as the density, the viscosity, and the various
5753 thermal parameters are not constant throughout the earth mantle. Rather,
5754 they nonlinearly depend on the pressure and temperature, and in the case of
5755 the viscosity on the strain rate @f$\varepsilon(\mathbf u)@f$. For complicated
5756 models, the only way to solve such models accurately may be to actually
5757 iterate this dependence out in each time step, rather than simply freezing
5758 coefficients at values extrapolated from the previous time step(s).
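
  As a purely illustrative sketch (neither the constants nor the functional
  form below come from this program), such a dependence could be encoded in a
  small viscosity function that the assembly routines would then call with
  temperature and strain-rate values extrapolated from previous time steps, or
  with values from the current nonlinear iteration:
@code
#include <algorithm>
#include <cmath>

// Hypothetical Arrhenius-type temperature dependence combined with
// power-law weakening in the strain rate; all constants are made up.
double viscosity(const double temperature,      // in Kelvin
                 const double strain_rate_norm) // |eps(u)|, in 1/s
{
  const double eta_0   = 1e21;   // reference viscosity, Pa s
  const double E       = 2e5;    // activation energy, J/mol
  const double R       = 8.314;  // gas constant, J/(mol K)
  const double T_ref   = 1600.;  // reference temperature, K
  const double n       = 3.0;    // power-law exponent
  const double eps_ref = 1e-15;  // reference strain rate, 1/s

  const double arrhenius = std::exp(E / R * (1. / temperature - 1. / T_ref));
  const double power_law =
    std::pow(std::max(strain_rate_norm, eps_ref) / eps_ref, (1. - n) / n);

  return eta_0 * arrhenius * power_law;
}
@endcode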
5760<li> <b>Checkpoint/restart:</b> Running this program in 2d on a number of
5761 processors allows solving realistic models in a day or two. However, in 3d,
5762 compute times are so large that one runs into two typical problems: (i) On
5763 most compute clusters, the queuing system limits run times for individual
5764 jobs to 2 or 3 days; (ii) losing the results of a computation due to
5765 hardware failures, misconfigurations, or power outages is a shame when
5766 running on hundreds of processors for a couple of days. Both of these
5767 problems can be addressed by periodically saving the state of the program
5768 and, if necessary, restarting the program at this point. This technique is
5769 commonly called <i>checkpoint/restart</i> and it requires that the entire
5770 state of the program is written to a permanent storage location (e.g. a hard
5771 drive). Given the complexity of the data structures of this program, this is
5772 not entirely trivial (it may also involve writing gigabytes or more of
5773 data), but it can be made easier by realizing that one can save the state
5774 between two time steps where it essentially only consists of the mesh and
5775 solution vectors; during restart one would then first re-enumerate degrees
5776 of freedom in the same way as done before and then re-assemble
5777 matrices. Nevertheless, given the distributed nature of the data structures
5778 involved here, saving and restoring the state of a program is not
5779 trivial. An additional complexity is introduced by the fact that one may
5780 want to change the number of processors between runs, for example because
5781 one may wish to continue computing on a mesh that is finer than the one used
5782 to precompute a starting temperature field at an intermediate time.
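
  A rough sketch of what saving and restoring mesh plus solution vectors could
  look like is given below. It assumes that scalars such as the current time
  and time step number are stored separately, uses hypothetical file and
  function names, and relies on parallel::distributed::Triangulation::save()
  and load() together with the distributed SolutionTransfer class (in recent
  deal.II versions the plain SolutionTransfer class plays this role):
@code
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

// Write mesh and one solution vector to disk (hypothetical file name).
template <int dim>
void save_checkpoint(parallel::distributed::Triangulation<dim> &triangulation,
                     const DoFHandler<dim>                     &dof_handler,
                     const TrilinosWrappers::MPI::Vector       &solution)
{
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    transfer(dof_handler);
  transfer.prepare_for_serialization(solution);
  triangulation.save("checkpoint.mesh");
}

// Read everything back; 'solution' must already be reinit()ed with the
// locally owned index set of the re-distributed DoFHandler.
template <int dim>
void load_checkpoint(parallel::distributed::Triangulation<dim> &triangulation,
                     DoFHandler<dim>                           &dof_handler,
                     const FiniteElement<dim>                  &fe,
                     TrilinosWrappers::MPI::Vector             &solution)
{
  triangulation.load("checkpoint.mesh");
  dof_handler.distribute_dofs(fe);

  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    transfer(dof_handler);
  transfer.deserialize(solution);
}
@endcode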
5784<li> <b>Predictive postprocessing:</b> The point of computations like this is
5785 not simply to solve the equations. Rather, it is typically the exploration
5786 of different physical models and their comparison with things that we can
5787 measure at the earth surface, in order to find which models are realistic
5788 and which are contradicted by reality. To this end, we need to compute
5789 quantities from our solution vectors that are related to what we can
5790 observe. Among these are, for example, heat fluxes at the surface of the
5791 earth, as well as seismic velocities throughout the mantle as these affect
5792 earthquake waves that are recorded by seismographs.
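
  As an example of the kind of quantity one could compute, here is a sketch of
  integrating the conductive heat flux @f$-k\nabla T\cdot \mathbf n@f$ over the
  outer boundary. It assumes a constant conductivity and assumes that boundary
  indicator 1 marks the surface, neither of which is taken from this program:
@code
#include <deal.II/base/mpi.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

// Integrate -k grad T . n over all faces with boundary id 1 (assumed to
// mark the outer surface); 'temperature' must be a ghosted vector.
template <int dim>
double surface_heat_flux(const DoFHandler<dim>               &dof_handler,
                         const TrilinosWrappers::MPI::Vector &temperature,
                         const double                         conductivity)
{
  const QGauss<dim - 1> face_quadrature(3);
  FEFaceValues<dim>     fe_face_values(dof_handler.get_fe(),
                                       face_quadrature,
                                       update_gradients |
                                         update_normal_vectors |
                                         update_JxW_values);
  std::vector<Tensor<1, dim>> grad_T(face_quadrature.size());

  double local_flux = 0;
  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      for (const unsigned int f : cell->face_indices())
        if (cell->face(f)->at_boundary() &&
            (cell->face(f)->boundary_id() == 1))
          {
            fe_face_values.reinit(cell, f);
            fe_face_values.get_function_gradients(temperature, grad_T);
            for (unsigned int q = 0; q < face_quadrature.size(); ++q)
              local_flux += -conductivity * grad_T[q] *
                            fe_face_values.normal_vector(q) *
                            fe_face_values.JxW(q);
          }

  return Utilities::MPI::sum(local_flux, MPI_COMM_WORLD);
}
@endcode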
5794<li> <b>Better refinement criteria:</b> As can be seen above for the
57953d case, the mesh in 3d is primarily refined along the inner
5796boundary. This is because the boundary layer there is stronger than
5797any other transition in the domain, leading us to refine there almost
5798exclusively and basically not at all following the plumes. One
5799certainly needs better refinement criteria to track the parts of the
5800solution we are really interested in better than the criterion used
5801here, namely the KellyErrorEstimator applied to the temperature, is able to.
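
One alternative that is sometimes suggested (a sketch only; whether it does
better for this problem would have to be tested) is to refine based on a
cell-wise approximation of the temperature gradient, scaled by a power of the
cell diameter:
@code
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>
#include <deal.II/numerics/derivative_approximation.h>

#include <cmath>

using namespace dealii;

// Fill one indicator per active cell: |grad T|_K * h_K^(1+dim/2).
// 'temperature' is assumed to be a ghosted vector, and 'indicators'
// sized to the number of active cells.
template <int dim>
void gradient_based_indicators(const DoFHandler<dim>               &dof_handler,
                               const TrilinosWrappers::MPI::Vector &temperature,
                               Vector<float>                       &indicators)
{
  DerivativeApproximation::approximate_gradient(dof_handler,
                                                temperature,
                                                indicators);

  for (const auto &cell : dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      indicators(cell->active_cell_index()) *=
        std::pow(cell->diameter(), 1. + dim / 2.);
}
@endcode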
5806There are many other ways to extend the current program. However, rather than
5807discussing them here, let us point to the much larger open
5808source code ASPECT (see https://aspect.geodynamics.org/ ) that constitutes the
5809further development of @ref step_32 "step-32" and that already includes many such possible
5813<a name="step_32-PlainProg"></a>
5814<h1> The plain program</h1>
5815@include "step-32.cc"