1517 * constexpr double kappa = 1e-6;
1518 * constexpr double reference_density = 3300;
1519 * constexpr double reference_temperature = 293;
1520 * constexpr double expansion_coefficient = 2e-5;
1521 * constexpr double specific_heat = 1250;
1522 * constexpr double radiogenic_heating = 7.4e-12;
1525 * constexpr double R0 = 6371000. - 2890000.;
1526 * constexpr double R1 = 6371000. - 35000.;
1528 * constexpr double T0 = 4000 + 273;
1529 * constexpr double T1 = 700 + 273;
1534 * The next set of definitions is for functions that encode the density
1535 * as a function of temperature, the gravity vector, and the initial
1536 * values for the temperature. Again, all of these (along with the values
1537 * they compute) are discussed in the introduction:
1540 * double density(const double temperature)
1541 * {
1542 *   return (
1543 *     reference_density *
1544 *     (1 - expansion_coefficient * (temperature - reference_temperature)));
1545 * }
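 *
 * In formulas, <code>density()</code> is the usual linearized equation of
 * state,
 * @f{align*}{
 *   \rho(T) = \rho_{\text{ref}}\left(1-\beta(T-T_{\text{ref}})\right),
 * @f}
 * with @f$\rho_{\text{ref}}@f$, @f$\beta@f$, and @f$T_{\text{ref}}@f$ given
 * by <code>reference_density</code>, <code>expansion_coefficient</code>, and
 * <code>reference_temperature</code> defined above.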
1548 * template <int dim>
1549 * Tensor<1, dim> gravity_vector(const Point<dim> &p)
1550 * {
1551 *   const double r = p.norm();
1552 *   return -(1.245e-6 * r + 7.714e13 / r / r) * p / r;
1553 * }
1557 * template <int dim>
1558 * class TemperatureInitialValues : public Function<dim>
1559 * {
1560 * public:
1561 *   TemperatureInitialValues()
1562 *     : Function<dim>(1)
1563 *   {}
1565 *   virtual double value(const Point<dim> & p,
1566 *                        const unsigned int component = 0) const override;
1568 *   virtual void vector_value(const Point<dim> &p,
1569 *                             Vector<double> &  values) const override;
1570 * };
1574 * template <int dim>
1575 * double TemperatureInitialValues<dim>::value(const Point<dim> & p,
1576 *                                             const unsigned int) const
1577 * {
1578 *   const double r = p.norm();
1579 *   const double h = R1 - R0;
1581 *   const double s = (r - R0) / h;
1582 *   const double q =
1583 *     (dim == 3) ? std::max(0.0, cos(numbers::PI * abs(p(1) / R1))) : 1.0;
1584 *   const double phi = std::atan2(p(0), p(1));
1585 *   const double tau = s + 0.2 * s * (1 - s) * std::sin(6 * phi) * q;
1587 *   return T0 * (1.0 - tau) + T1 * tau;
1588 * }
1591 * template <int dim>
1592 * void
1593 * TemperatureInitialValues<dim>::vector_value(const Point<dim> &p,
1594 *                                             Vector<double> &  values) const
1595 * {
1596 *   for (unsigned int c = 0; c < this->n_components; ++c)
1597 *     values(c) = TemperatureInitialValues<dim>::value(p, c);
1598 * }
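 *
 * Such a Function object is later consumed by one of the standard
 * interpolation or projection routines to build the initial temperature
 * vector. A sketch of that kind of call (the quadrature degree and vector
 * name are illustrative, not necessarily the exact call this program makes):
 * @code
 *   VectorTools::project(temperature_dof_handler,
 *                        temperature_constraints,
 *                        QGauss<dim>(parameters.temperature_degree + 2),
 *                        EquationData::TemperatureInitialValues<dim>(),
 *                        old_temperature_solution);
 * @endcode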
1603 * As mentioned in the introduction we need to rescale the pressure to
1604 * avoid the relative ill-conditioning of the momentum and mass
1605 * conservation equations. The scaling factor is @f$\frac{\eta}{L}@f$ where
1606 * @f$L@f$ is a typical length scale. By experimenting it turns out that a
1607 * good length scale is the diameter of plumes, which is around 10 km:
1610 * constexpr double pressure_scaling = eta / 10000;
1614 * The final number in this namespace is a constant that denotes the
1615 * number of seconds per (average, tropical) year. We use this only when
1616 * generating screen output: internally, all computations of this program
1617 * happen in SI units (kilogram, meter, seconds) but writing geological
1618 * times in seconds yields numbers that one can't relate to reality, and
1619 * so we convert to years using the factor defined here:
1622 * const double year_in_seconds = 60 * 60 * 24 * 365.2425;
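 *
 * For example, an output statement can then convert on the fly; a line of
 * the following kind (illustrative, not the program's exact statement)
 * prints the simulation time in years:
 * @code
 *   pcout << "Time: " << time / EquationData::year_in_seconds << " years"
 *         << std::endl;
 * @endcode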
1624 * } // namespace EquationData
1631 * <a name="PreconditioningtheStokessystem"></a>
1632 * <h3>Preconditioning the Stokes system</h3>
1636 * This namespace implements the preconditioner. As discussed in the
1637 * introduction, this preconditioner differs in a number of key portions
1638 * from the one used in @ref step_31 "step-31". Specifically, it is a right preconditioner,
1639 * implementing the matrix
1640 * @f{align*}{
1641 *   \left(\begin{array}{cc} A^{-1} & B^T \\ 0 & S^{-1} \end{array}\right)
1642 * @f}
1645 * where the two inverse matrix operations
1646 * are approximated by linear solvers or, if the right flag is given to the
1647 * constructor of this class, by a single AMG V-cycle for the velocity
1648 * block. The three code blocks of the <code>vmult</code> function implement
1649 * the multiplications with the three blocks of this preconditioner matrix
1650 * and should be self explanatory if you have read through @ref step_31 "step-31" or the
1651 * discussion of composing solvers in @ref step_20 "step-20".
1654 * namespace LinearSolvers
1655 * {
1656 * template <class PreconditionerTypeA, class PreconditionerTypeMp>
1657 * class BlockSchurPreconditioner : public Subscriptor
1660 * BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
1661 * const TrilinosWrappers::BlockSparseMatrix &Spre,
1662 * const PreconditionerTypeMp &Mppreconditioner,
1663 * const PreconditionerTypeA & Apreconditioner,
1664 * const bool do_solve_A)
1665 * : stokes_matrix(&S)
1666 * , stokes_preconditioner_matrix(&Spre)
1667 * , mp_preconditioner(Mppreconditioner)
1668 * , a_preconditioner(Apreconditioner)
1669 * , do_solve_A(do_solve_A)
1672 * void vmult(TrilinosWrappers::MPI::BlockVector & dst,
1673 * const TrilinosWrappers::MPI::BlockVector &src) const
1675 * TrilinosWrappers::MPI::Vector utmp(src.block(0));
1678 * SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
1680 * SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);
1682 * solver.solve(stokes_preconditioner_matrix->block(1, 1),
1683 *              dst.block(1),
1684 *              src.block(1),
1685 *              mp_preconditioner);
1687 * dst.block(1) *= -1.0;
1691 * stokes_matrix->block(0, 1).vmult(utmp, dst.block(1));
1692 * utmp *= -1.0;
1693 * utmp.add(src.block(0));
1696 * if (do_solve_A == true)
1697 *   {
1698 *     SolverControl solver_control(5000, utmp.l2_norm() * 1e-2);
1699 *     TrilinosWrappers::SolverCG solver(solver_control);
1700 *     solver.solve(stokes_matrix->block(0, 0),
1701 *                  dst.block(0),
1702 *                  utmp,
1703 *                  a_preconditioner);
1704 *   }
1705 * else
1706 *   a_preconditioner.vmult(dst.block(0), utmp);
1710 * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1711 *   stokes_matrix;
1712 * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1713 *   stokes_preconditioner_matrix;
1714 * const PreconditionerTypeMp &mp_preconditioner;
1715 * const PreconditionerTypeA & a_preconditioner;
1716 * const bool do_solve_A;
1718 * } // namespace LinearSolvers
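 *
 * For orientation, this is roughly how such a right preconditioner is later
 * handed to a Krylov solver for the block system. The tolerance and solver
 * settings below are illustrative assumptions of this sketch, not
 * necessarily the exact values used in this program's solve() function:
 * @code
 *   const LinearSolvers::BlockSchurPreconditioner<
 *     TrilinosWrappers::PreconditionAMG,
 *     TrilinosWrappers::PreconditionJacobi>
 *     preconditioner(stokes_matrix,
 *                    stokes_preconditioner_matrix,
 *                    *Mp_preconditioner,
 *                    *Amg_preconditioner,
 *                    false); // do_solve_A
 *
 *   SolverControl solver_control(stokes_matrix.m(),
 *                                1e-8 * stokes_rhs.l2_norm());
 *   SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(solver_control);
 *   solver.solve(stokes_matrix,
 *                distributed_stokes_solution,
 *                stokes_rhs,
 *                preconditioner);
 * @endcode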
1725 * <a name="Definitionofassemblydatastructures"></a>
1726 * <h3>Definition of assembly data structures</h3>
1730 * As described in the introduction, we will use the WorkStream mechanism
1731 * discussed in the @ref threads module to parallelize operations among the
1732 * processors of a single machine. The WorkStream class requires that data
1733 * is passed around in two kinds of data structures, one for scratch data
1734 * and one to pass data from the assembly function to the function that
1735 * copies local contributions into global objects.
1739 * The following namespace (and the two sub-namespaces) contains a
1740 * collection of data structures that serve this purpose, one pair for each
1741 * of the four operations discussed in the introduction that we will want to
1742 * parallelize. Each assembly routine gets two sets of data: a Scratch array
1743 * that collects all the classes and arrays that are used for the
1744 * calculation of the cell contribution, and a CopyData array that keeps
1745 * local matrices and vectors which will be written into the global
1746 * matrix. Whereas CopyData is a container for the final data that is
1747 * written into the global matrices and vector (and, thus, absolutely
1748 * necessary), the Scratch arrays are merely there for performance reasons
1749 * — it would be much more expensive to set up a FEValues object on
1750 * each cell, than creating it only once and updating some derivative data.
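 *
 * Schematically, each of the assembly loops below then takes the following
 * shape, where <code>Scratch</code> and <code>Copy</code> stand for the
 * concrete structs defined in this namespace and the two lambdas forward to
 * the corresponding cell-local and copy functions (names illustrative):
 * @code
 *   WorkStream::run(
 *     dof_handler.begin_active(),
 *     dof_handler.end(),
 *     [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
 *            Scratch &scratch,
 *            Copy &   data) { this->local_assemble(cell, scratch, data); },
 *     [this](const Copy &data) { this->copy_local_to_global(data); },
 *     Scratch(fe, quadrature, mapping, update_flags),
 *     Copy(fe));
 * @endcode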
1754 * @ref step_31 "step-31" had four assembly routines: One for the preconditioner matrix of
1755 * the Stokes system, one for the Stokes matrix and right hand side, one for
1756 * the temperature matrices and one for the right hand side of the
1757 * temperature equation. We here organize the scratch arrays and CopyData
1758 * objects for each of those four assembly components using a
1759 * <code>struct</code> environment (since we consider these as temporary
1760 * objects we pass around, rather than classes that implement functionality
1761 * of their own, though this is a more subjective point of view to
1762 * distinguish between <code>struct</code>s and <code>class</code>es).
1766 * Regarding the Scratch objects, each struct is equipped with a constructor
1767 * that creates an @ref FEValues object using the @ref FiniteElement,
1768 * Quadrature, @ref Mapping (which describes the interpolation of curved
1769 * boundaries), and @ref UpdateFlags instances. Moreover, we manually
1770 * implement a copy constructor (since the FEValues class is not copyable by
1771 * itself), and provide some additional vector fields that are used to hold
1772 * intermediate data during the computation of local contributions.
1776 * Let us start with the scratch arrays and, specifically, the one used for
1777 * assembly of the Stokes preconditioner:
1780 * namespace Assembly
1781 * {
1782 *   namespace Scratch
1783 *   {
1784 * template <int dim>
1785 * struct StokesPreconditioner
1787 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe,
1788 * const Quadrature<dim> & stokes_quadrature,
1789 * const Mapping<dim> & mapping,
1790 * const UpdateFlags update_flags);
1792 * StokesPreconditioner(const StokesPreconditioner &data);
1795 * FEValues<dim> stokes_fe_values;
1797 * std::vector<Tensor<2, dim>> grad_phi_u;
1798 * std::vector<double> phi_p;
1801 * template <int dim>
1802 * StokesPreconditioner<dim>::StokesPreconditioner(
1803 * const FiniteElement<dim> &stokes_fe,
1804 * const Quadrature<dim> & stokes_quadrature,
1805 * const Mapping<dim> & mapping,
1806 * const UpdateFlags update_flags)
1807 * : stokes_fe_values(mapping, stokes_fe, stokes_quadrature, update_flags)
1808 * , grad_phi_u(stokes_fe.dofs_per_cell)
1809 * , phi_p(stokes_fe.dofs_per_cell)
1814 * template <int dim>
1815 * StokesPreconditioner<dim>::StokesPreconditioner(
1816 * const StokesPreconditioner &scratch)
1817 * : stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
1818 * scratch.stokes_fe_values.get_fe(),
1819 * scratch.stokes_fe_values.get_quadrature(),
1820 * scratch.stokes_fe_values.get_update_flags())
1821 * , grad_phi_u(scratch.grad_phi_u)
1822 * , phi_p(scratch.phi_p)
1829 * The next one is the scratch object used for the assembly of the full
1830 * Stokes system. Observe that we derive the StokesSystem scratch class
1831 * from the StokesPreconditioner class above. We do this because all the
1832 * objects that are necessary for the assembly of the preconditioner are
1833 * also needed for the actual matrix system and right hand side, plus
1834 * some extra data. This makes the program more compact. Note also that
1835 * the assembly of the Stokes system and the temperature right hand side
1836 * further down requires data from temperature and velocity,
1837 * respectively, so we actually need two FEValues objects for those two
1838 * kinds of variables.
1841 * template <int dim>
1842 * struct StokesSystem : public StokesPreconditioner<dim>
1844 * StokesSystem(const FiniteElement<dim> &stokes_fe,
1845 * const Mapping<dim> & mapping,
1846 * const Quadrature<dim> & stokes_quadrature,
1847 * const UpdateFlags stokes_update_flags,
1848 * const FiniteElement<dim> &temperature_fe,
1849 * const UpdateFlags temperature_update_flags);
1851 * StokesSystem(const StokesSystem<dim> &data);
1854 * FEValues<dim> temperature_fe_values;
1856 * std::vector<Tensor<1, dim>> phi_u;
1857 * std::vector<SymmetricTensor<2, dim>> grads_phi_u;
1858 * std::vector<double> div_phi_u;
1860 * std::vector<double> old_temperature_values;
1864 * template <int dim>
1865 * StokesSystem<dim>::StokesSystem(
1866 * const FiniteElement<dim> &stokes_fe,
1867 * const Mapping<dim> & mapping,
1868 * const Quadrature<dim> & stokes_quadrature,
1869 * const UpdateFlags stokes_update_flags,
1870 * const FiniteElement<dim> &temperature_fe,
1871 * const UpdateFlags temperature_update_flags)
1872 * : StokesPreconditioner<dim>(stokes_fe,
1873 *                             stokes_quadrature,
1874 *                             mapping,
1875 *                             stokes_update_flags)
1876 * , temperature_fe_values(mapping,
1877 *                         temperature_fe,
1878 *                         stokes_quadrature,
1879 *                         temperature_update_flags)
1880 * , phi_u(stokes_fe.dofs_per_cell)
1881 * , grads_phi_u(stokes_fe.dofs_per_cell)
1882 * , div_phi_u(stokes_fe.dofs_per_cell)
1883 * , old_temperature_values(stokes_quadrature.size())
1887 * template <int dim>
1888 * StokesSystem<dim>::StokesSystem(const StokesSystem<dim> &scratch)
1889 * : StokesPreconditioner<dim>(scratch)
1890 * , temperature_fe_values(
1891 * scratch.temperature_fe_values.get_mapping(),
1892 * scratch.temperature_fe_values.get_fe(),
1893 * scratch.temperature_fe_values.get_quadrature(),
1894 * scratch.temperature_fe_values.get_update_flags())
1895 * , phi_u(scratch.phi_u)
1896 * , grads_phi_u(scratch.grads_phi_u)
1897 * , div_phi_u(scratch.div_phi_u)
1898 * , old_temperature_values(scratch.old_temperature_values)
1904 * After defining the objects used in the assembly of the Stokes system,
1905 * we do the same for the assembly of the matrices necessary for the
1906 * temperature system. The general structure is very similar:
1909 * template <int dim>
1910 * struct TemperatureMatrix
1912 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe,
1913 * const Mapping<dim> & mapping,
1914 * const Quadrature<dim> & temperature_quadrature);
1916 * TemperatureMatrix(const TemperatureMatrix &data);
1919 * FEValues<dim> temperature_fe_values;
1921 * std::vector<double> phi_T;
1922 * std::vector<Tensor<1, dim>> grad_phi_T;
1926 * template <int dim>
1927 * TemperatureMatrix<dim>::TemperatureMatrix(
1928 * const FiniteElement<dim> &temperature_fe,
1929 * const Mapping<dim> & mapping,
1930 * const Quadrature<dim> & temperature_quadrature)
1931 * : temperature_fe_values(mapping,
1932 *                         temperature_fe,
1933 *                         temperature_quadrature,
1934 * update_values | update_gradients |
1935 * update_JxW_values)
1936 * , phi_T(temperature_fe.dofs_per_cell)
1937 * , grad_phi_T(temperature_fe.dofs_per_cell)
1941 * template <int dim>
1942 * TemperatureMatrix<dim>::TemperatureMatrix(
1943 * const TemperatureMatrix &scratch)
1944 * : temperature_fe_values(
1945 * scratch.temperature_fe_values.get_mapping(),
1946 * scratch.temperature_fe_values.get_fe(),
1947 * scratch.temperature_fe_values.get_quadrature(),
1948 * scratch.temperature_fe_values.get_update_flags())
1949 * , phi_T(scratch.phi_T)
1950 * , grad_phi_T(scratch.grad_phi_T)
1956 * The final scratch object is used in the assembly of the right hand
1957 * side of the temperature system. This object is significantly larger
1958 * than the ones above because a lot more quantities enter the
1959 * computation of the right hand side of the temperature equation. In
1960 * particular, the temperature values and gradients of the previous two
1961 * time steps need to be evaluated at the quadrature points, as well as
1962 * the velocities and the strain rates (i.e. the symmetric gradients of
1963 * the velocity) that enter the right hand side as friction heating
1964 * terms. Despite the number of terms, the following should be rather
1965 * self-explanatory:
1968 * template <int dim>
1969 * struct TemperatureRHS
1971 * TemperatureRHS(const FiniteElement<dim> &temperature_fe,
1972 * const FiniteElement<dim> &stokes_fe,
1973 * const Mapping<dim> & mapping,
1974 * const Quadrature<dim> & quadrature);
1976 * TemperatureRHS(const TemperatureRHS &data);
1979 * FEValues<dim> temperature_fe_values;
1980 * FEValues<dim> stokes_fe_values;
1982 * std::vector<double> phi_T;
1983 * std::vector<Tensor<1, dim>> grad_phi_T;
1985 * std::vector<Tensor<1, dim>> old_velocity_values;
1986 * std::vector<Tensor<1, dim>> old_old_velocity_values;
1988 * std::vector<SymmetricTensor<2, dim>> old_strain_rates;
1989 * std::vector<SymmetricTensor<2, dim>> old_old_strain_rates;
1991 * std::vector<double> old_temperature_values;
1992 * std::vector<double> old_old_temperature_values;
1993 * std::vector<Tensor<1, dim>> old_temperature_grads;
1994 * std::vector<Tensor<1, dim>> old_old_temperature_grads;
1995 * std::vector<double> old_temperature_laplacians;
1996 * std::vector<double> old_old_temperature_laplacians;
2000 * template <int dim>
2001 * TemperatureRHS<dim>::TemperatureRHS(
2002 * const FiniteElement<dim> &temperature_fe,
2003 * const FiniteElement<dim> &stokes_fe,
2004 * const Mapping<dim> & mapping,
2005 * const Quadrature<dim> & quadrature)
2006 * : temperature_fe_values(mapping,
2007 *                         temperature_fe,
2008 *                         quadrature,
2009 * update_values | update_gradients |
2010 * update_hessians | update_quadrature_points |
2011 * update_JxW_values)
2012 * , stokes_fe_values(mapping,
2013 *                    stokes_fe,
2014 *                    quadrature,
2015 * update_values | update_gradients)
2016 * , phi_T(temperature_fe.dofs_per_cell)
2017 * , grad_phi_T(temperature_fe.dofs_per_cell)
2020 * , old_velocity_values(quadrature.size())
2021 * , old_old_velocity_values(quadrature.size())
2022 * , old_strain_rates(quadrature.size())
2023 * , old_old_strain_rates(quadrature.size())
2026 * , old_temperature_values(quadrature.size())
2027 * , old_old_temperature_values(quadrature.size())
2028 * , old_temperature_grads(quadrature.size())
2029 * , old_old_temperature_grads(quadrature.size())
2030 * , old_temperature_laplacians(quadrature.size())
2031 * , old_old_temperature_laplacians(quadrature.size())
2035 * template <int dim>
2036 * TemperatureRHS<dim>::TemperatureRHS(const TemperatureRHS &scratch)
2037 * : temperature_fe_values(
2038 * scratch.temperature_fe_values.get_mapping(),
2039 * scratch.temperature_fe_values.get_fe(),
2040 * scratch.temperature_fe_values.get_quadrature(),
2041 * scratch.temperature_fe_values.get_update_flags())
2042 * , stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
2043 * scratch.stokes_fe_values.get_fe(),
2044 * scratch.stokes_fe_values.get_quadrature(),
2045 * scratch.stokes_fe_values.get_update_flags())
2046 * , phi_T(scratch.phi_T)
2047 * , grad_phi_T(scratch.grad_phi_T)
2050 * , old_velocity_values(scratch.old_velocity_values)
2051 * , old_old_velocity_values(scratch.old_old_velocity_values)
2052 * , old_strain_rates(scratch.old_strain_rates)
2053 * , old_old_strain_rates(scratch.old_old_strain_rates)
2056 * , old_temperature_values(scratch.old_temperature_values)
2057 * , old_old_temperature_values(scratch.old_old_temperature_values)
2058 * , old_temperature_grads(scratch.old_temperature_grads)
2059 * , old_old_temperature_grads(scratch.old_old_temperature_grads)
2060 * , old_temperature_laplacians(scratch.old_temperature_laplacians)
2061 * , old_old_temperature_laplacians(scratch.old_old_temperature_laplacians)
2063 * } // namespace Scratch
2068 * The CopyData objects are even simpler than the Scratch objects as all
2069 * they have to do is to store the results of local computations until
2070 * they can be copied into the global matrix or vector objects. These
2071 * structures therefore only need to provide a constructor, a copy
2072 * operation, and some arrays for local matrix, local vectors and the
2073 * relation between local and global degrees of freedom (a.k.a.
2074 * <code>local_dof_indices</code>). Again, we have one such structure for
2075 * each of the four operations we will parallelize using the WorkStream
2076 * class:
2079 * namespace CopyData
2080 * {
2081 * template <int dim>
2082 * struct StokesPreconditioner
2084 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe);
2085 * StokesPreconditioner(const StokesPreconditioner &data);
2086 * StokesPreconditioner &operator=(const StokesPreconditioner &) = default;
2088 * FullMatrix<double> local_matrix;
2089 * std::vector<types::global_dof_index> local_dof_indices;
2092 * template <int dim>
2093 * StokesPreconditioner<dim>::StokesPreconditioner(
2094 * const FiniteElement<dim> &stokes_fe)
2095 * : local_matrix(stokes_fe.dofs_per_cell, stokes_fe.dofs_per_cell)
2096 * , local_dof_indices(stokes_fe.dofs_per_cell)
2099 * template <int dim>
2100 * StokesPreconditioner<dim>::StokesPreconditioner(
2101 * const StokesPreconditioner &data)
2102 * : local_matrix(data.local_matrix)
2103 * , local_dof_indices(data.local_dof_indices)
2108 * template <int dim>
2109 * struct StokesSystem : public StokesPreconditioner<dim>
2111 * StokesSystem(const FiniteElement<dim> &stokes_fe);
2113 * Vector<double> local_rhs;
2116 * template <int dim>
2117 * StokesSystem<dim>::StokesSystem(const FiniteElement<dim> &stokes_fe)
2118 * : StokesPreconditioner<dim>(stokes_fe)
2119 * , local_rhs(stokes_fe.dofs_per_cell)
2124 * template <int dim>
2125 * struct TemperatureMatrix
2127 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe);
2129 * FullMatrix<double> local_mass_matrix;
2130 * FullMatrix<double> local_stiffness_matrix;
2131 * std::vector<types::global_dof_index> local_dof_indices;
2134 * template <int dim>
2135 * TemperatureMatrix<dim>::TemperatureMatrix(
2136 * const FiniteElement<dim> &temperature_fe)
2137 * : local_mass_matrix(temperature_fe.dofs_per_cell,
2138 * temperature_fe.dofs_per_cell)
2139 * , local_stiffness_matrix(temperature_fe.dofs_per_cell,
2140 * temperature_fe.dofs_per_cell)
2141 * , local_dof_indices(temperature_fe.dofs_per_cell)
2146 * template <int dim>
2147 * struct TemperatureRHS
2149 * TemperatureRHS(const FiniteElement<dim> &temperature_fe);
2151 * Vector<double> local_rhs;
2152 * std::vector<types::global_dof_index> local_dof_indices;
2153 * FullMatrix<double> matrix_for_bc;
2156 * template <int dim>
2157 * TemperatureRHS<dim>::TemperatureRHS(
2158 * const FiniteElement<dim> &temperature_fe)
2159 * : local_rhs(temperature_fe.dofs_per_cell)
2160 * , local_dof_indices(temperature_fe.dofs_per_cell)
2161 * , matrix_for_bc(temperature_fe.dofs_per_cell,
2162 * temperature_fe.dofs_per_cell)
2164 * } // namespace CopyData
2165 * } // namespace Assembly
2172 * <a name="ThecodeBoussinesqFlowProblemcodeclasstemplate"></a>
2173 * <h3>The <code>BoussinesqFlowProblem</code> class template</h3>
2177 * This is the declaration of the main class. It is very similar to @ref step_31 "step-31"
2178 * but there are a number of differences we will comment on below.
2182 * The top of the class is essentially the same as in @ref step_31 "step-31", listing the
2183 * public methods and a set of private functions that do the heavy
2184 * lifting. Compared to @ref step_31 "step-31" there are only two additions to this
2185 * section: the function <code>get_cfl_number()</code> that computes the
2186 * maximum CFL number over all cells which we then compute the global time
2187 * step from, and the function <code>get_entropy_variation()</code> that is
2188 * used in the computation of the entropy stabilization. It is akin to the
2189 * <code>get_extrapolated_temperature_range()</code> we have used in @ref step_31 "step-31"
2190 * for this purpose, but works on the entropy instead of the temperature
2191 * field:
2194 * template <int dim>
2195 * class BoussinesqFlowProblem
2198 * struct Parameters;
2199 * BoussinesqFlowProblem(Parameters &parameters);
2203 * void setup_dofs();
2204 * void assemble_stokes_preconditioner();
2205 * void build_stokes_preconditioner();
2206 * void assemble_stokes_system();
2207 * void assemble_temperature_matrix();
2208 * void assemble_temperature_system(const double maximal_velocity);
2209 * double get_maximal_velocity() const;
2210 * double get_cfl_number() const;
2211 * double get_entropy_variation(const double average_temperature) const;
2212 * std::pair<double, double> get_extrapolated_temperature_range() const;
2214 * void output_results();
2215 * void refine_mesh(const unsigned int max_grid_level);
2217 * double compute_viscosity(
2218 * const std::vector<double> & old_temperature,
2219 * const std::vector<double> & old_old_temperature,
2220 * const std::vector<Tensor<1, dim>> &old_temperature_grads,
2221 * const std::vector<Tensor<1, dim>> &old_old_temperature_grads,
2222 * const std::vector<double> & old_temperature_laplacians,
2223 * const std::vector<double> & old_old_temperature_laplacians,
2224 * const std::vector<Tensor<1, dim>> &old_velocity_values,
2225 * const std::vector<Tensor<1, dim>> &old_old_velocity_values,
2226 * const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
2227 * const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
2228 * const double global_u_infty,
2229 * const double global_T_variation,
2230 * const double average_temperature,
2231 * const double global_entropy_variation,
2232 * const double cell_diameter) const;
2237 * The first significant new component is the definition of a struct for
2238 * the parameters according to the discussion in the introduction. This
2239 * structure is initialized by reading from a parameter file during
2240 * construction of this object.
2245 * Parameters(const std::string &parameter_filename);
2247 * static void declare_parameters(ParameterHandler &prm);
2248 * void parse_parameters(ParameterHandler &prm);
2252 * unsigned int initial_global_refinement;
2253 * unsigned int initial_adaptive_refinement;
2255 * bool generate_graphical_output;
2256 * unsigned int graphical_output_interval;
2258 * unsigned int adaptive_refinement_interval;
2260 * double stabilization_alpha;
2261 * double stabilization_c_R;
2262 * double stabilization_beta;
2264 * unsigned int stokes_velocity_degree;
2265 * bool use_locally_conservative_discretization;
2267 * unsigned int temperature_degree;
2271 * Parameters &parameters;
2275 * The <code>pcout</code> (for <i>%parallel <code>std::cout</code></i>)
2276 * object is used to simplify writing output: each MPI process can use
2277 * this to generate output as usual, but since each of these processes
2278 * will (hopefully) produce the same output it will just be replicated
2279 * many times over; with the ConditionalOStream class, only the output
2280 * generated by one MPI process will actually be printed to screen,
2281 * whereas the output by all the other threads will simply be forgotten.
2284 * ConditionalOStream pcout;
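 *
 * Such an object is created by pairing a stream with a condition that is
 * only true on one process; the constructor further down initializes this
 * member essentially like so:
 * @code
 *   ConditionalOStream pcout(std::cout,
 *                            Utilities::MPI::this_mpi_process(
 *                              MPI_COMM_WORLD) == 0);
 * @endcode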
2288 * The following member variables will then again be similar to those in
2289 * @ref step_31 "step-31" (and to other tutorial programs). As mentioned in the
2290 * introduction, we fully distribute computations, so we will have to use
2291 * the parallel::distributed::Triangulation class (see @ref step_40 "step-40") but the
2292 * remainder of these variables is rather standard with two exceptions:
2296 * - The <code>mapping</code> variable is used to denote a higher-order
2297 * polynomial mapping. As mentioned in the introduction, we use this
2298 * mapping when forming integrals through quadrature for all cells that
2299 * are adjacent to either the inner or outer boundaries of our domain
2300 * where the boundary is curved.
2304 * - In a bit of naming confusion, you will notice below that some of the
2305 * variables from namespace TrilinosWrappers are taken from namespace
2306 * TrilinosWrappers::MPI (such as the right hand side vectors) whereas
2307 * others are not (such as the various matrices). This is due to legacy
2308 * reasons. We will frequently have to query velocities
2309 * and temperatures at arbitrary quadrature points; consequently, rather
2310 * than importing ghost information of a vector whenever we need access
2311 * to degrees of freedom that are relevant locally but owned by another
2312 * processor, we solve linear systems in %parallel but then immediately
2313 * initialize a vector including ghost entries of the solution for further
2314 * processing. The various <code>*_solution</code> vectors are therefore
2315 * filled immediately after solving their respective linear system in
2316 * %parallel and will always contain values for all
2317 * @ref GlossLocallyRelevantDof "locally relevant degrees of freedom";
2318 * the fully distributed vectors that we obtain from the solution process
2319 * and that only ever contain the
2320 * @ref GlossLocallyOwnedDof "locally owned degrees of freedom" are
2321 * destroyed immediately after the solution process and after we have
2322 * copied the relevant values into the member variable vectors.
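 *
 * In code, the pattern just described looks roughly as follows (a sketch
 * with illustrative names; the solver functions of this program flesh it
 * out):
 * @code
 *   // A fully distributed vector without ghost entries, used only while
 *   // solving:
 *   TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(stokes_rhs);
 *   solver.solve(stokes_matrix,
 *                distributed_stokes_solution,
 *                stokes_rhs,
 *                preconditioner);
 *
 *   // Copying into the ghosted member vector also imports the values of all
 *   // locally relevant (ghost) degrees of freedom:
 *   stokes_solution = distributed_stokes_solution;
 * @endcode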
2325 * parallel::distributed::Triangulation<dim> triangulation;
2326 * double global_Omega_diameter;
2328 * const MappingQ<dim> mapping;
2330 * const FESystem<dim> stokes_fe;
2331 * DoFHandler<dim> stokes_dof_handler;
2332 * AffineConstraints<double> stokes_constraints;
2334 * TrilinosWrappers::BlockSparseMatrix stokes_matrix;
2335 * TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
2337 * TrilinosWrappers::MPI::BlockVector stokes_solution;
2338 * TrilinosWrappers::MPI::BlockVector old_stokes_solution;
2339 * TrilinosWrappers::MPI::BlockVector stokes_rhs;
2342 * FE_Q<dim> temperature_fe;
2343 * DoFHandler<dim> temperature_dof_handler;
2344 * AffineConstraints<double> temperature_constraints;
2346 * TrilinosWrappers::SparseMatrix temperature_mass_matrix;
2347 * TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
2348 * TrilinosWrappers::SparseMatrix temperature_matrix;
2350 * TrilinosWrappers::MPI::Vector temperature_solution;
2351 * TrilinosWrappers::MPI::Vector old_temperature_solution;
2352 * TrilinosWrappers::MPI::Vector old_old_temperature_solution;
2353 * TrilinosWrappers::MPI::Vector temperature_rhs;
2357 * double old_time_step;
2358 * unsigned int timestep_number;
2360 * std::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
2361 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
2362 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;
2364 * bool rebuild_stokes_matrix;
2365 * bool rebuild_stokes_preconditioner;
2366 * bool rebuild_temperature_matrices;
2367 * bool rebuild_temperature_preconditioner;
2371 * The next member variable, <code>computing_timer</code> is used to
2372 * conveniently account for compute time spent in certain "sections" of
2373 * the code that are repeatedly entered. For example, we will enter (and
2374 * leave) sections for Stokes matrix assembly and would like to accumulate
2375 * the run time spent in this section over all time steps. Every so many
2376 * time steps as well as at the end of the program (through the destructor
2377 * of the TimerOutput class) we will then produce a nice summary of the
2378 * times spent in the different sections into which we categorize the
2379 * run-time of this program.
2382 * TimerOutput computing_timer;
2386 * After these member variables we have a number of auxiliary functions
2387 * that have been broken out of the ones listed above. Specifically, there
2388 * are first three functions that we call from <code>setup_dofs</code> and
2389 * then the ones that do the assembling of linear systems:
2392 * void setup_stokes_matrix(
2393 * const std::vector<IndexSet> &stokes_partitioning,
2394 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2395 * void setup_stokes_preconditioner(
2396 * const std::vector<IndexSet> &stokes_partitioning,
2397 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2398 * void setup_temperature_matrices(
2399 * const IndexSet &temperature_partitioning,
2400 * const IndexSet &temperature_relevant_partitioning);
2405 * Following the @ref MTWorkStream "task-based parallelization" paradigm,
2406 * we split all the assembly routines into two parts: a first part that
2407 * can do all the calculations on a certain cell without taking care of
2408 * other threads, and a second part (which is writing the local data into
2409 * the global matrices and vectors) which can be entered by only one
2410 * thread at a time. In order to implement that, we provide functions for
2411 * each of those two steps for all the four assembly routines that we use
2412 * in this program. The following eight functions do exactly this:
2415 * void local_assemble_stokes_preconditioner(
2416 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2417 * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
2418 * Assembly::CopyData::StokesPreconditioner<dim> & data);
2420 * void copy_local_to_global_stokes_preconditioner(
2421 * const Assembly::CopyData::StokesPreconditioner<dim> &data);
2424 * void local_assemble_stokes_system(
2425 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2426 * Assembly::Scratch::StokesSystem<dim> & scratch,
2427 * Assembly::CopyData::StokesSystem<dim> & data);
2429 * void copy_local_to_global_stokes_system(
2430 * const Assembly::CopyData::StokesSystem<dim> &data);
2433 * void local_assemble_temperature_matrix(
2434 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2435 * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
2436 * Assembly::CopyData::TemperatureMatrix<dim> & data);
2438 * void copy_local_to_global_temperature_matrix(
2439 * const Assembly::CopyData::TemperatureMatrix<dim> &data);
2443 * void local_assemble_temperature_rhs(
2444 * const std::pair<double, double> global_T_range,
2445 * const double global_max_velocity,
2446 * const double global_entropy_variation,
2447 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2448 * Assembly::Scratch::TemperatureRHS<dim> & scratch,
2449 * Assembly::CopyData::TemperatureRHS<dim> & data);
2451 * void copy_local_to_global_temperature_rhs(
2452 * const Assembly::CopyData::TemperatureRHS<dim> &data);
2456 * Finally, we forward declare a member class that we will define later on
2457 * and that will be used to compute a number of quantities from our
2458 * solution vectors that we'd like to put into the output files for
2459 * visualization.
2462 * class Postprocessor;
2469 * <a name="BoussinesqFlowProblemclassimplementation"></a>
2470 * <h3>BoussinesqFlowProblem class implementation</h3>
2475 * <a name="BoussinesqFlowProblemParameters"></a>
2476 * <h4>BoussinesqFlowProblem::Parameters</h4>
2480 * Here comes the definition of the parameters for the Stokes problem. We
2481 * allow the user to set the end time for the simulation, the level of
2482 * refinements (both global and adaptive, which in sum specify what maximum
2483 * level the cells are allowed to have), and the interval between
2484 * refinements in the time stepping.
2488 * Then, we let the user specify constants for the stabilization parameters
2489 * (as discussed in the introduction), the polynomial degree for the Stokes
2490 * velocity space, whether to use the locally conservative discretization
2491 * based on FE_DGP elements for the pressure or not (FE_Q elements for the
2492 * pressure), and the polynomial degree for the temperature interpolation.
2496 * The constructor checks for a valid input file (if not, a file with
2497 * default parameters for the quantities is written), and eventually parses
2498 * the parameters.
2501 * template <int dim>
2502 * BoussinesqFlowProblem<dim>::Parameters::Parameters(
2503 *   const std::string &parameter_filename)
2504 *   : end_time(1e8)
2505 *   , initial_global_refinement(2)
2506 *   , initial_adaptive_refinement(2)
2507 *   , adaptive_refinement_interval(10)
2508 *   , stabilization_alpha(2)
2509 *   , stabilization_c_R(0.11)
2510 *   , stabilization_beta(0.078)
2511 *   , stokes_velocity_degree(2)
2512 *   , use_locally_conservative_discretization(true)
2513 *   , temperature_degree(2)
2514 * {
2515 *   ParameterHandler prm;
2516 *   BoussinesqFlowProblem<dim>::Parameters::declare_parameters(prm);
2518 *   std::ifstream parameter_file(parameter_filename);
2520 *   if (!parameter_file)
2521 *     {
2522 *       parameter_file.close();
2524 *       std::ofstream parameter_out(parameter_filename);
2525 *       prm.print_parameters(parameter_out, ParameterHandler::Text);
2527 *       AssertThrow(false,
2528 *                   ExcMessage(
2530 *                     "Input parameter file <" + parameter_filename +
2531 *                     "> not found. Creating a template file of the same name."));
2532 *     }
2534 *   prm.parse_input(parameter_file);
2535 *   parse_parameters(prm);
2536 * }
2542 * Next we have a function that declares the parameters that we expect in
2543 * the input file, together with their data types, default values and a
2544 * documentation string for each:
2547 * template <int dim>
2548 * void BoussinesqFlowProblem<dim>::Parameters::declare_parameters(
2549 *   ParameterHandler &prm)
2554 *
"The end time of the simulation in years.");
2558 *
"The number of global refinement steps performed on "
2559 *
"the initial coarse mesh, before the problem is first "
2564 *
"The number of adaptive refinement steps performed after "
2565 *
"initial global refinement.");
2569 *
"The number of time steps after which the mesh is to be "
2570 *
"adapted based on computed error indicators.");
2574 *
"Whether graphical output is to be generated or not. "
2575 *
"You may not want to get graphical output if the number "
2576 *
"of processors is large.");
2580 *
"The number of time steps between each generation of "
2581 *
"graphical output files.");
2588 *
"The exponent in the entropy viscosity stabilization.");
2592 *
"The c_R factor in the entropy viscosity "
2593 *
"stabilization.");
2597 *
"The beta factor in the artificial viscosity "
2598 *
"stabilization. An appropriate value for 2d is 0.052 "
2599 *
"and 0.078 for 3d.");
2606 *
"Stokes velocity polynomial degree",
2609 *
"The polynomial degree to use for the velocity variables "
2610 *
"in the Stokes system.");
2612 *
"Temperature polynomial degree",
2615 *
"The polynomial degree to use for the temperature variable.");
2617 *
"Use locally conservative discretization",
2620 *
"Whether to use a Stokes discretization that is locally "
2621 *
"conservative at the expense of a larger number of degrees "
2622 *
"of freedom, or to go with a cheaper discretization "
2623 *
"that does not locally conserve mass (although it is "
2624 *
"globally conservative.");
2633 * And then we need a function that reads the contents of the
2634 * ParameterHandler object we get after reading the input file, and puts the
2635 * results into variables that store the values of the parameters we have
2636 * previously declared:
2639 * template <int dim>
2640 * void BoussinesqFlowProblem<dim>::Parameters::parse_parameters(
2641 *   ParameterHandler &prm)
2642 * end_time = prm.get_double("End time");
2644 * initial_global_refinement = prm.get_integer("Initial global refinement");
2645 * initial_adaptive_refinement =
2646 *   prm.get_integer("Initial adaptive refinement");
2648 * adaptive_refinement_interval =
2649 *   prm.get_integer("Time steps between mesh refinement");
2651 * generate_graphical_output = prm.get_bool("Generate graphical output");
2652 * graphical_output_interval =
2653 *   prm.get_integer("Time steps between graphical output");
2657 * stabilization_alpha = prm.get_double("alpha");
2658 * stabilization_c_R   = prm.get_double("c_R");
2659 * stabilization_beta  = prm.get_double("beta");
2665 * stokes_velocity_degree =
2666 *   prm.get_integer("Stokes velocity polynomial degree");
2667 * temperature_degree = prm.get_integer("Temperature polynomial degree");
2668 * use_locally_conservative_discretization =
2669 *   prm.get_bool("Use locally conservative discretization");
2679 * <a name="BoussinesqFlowProblemBoussinesqFlowProblem"></a>
2680 * <h4>BoussinesqFlowProblem::BoussinesqFlowProblem</h4>
2684 * The constructor of the problem is very similar to the constructor in
2685 * @ref step_31 "step-31". What is different is the %parallel communication: Trilinos uses
2686 * a message passing interface (MPI) for data distribution. When entering
2687 * the BoussinesqFlowProblem class, we have to decide how the parallelization
2688 * is to be done. We choose a rather simple strategy and let all processors
2689 * that are running the program work together, specified by the communicator
2690 * <code>MPI_COMM_WORLD</code>. Next, we create the output stream (as we
2691 * already did in @ref step_18 "step-18") that only generates output on the first MPI
2692 * process and is completely forgetful on all others. The implementation of
2693 * this idea is to check the process number when <code>pcout</code> gets a
2694 * true argument, and it uses the <code>std::cout</code> stream for
2695 * output. If we are processor five, for instance, then we will give a
2696 * <code>false</code> argument to <code>pcout</code>, which means that the
2697 * output of that processor will not be printed. With the exception of the
2698 * mapping object (for which we use polynomials of degree 4) all but the
2699 * final member variable are exactly the same as in @ref step_31 "step-31".
2703 * This final object, the TimerOutput object, is then told to restrict
2704 * output to the <code>pcout</code> stream (processor 0), and then we
2705 * specify that we want to get a summary table at the end of the program
2706 * which shows us wallclock times (as opposed to CPU times). We will
2707 * manually also request intermediate summaries every so many time steps in
2708 * the <code>run()</code> function below.
2711 * template <int dim>
2712 * BoussinesqFlowProblem<dim>::BoussinesqFlowProblem(Parameters &parameters_)
2713 *   : parameters(parameters_)
2714 *   , pcout(std::cout,
2715 *           (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0))
2723 *   , global_Omega_diameter(0.)
2728 *   , mapping(4)
2729 *   , stokes_fe(FE_Q<dim>(parameters.stokes_velocity_degree),
2730 *               dim,
2731 *               (parameters.use_locally_conservative_discretization ?
2732 *                  static_cast<const FiniteElement<dim> &>(
2733 *                    FE_DGP<dim>(parameters.stokes_velocity_degree - 1)) :
2734 *                  static_cast<const FiniteElement<dim> &>(
2735 *                    FE_Q<dim>(parameters.stokes_velocity_degree - 1))),
2736 *               1)
2742 *   , temperature_fe(parameters.temperature_degree)
2747 *   , old_time_step(0)
2748 *   , timestep_number(0)
2749 *   , rebuild_stokes_matrix(true)
2750 *   , rebuild_stokes_preconditioner(true)
2751 *   , rebuild_temperature_matrices(true)
2752 *   , rebuild_temperature_preconditioner(true)
2755 *   , computing_timer(MPI_COMM_WORLD,
2756 *                     pcout,
2757 *                     TimerOutput::summary,
2758 *                     TimerOutput::wall_times)
2759 * {}
2766 * <a name="TheBoussinesqFlowProblemhelperfunctions"></a>
2767 * <h4>The BoussinesqFlowProblem helper functions</h4>
2769 * <a name="BoussinesqFlowProblemget_maximal_velocity"></a>
2770 * <h5>BoussinesqFlowProblem::get_maximal_velocity</h5>
2774 * Except for two small details, the function to compute the global maximum
2775 * of the velocity is the same as in @ref step_31 "step-31". The first detail is actually
2776 * common to all functions that implement loops over all cells in the
2777 * triangulation: When operating in %parallel, each processor can only work
2778 * on a chunk of cells since each processor only has a certain part of the
2779 * entire triangulation. This chunk of cells that we want to work on is
2780 * identified via a so-called <code>subdomain_id</code>, as we also did in
2781 * @ref step_18 "step-18". Hence, all we need to change is to perform the cell-related
2782 * operations only on cells that are owned by the current process (as
2783 * opposed to ghost or artificial cells), i.e. for which the subdomain id
2784 * equals the number of the process ID. Since this is a commonly used
2785 * operation, there is a shortcut: we can ask whether the
2786 * cell is owned by the current processor using
2787 * <code>cell-@>is_locally_owned()</code>.
2791 * The second difference is the way we calculate the maximum value. Before,
2792 * we could simply have a <code>double</code> variable that we checked
2793 * against on each quadrature point for each cell. Now, we have to be a bit
2794 * more careful since each processor only operates on a subset of
2795 * cells. What we do is to first let each processor calculate the maximum
2796 * among its cells, and then do a global communication operation
2797 * (<code>MPI_Allreduce</code>) that computes the maximum value among
2798 * all the maximum values of the individual processors. MPI provides such a
2799 * call, but it's even simpler to use the respective function in namespace
2800 * Utilities::MPI using the MPI communicator object since that will do the
2801 * right thing even if we work without MPI and on a single machine only. The
2802 * call to <code>Utilities::MPI::max</code> needs two arguments, namely the
2803 * local maximum (input) and the MPI communicator, which is MPI_COMM_WORLD
2804 * in this example.
2807 * template <int dim>
2808 * double BoussinesqFlowProblem<dim>::get_maximal_velocity() const
2810 * const QIterated<dim> quadrature_formula(QTrapez<1>(),
2811 * parameters.stokes_velocity_degree);
2812 * const unsigned int n_q_points = quadrature_formula.size();
2814 * FEValues<dim> fe_values(mapping,
2815 *                         stokes_fe,
2816 *                         quadrature_formula,
2817 *                         update_values);
2818 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2820 * const FEValuesExtractors::Vector velocities(0);
2822 * double max_local_velocity = 0;
2824 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2825 * if (cell->is_locally_owned())
2827 * fe_values.reinit(cell);
2828 * fe_values[velocities].get_function_values(stokes_solution,
2829 *                                           velocity_values);
2831 * for (unsigned int q = 0; q < n_q_points; ++q)
2832 * max_local_velocity =
2833 * std::max(max_local_velocity, velocity_values[q].norm());
2836 * return Utilities::MPI::max(max_local_velocity, MPI_COMM_WORLD);
2843 * <a name="BoussinesqFlowProblemget_cfl_number"></a>
2844 * <h5>BoussinesqFlowProblem::get_cfl_number</h5>
2848 * The next function does something similar, but we now compute the CFL
2849 * number, i.e., maximal velocity on a cell divided by the cell
2850 * diameter. This number is necessary to determine the time step size, as we
2851 * use a semi-explicit time stepping scheme for the temperature equation
2852 * (see @ref step_31 "step-31" for a discussion). We compute it in the same way as above:
2853 * Compute the local maximum over all locally owned cells, then exchange it
2854 * via MPI to find the global maximum.
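 *
 * In formulas, the quantity computed here is
 * @f{align*}{
 *   \mathrm{CFL} = \max_K \frac{\|\mathbf u\|_{\infty,K}}{h_K},
 * @f}
 * where @f$h_K@f$ is the diameter of cell @f$K@f$; the time step size is
 * then chosen proportional to the inverse of this number.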
2857 * template <int dim>
2858 * double BoussinesqFlowProblem<dim>::get_cfl_number() const
2860 * const QIterated<dim> quadrature_formula(QTrapez<1>(),
2861 * parameters.stokes_velocity_degree);
2862 * const unsigned int n_q_points = quadrature_formula.size();
2864 * FEValues<dim> fe_values(mapping,
2865 *                         stokes_fe,
2866 *                         quadrature_formula,
2867 *                         update_values);
2868 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2870 * const FEValuesExtractors::Vector velocities(0);
2872 * double max_local_cfl = 0;
2874 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2875 * if (cell->is_locally_owned())
2877 * fe_values.reinit(cell);
2878 * fe_values[velocities].get_function_values(stokes_solution,
2879 *                                           velocity_values);
2881 * double max_local_velocity = 1e-10;
2882 * for (unsigned int q = 0; q < n_q_points; ++q)
2883 * max_local_velocity =
2884 * std::max(max_local_velocity, velocity_values[q].norm());
2885 * max_local_cfl =
2886 *   std::max(max_local_cfl, max_local_velocity / cell->diameter());
2889 * return Utilities::MPI::max(max_local_cfl, MPI_COMM_WORLD);
2896 * <a name="BoussinesqFlowProblemget_entropy_variation"></a>
2897 * <h5>BoussinesqFlowProblem::get_entropy_variation</h5>
2901 * Next comes the computation of the global entropy variation
2902 * @f$\|E(T)-\bar{E}(T)\|_\infty@f$ where the entropy @f$E@f$ is defined as
2903 * discussed in the introduction. This is needed for the evaluation of the
2904 * stabilization in the temperature equation as explained in the
2905 * introduction. The entropy variation is actually only needed if we use
2906 * @f$\alpha=2@f$ as a power in the residual computation. The infinity norm is
2907 * computed by the maxima over quadrature points, as usual in discrete
2908 * computations.
2912 * In order to compute this quantity, we first have to find the
2913 * space-average @f$\bar{E}(T)@f$ and then evaluate the maximum. However, that
2914 * means that we would need to perform two loops. We can avoid the overhead
2915 * by noting that @f$\|E(T)-\bar{E}(T)\|_\infty =
2916 * \max\big(E_{\textrm{max}}(T)-\bar{E}(T),
2917 * \bar{E}(T)-E_{\textrm{min}}(T)\big)@f$, i.e., the maximum out of the
2918 * deviation from the average entropy in positive and negative
2919 * directions. The four quantities we need for the latter formula (maximum
2920 * entropy, minimum entropy, average entropy, area) can all be evaluated in
2921 * the same loop over all cells, so we choose this simpler variant.
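 *
 * With the entropy evaluated pointwise in the loop below as
 * @f$E = (T-\bar T)^2@f$ (using the mean of the last two temperature fields
 * for @f$T@f$), the function thus returns
 * @f{align*}{
 *   \max\big(E_{\textrm{max}}-\bar{E},\;\bar{E}-E_{\textrm{min}}\big),
 *   \qquad
 *   \bar{E} = \frac{1}{|\Omega|}\int_\Omega E \, dx .
 * @f}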
2924 * template <int dim>
2925 * double BoussinesqFlowProblem<dim>::get_entropy_variation(
2926 * const double average_temperature) const
2928 * if (parameters.stabilization_alpha != 2)
2929 *   return 1.;
2931 * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 1);
2932 * const unsigned int n_q_points = quadrature_formula.size();
2934 * FEValues<dim> fe_values(temperature_fe,
2935 * quadrature_formula,
2936 * update_values | update_JxW_values);
2937 * std::vector<double> old_temperature_values(n_q_points);
2938 * std::vector<double> old_old_temperature_values(n_q_points);
2942 * In the two functions above we computed the maximum of numbers that were
2943 * all non-negative, so we knew that zero was certainly a lower bound. On
2944 * the other hand, here we need to find the maximum deviation from the
2945 * average value, i.e., we will need to know the maximal and minimal
2946 * values of the entropy for which we don't a priori know the sign.
2950 * To compute it, we can therefore start with the largest and smallest
2951 * possible values we can store in a double precision number: The minimum
2952 * is initialized with a bigger and the maximum with a smaller number than
2953 * any one that is going to appear. We are then guaranteed that these
2954 * numbers will be overwritten in the loop on the first cell or, if this
2955 * processor does not own any cells, in the communication step at the
2956 * latest. The following loop then computes the minimum and maximum local
2957 * entropy as well as keeps track of the area/volume of the part of the
2958 * domain we locally own and the integral over the entropy on it:
2960 * double min_entropy = std::numeric_limits<double>::max(),
2961 *        max_entropy = -std::numeric_limits<double>::max(),
2962 *        area = 0,
2963 *        entropy_integrated = 0;
2965 * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
2966 *   if (cell->is_locally_owned())
2967 *     {
2968 *       fe_values.reinit(cell);
2969 *       fe_values.get_function_values(old_temperature_solution,
2970 *                                     old_temperature_values);
2971 *       fe_values.get_function_values(old_old_temperature_solution,
2972 *                                     old_old_temperature_values);
2973 *       for (unsigned int q = 0; q < n_q_points; ++q)
2974 *         {
2975 *           const double T =
2976 *             (old_temperature_values[q] + old_old_temperature_values[q]) / 2;
2977 *           const double entropy =
2978 *             ((T - average_temperature) * (T - average_temperature));
2980 *           min_entropy = std::min(min_entropy, entropy);
2981 *           max_entropy = std::max(max_entropy, entropy);
2982 *           area += fe_values.JxW(q);
2983 *           entropy_integrated += fe_values.JxW(q) * entropy;
2984 *         }
2985 *     }
2989 * Now we only need to exchange data between processors: we need to sum
2990 * the two integrals (<code>area</code>, <code>entropy_integrated</code>),
2991 * and get the extrema for maximum and minimum. We could do this through
2992 * four different data exchanges, but we can do it with two:
2993 * Utilities::MPI::sum also exists in a variant that takes an array of
2994 * values that are all to be summed up. And we can also utilize the
2995 * Utilities::MPI::max function by realizing that forming the minimum over
2996 * the minimal entropies equals forming the negative of the maximum over
2997 * the negative of the minimal entropies; this maximum can then be
2998 * combined with forming the maximum over the maximal entropies.
3001 * const double local_sums[2]   = {entropy_integrated, area},
3002 *              local_maxima[2] = {-min_entropy, max_entropy};
3003 * double global_sums[2], global_maxima[2];
3005 * Utilities::MPI::sum(local_sums, MPI_COMM_WORLD, global_sums);
3006 * Utilities::MPI::max(local_maxima, MPI_COMM_WORLD, global_maxima);
3010 * Having computed everything this way, we can then compute the average
3011 * entropy and find the @f$L^\infty@f$ norm by taking the larger of the
3012 * deviation of the maximum or minimum from the average:
3015 * const double average_entropy = global_sums[0] / global_sums[1];
3016 * const double entropy_diff = std::max(global_maxima[1] - average_entropy,
3017 *                                      average_entropy - (-global_maxima[0]));
3018 * return entropy_diff;
3026 * <a name="BoussinesqFlowProblemget_extrapolated_temperature_range"></a>
3027 * <h5>BoussinesqFlowProblem::get_extrapolated_temperature_range</h5>
3031 * The next function computes the minimal and maximal value of the
3032 * extrapolated temperature over the entire domain. Again, this is only a
3033 * slightly modified version of the respective function in @ref step_31 "step-31". As in
3034 * the function above, we collect local minima and maxima and then compute
3035 * the global extrema using the same trick as above.
3039 * As already discussed in @ref step_31 "step-31", the function needs to distinguish
3040 * between the first and all following time steps because it uses a higher
3041 * order temperature extrapolation scheme when at least two previous time
3042 * steps are available.
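 *
 * In formulas, the extrapolated temperature at a quadrature point is
 * @f{align*}{
 *   T^\ast = \left(1+\frac{k_n}{k_{n-1}}\right)T^{n-1}
 *            - \frac{k_n}{k_{n-1}}\,T^{n-2},
 * @f}
 * where @f$k_n@f$ and @f$k_{n-1}@f$ are the current and previous time step
 * sizes; this is exactly what the first of the two loops below computes.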
3045 * template <int dim>
3046 * std::pair<double, double>
3047 * BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range() const
3048 * {
3049 *   const QIterated<dim> quadrature_formula(QTrapez<1>(),
3050 *                                           parameters.temperature_degree);
3051 *   const unsigned int n_q_points = quadrature_formula.size();
3053 *   FEValues<dim> fe_values(mapping,
3054 *                           temperature_fe,
3055 *                           quadrature_formula,
3056 *                           update_values);
3057 *   std::vector<double> old_temperature_values(n_q_points);
3058 *   std::vector<double> old_old_temperature_values(n_q_points);
3060 *   double min_local_temperature = std::numeric_limits<double>::max(),
3061 *          max_local_temperature = -std::numeric_limits<double>::max();
3063 * if (timestep_number != 0)
3064 *   {
3065 *     for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3066 *       if (cell->is_locally_owned())
3067 *         {
3068 *           fe_values.reinit(cell);
3069 *           fe_values.get_function_values(old_temperature_solution,
3070 *                                         old_temperature_values);
3071 *           fe_values.get_function_values(old_old_temperature_solution,
3072 *                                         old_old_temperature_values);
3074 *           for (unsigned int q = 0; q < n_q_points; ++q)
3075 *             {
3076 *               const double temperature =
3077 *                 (1. + time_step / old_time_step) *
3078 *                   old_temperature_values[q] -
3079 *                 time_step / old_time_step * old_old_temperature_values[q];
3081 *               min_local_temperature =
3082 *                 std::min(min_local_temperature, temperature);
3083 *               max_local_temperature =
3084 *                 std::max(max_local_temperature, temperature);
3085 *             }
3086 *         }
3087 *   }
3088 * else
3089 *   {
3090 *     for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3091 *       if (cell->is_locally_owned())
3092 *         {
3093 *           fe_values.reinit(cell);
3094 *           fe_values.get_function_values(old_temperature_solution,
3095 *                                         old_temperature_values);
3097 *           for (unsigned int q = 0; q < n_q_points; ++q)
3098 *             {
3099 *               const double temperature = old_temperature_values[q];
3101 *               min_local_temperature =
3102 *                 std::min(min_local_temperature, temperature);
3103 *               max_local_temperature =
3104 *                 std::max(max_local_temperature, temperature);
3105 *             }
3106 *         }
3107 *   }
3109 * double local_extrema[2] = {-min_local_temperature, max_local_temperature};
3110 * double global_extrema[2];
3111 * Utilities::MPI::max(local_extrema, MPI_COMM_WORLD, global_extrema);
3113 * return std::make_pair(-global_extrema[0], global_extrema[1]);
3120 * <a name="BoussinesqFlowProblemcompute_viscosity"></a>
3121 * <h5>BoussinesqFlowProblem::compute_viscosity</h5>
3125 * The function that calculates the viscosity is purely local and so needs
3126 * no communication at all. It is mostly the same as in @ref step_31 "step-31" but with an
3127 * updated formulation of the viscosity if @f$\alpha=2@f$ is chosen:
3130 * template <int dim>
3131 * double BoussinesqFlowProblem<dim>::compute_viscosity(
3132 *   const std::vector<double> &         old_temperature,
3133 *   const std::vector<double> &         old_old_temperature,
3134 *   const std::vector<Tensor<1, dim>> & old_temperature_grads,
3135 *   const std::vector<Tensor<1, dim>> & old_old_temperature_grads,
3136 *   const std::vector<double> &         old_temperature_laplacians,
3137 *   const std::vector<double> &         old_old_temperature_laplacians,
3138 *   const std::vector<Tensor<1, dim>> & old_velocity_values,
3139 *   const std::vector<Tensor<1, dim>> & old_old_velocity_values,
3140 *   const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
3141 *   const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
3142 *   const double global_u_infty,
3143 *   const double global_T_variation,
3144 *   const double average_temperature,
3145 *   const double global_entropy_variation,
3146 *   const double cell_diameter) const
3147 * {
3148 *   if (global_u_infty == 0)
3149 *     return 5e-3 * cell_diameter;
3151 *   const unsigned int n_q_points = old_temperature.size();
3153 *   double max_residual = 0;
3154 *   double max_velocity = 0;
3156 * for (unsigned int q = 0; q < n_q_points; ++q)
3157 *   {
3158 *     const Tensor<1, dim> u =
3159 *       (old_velocity_values[q] + old_old_velocity_values[q]) / 2;
3161 *     const SymmetricTensor<2, dim> strain_rate =
3162 *       (old_strain_rates[q] + old_old_strain_rates[q]) / 2;
3164 *     const double T = (old_temperature[q] + old_old_temperature[q]) / 2;
3165 *     const double dT_dt =
3166 *       (old_temperature[q] - old_old_temperature[q]) / old_time_step;
3167 *     const double u_grad_T =
3168 *       u * (old_temperature_grads[q] + old_old_temperature_grads[q]) / 2;
3170 *     const double kappa_Delta_T =
3171 *       EquationData::kappa *
3172 *       (old_temperature_laplacians[q] + old_old_temperature_laplacians[q]) /
3173 *       2;
3174 *     const double gamma =
3175 *       ((EquationData::radiogenic_heating * EquationData::density(T) +
3176 *         2 * EquationData::eta * strain_rate * strain_rate) /
3177 *        (EquationData::density(T) * EquationData::specific_heat));
3179 *     double residual = std::abs(dT_dt + u_grad_T - kappa_Delta_T - gamma);
3180 *     if (parameters.stabilization_alpha == 2)
3181 *       residual *= std::abs(T - average_temperature);
3183 *     max_residual = std::max(residual, max_residual);
3184 *     max_velocity = std::max(std::sqrt(u * u), max_velocity);
3185 *   }
3187 * const double max_viscosity =
3188 *   (parameters.stabilization_beta * max_velocity * cell_diameter);
3189 * if (timestep_number == 0)
3190 *   return max_viscosity;
3195 * double entropy_viscosity;
3196 * if (parameters.stabilization_alpha == 2)
3197 *   entropy_viscosity =
3198 *     (parameters.stabilization_c_R * cell_diameter * cell_diameter *
3199 *      max_residual / global_entropy_variation);
3200 * else
3201 *   entropy_viscosity =
3202 *     (parameters.stabilization_c_R * cell_diameter *
3203 *      global_Omega_diameter * max_velocity * max_residual /
3204 *      (global_u_infty * global_T_variation));
3206 * return std::min(max_viscosity, entropy_viscosity);
3215 * <a name="TheBoussinesqFlowProblemsetupfunctions"></a>
3216 * <h4>The BoussinesqFlowProblem setup functions</h4>
3220 * The following three functions set up the Stokes matrix, the matrix used
3221 * for the Stokes preconditioner, and the temperature matrix. The code is
3222 * mostly the same as in @ref step_31 "step-31", but it has been broken out into three
3223 * functions of their own for simplicity.
3227 * The main functional difference between the code here and that in @ref step_31 "step-31"
3228 * is that the matrices we want to set up are distributed across multiple
3229 * processors. Since we still want to build up the sparsity pattern first
3230 * for efficiency reasons, we could continue to build the <i>entire</i>
3231 * sparsity pattern on every processor and then initialize the matrix as in
3232 * @ref step_31 "step-31". However, that would be inefficient: every processor would build
3233 * the same sparsity pattern, but only initialize a small part of the matrix
3234 * using it. It also violates the principle that every processor should only
3235 * work on those cells it owns (and, if necessary, the layer of ghost cells
3236 * around it).
3240 * We therefore use a TrilinosWrappers::BlockSparsityPattern instead,
3241 * which is (obviously) a wrapper around a sparsity pattern object provided
3242 * by Trilinos. The advantage is that the Trilinos sparsity pattern class
3243 * can communicate across multiple processors: if this processor fills in
3244 * all the nonzero entries that result from the cells it owns, and every
3245 * other processor does so as well, then at the end after some MPI
3246 * communication initiated by the <code>compress()</code> call, we will have
3247 * the globally assembled sparsity pattern available with which the global
3248 * matrix can be initialized.
3252 * There is one important aspect when initializing Trilinos sparsity
3253 * patterns in parallel: In addition to specifying the locally owned rows
3254 * and columns of the matrices via the @p stokes_partitioning index set, we
3255 * also supply information about all the rows we are possibly going to write
3256 * into when assembling on a certain processor. The set of locally relevant
3257 * rows contains all such rows (possibly also a few unnecessary ones, but it
3258 * is difficult to find the exact row indices before actually getting
3259 * indices on all cells and resolving constraints). This additional
3260 * information allows us to exactly determine the structure for the
3261 * off-processor data found during assembly. While Trilinos matrices are
3262 * able to collect this information on the fly as well (when initializing
3263 * them from some other reinit method), it is less efficient and leads to
3264 * problems when assembling matrices with multiple threads. In this program,
3265 * we pessimistically assume that only one processor at a time can write
3266 * into the matrix during assembly (whereas the computation is parallel),
3267 * which is fine for Trilinos matrices. In practice, one can do better by
3268 * hinting WorkStream at cells that do not share vertices, allowing for
3269 * parallelism among those cells (see the graph coloring algorithms and
3270 * WorkStream with colored iterators argument). However, that only works
3271 * when only one MPI processor is present because Trilinos' internal data
3272 * structures for accumulating off-processor data on the fly are not thread
3273 * safe. With the initialization presented here, there is no such problem
3274 * and one could safely introduce graph coloring for this algorithm.
The only other change we need to make is to tell the
DoFTools::make_sparsity_pattern() function that it is only supposed to
work on a subset of cells, namely the ones whose <code>subdomain_id</code>
equals the number of the current processor, and to ignore all other
cells. This strategy is replicated across all three of the following
functions.

Note that Trilinos matrices store the information contained in the
sparsity patterns, so we can safely release the <code>sp</code> variable
once the matrix has been given the sparsity structure.
template <int dim>
void BoussinesqFlowProblem<dim>::setup_stokes_matrix(
  const std::vector<IndexSet> &stokes_partitioning,
  const std::vector<IndexSet> &stokes_relevant_partitioning)
{
  stokes_matrix.clear();

  TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
                                            stokes_partitioning,
                                            stokes_relevant_partitioning,
                                            MPI_COMM_WORLD);

  Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
  for (unsigned int c = 0; c < dim + 1; ++c)
    for (unsigned int d = 0; d < dim + 1; ++d)
      if (!((c == dim) && (d == dim)))
        coupling[c][d] = DoFTools::always;
      else
        coupling[c][d] = DoFTools::none;

  DoFTools::make_sparsity_pattern(stokes_dof_handler,
                                  coupling,
                                  sp,
                                  stokes_constraints,
                                  false,
                                  Utilities::MPI::this_mpi_process(
                                    MPI_COMM_WORLD));
  sp.compress();

  stokes_matrix.reinit(sp);
}
template <int dim>
void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner(
  const std::vector<IndexSet> &stokes_partitioning,
  const std::vector<IndexSet> &stokes_relevant_partitioning)
{
  Amg_preconditioner.reset();
  Mp_preconditioner.reset();

  stokes_preconditioner_matrix.clear();

  TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
                                            stokes_partitioning,
                                            stokes_relevant_partitioning,
                                            MPI_COMM_WORLD);

  Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
  for (unsigned int c = 0; c < dim + 1; ++c)
    for (unsigned int d = 0; d < dim + 1; ++d)
      if (c == d)
        coupling[c][d] = DoFTools::always;
      else
        coupling[c][d] = DoFTools::none;

  DoFTools::make_sparsity_pattern(stokes_dof_handler,
                                  coupling,
                                  sp,
                                  stokes_constraints,
                                  false,
                                  Utilities::MPI::this_mpi_process(
                                    MPI_COMM_WORLD));
  sp.compress();

  stokes_preconditioner_matrix.reinit(sp);
}
template <int dim>
void BoussinesqFlowProblem<dim>::setup_temperature_matrices(
  const IndexSet &temperature_partitioner,
  const IndexSet &temperature_relevant_partitioner)
{
  T_preconditioner.reset();
  temperature_mass_matrix.clear();
  temperature_stiffness_matrix.clear();
  temperature_matrix.clear();

  TrilinosWrappers::SparsityPattern sp(temperature_partitioner,
                                       temperature_partitioner,
                                       temperature_relevant_partitioner,
                                       MPI_COMM_WORLD);
  DoFTools::make_sparsity_pattern(temperature_dof_handler,
                                  sp,
                                  temperature_constraints,
                                  false,
                                  Utilities::MPI::this_mpi_process(
                                    MPI_COMM_WORLD));
  sp.compress();

  temperature_matrix.reinit(sp);
  temperature_mass_matrix.reinit(sp);
  temperature_stiffness_matrix.reinit(sp);
}
The remainder of the setup function (after splitting out the three
functions above) mostly has to deal with the things we need to do for
parallelization across processors. Because setting all of this up is a
significant compute time expense of the program, we put everything we do
here into a timer group so that we can get summary information about the
fraction of time spent in this part of the program at its end.
At the top as usual we enumerate degrees of freedom and sort them by
component/block, followed by writing their numbers to the screen from
processor zero. The DoFHandler::distribute_dofs() function, when applied
to a parallel::distributed::Triangulation object, sorts degrees of
freedom in such a way that all degrees of freedom associated with
subdomain zero come before all those associated with subdomain one,
etc. For the Stokes part, this entails, however, that velocities and
pressures become intermixed, but this is trivially solved by sorting
again by blocks; it is worth noting that this latter operation leaves the
relative ordering of all velocities and pressures alone, i.e. within the
velocity block we will still have all those associated with subdomain
zero before all velocities associated with subdomain one, etc. This is
important since we store each of the blocks of this matrix distributed
across all processors and want this to be done in such a way that each
processor stores that part of the matrix that is roughly equal to the
degrees of freedom located on those cells that it will actually work on.
When printing the numbers of degrees of freedom, note that these numbers
are going to be large if we use many processors. Consequently, we let the
stream put a comma separator in between every three digits. The state of
the stream, using the locale, is saved from before to after this
operation. While slightly opaque, the code works because the default
locale (which we get using the constructor call
<code>std::locale("")</code>) implies printing numbers with a comma
separator for every third digit (i.e., thousands, millions, billions).
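As a self-contained illustration of this stream-state dance (not part of
the tutorial program; which separator is printed, if any, depends on the
user's environment locale):
@code
#include <iostream>
#include <locale>

int main()
{
  const std::locale previous = std::cout.getloc(); // save the state
  std::cout.imbue(std::locale(""));                // environment locale
  std::cout << 1234567 << std::endl;               // e.g. "1,234,567"
  std::cout.imbue(previous);                       // restore the state
}
@endcode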
In this function as well as many below, we measure how much time
we spend here and collect that in a section called "Setup dof
systems" across function invocations. This is done using a
TimerOutput::Scope object that gets a timer going in the section
with above name of the `computing_timer` object upon construction
of the local variable; the timer is stopped again when the
destructor of the `timing_section` variable is called. This, of
course, happens either at the end of the function, or if we leave
the function through a `return` statement or when an exception is
thrown somewhere -- in other words, whenever we leave this
function in any way. The use of such "scope" objects therefore
makes sure that we do not have to manually add code that tells
the timer to stop at every location where this function may be left.
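If the RAII pattern is new to you, here is a minimal sketch of it with
TimerOutput (hypothetical section name; the enum values
TimerOutput::summary and TimerOutput::wall_times request a wall-time
table when the timer object is destroyed):
@code
#include <deal.II/base/timer.h>

#include <iostream>

using namespace dealii;

void expensive_step(TimerOutput &timer)
{
  TimerOutput::Scope t(timer, "expensive step"); // starts the section
  // ... actual work would go here ...
}                                                // destructor stops it

int main()
{
  TimerOutput timer(std::cout, TimerOutput::summary, TimerOutput::wall_times);

  expensive_step(timer);
  expensive_step(timer); // times accumulate across invocations
}                        // the summary table is printed here
@endcode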
template <int dim>
void BoussinesqFlowProblem<dim>::setup_dofs()
{
  TimerOutput::Scope timing_section(computing_timer, "Setup dof systems");

  std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
  stokes_sub_blocks[dim] = 1;
  stokes_dof_handler.distribute_dofs(stokes_fe);
  DoFRenumbering::component_wise(stokes_dof_handler, stokes_sub_blocks);

  temperature_dof_handler.distribute_dofs(temperature_fe);

  const std::vector<types::global_dof_index> stokes_dofs_per_block =
    DoFTools::count_dofs_per_fe_block(stokes_dof_handler, stokes_sub_blocks);

  const unsigned int n_u = stokes_dofs_per_block[0],
                     n_p = stokes_dofs_per_block[1],
                     n_T = temperature_dof_handler.n_dofs();

  std::locale s = pcout.get_stream().getloc();
  pcout.get_stream().imbue(std::locale(""));
  pcout << "Number of active cells: " << triangulation.n_global_active_cells()
        << " (on " << triangulation.n_levels() << " levels)" << std::endl
        << "Number of degrees of freedom: " << n_u + n_p + n_T << " (" << n_u
        << '+' << n_p << '+' << n_T << ')' << std::endl
        << std::endl;
  pcout.get_stream().imbue(s);
After this, we have to set up the various partitioners (of type
<code>IndexSet</code>, see the introduction) that describe which parts
of each matrix or vector will be stored where, then call the functions
that actually set up the matrices, and at the end also resize the
various vectors we keep around in this program.
  std::vector<IndexSet> stokes_partitioning, stokes_relevant_partitioning;
  IndexSet              temperature_partitioning(n_T),
    temperature_relevant_partitioning(n_T);
  IndexSet stokes_relevant_set;
  {
    IndexSet stokes_index_set = stokes_dof_handler.locally_owned_dofs();
    stokes_partitioning.push_back(stokes_index_set.get_view(0, n_u));
    stokes_partitioning.push_back(stokes_index_set.get_view(n_u, n_u + n_p));

    DoFTools::extract_locally_relevant_dofs(stokes_dof_handler,
                                            stokes_relevant_set);
    stokes_relevant_partitioning.push_back(
      stokes_relevant_set.get_view(0, n_u));
    stokes_relevant_partitioning.push_back(
      stokes_relevant_set.get_view(n_u, n_u + n_p));

    temperature_partitioning = temperature_dof_handler.locally_owned_dofs();
    DoFTools::extract_locally_relevant_dofs(
      temperature_dof_handler, temperature_relevant_partitioning);
  }
Following this, we can compute constraints for the solution vectors,
including hanging node constraints and homogeneous and inhomogeneous
boundary values for the Stokes and temperature fields. Note that as for
everything else, the constraint objects can not hold <i>all</i>
constraints on every processor. Rather, each processor needs to store
only those that are actually necessary for correctness given that it
only assembles linear systems on cells it owns. As discussed in the
@ref distributed_paper "distributed computing paper", the set of
constraints we need to know about is exactly the set of constraints on
all locally relevant degrees of freedom, so this is what we use to
initialize the constraint objects.
  {
    stokes_constraints.clear();
    stokes_constraints.reinit(stokes_relevant_set);

    DoFTools::make_hanging_node_constraints(stokes_dof_handler,
                                            stokes_constraints);

    FEValuesExtractors::Vector velocity_components(0);
    VectorTools::interpolate_boundary_values(
      stokes_dof_handler,
      0,
      Functions::ZeroFunction<dim>(dim + 1),
      stokes_constraints,
      stokes_fe.component_mask(velocity_components));

    std::set<types::boundary_id> no_normal_flux_boundaries;
    no_normal_flux_boundaries.insert(1);
    VectorTools::compute_no_normal_flux_constraints(stokes_dof_handler,
                                                    0,
                                                    no_normal_flux_boundaries,
                                                    stokes_constraints,
                                                    mapping);
    stokes_constraints.close();
  }
  {
    temperature_constraints.clear();
    temperature_constraints.reinit(temperature_relevant_partitioning);

    DoFTools::make_hanging_node_constraints(temperature_dof_handler,
                                            temperature_constraints);
    VectorTools::interpolate_boundary_values(
      temperature_dof_handler,
      0,
      EquationData::TemperatureInitialValues<dim>(),
      temperature_constraints);
    VectorTools::interpolate_boundary_values(
      temperature_dof_handler,
      1,
      EquationData::TemperatureInitialValues<dim>(),
      temperature_constraints);
    temperature_constraints.close();
  }
All this done, we can then initialize the various matrix and vector
objects to their proper sizes. At the end, we also record that all
matrices and preconditioners have to be re-computed at the beginning of
the next time step. Note how we initialize the vectors for the Stokes
and temperature right hand sides: These are writable vectors (last
boolean argument set to @p true) that have the correct one-to-one
partitioning of locally owned elements but are still given the relevant
partitioning for means of figuring out the vector entries that are
going to be set right away. As for matrices, this allows for writing
local contributions into the vector with multiple threads (always
assuming that the same vector entry is not accessed by multiple threads
at the same time). The other vectors only allow for read access of
individual elements, including ghosts, but are not suitable for solvers.
  setup_stokes_matrix(stokes_partitioning, stokes_relevant_partitioning);
  setup_stokes_preconditioner(stokes_partitioning,
                              stokes_relevant_partitioning);
  setup_temperature_matrices(temperature_partitioning,
                             temperature_relevant_partitioning);

  stokes_rhs.reinit(stokes_partitioning,
                    stokes_relevant_partitioning,
                    MPI_COMM_WORLD,
                    true);
  stokes_solution.reinit(stokes_relevant_partitioning, MPI_COMM_WORLD);
  old_stokes_solution.reinit(stokes_solution);

  temperature_rhs.reinit(temperature_partitioning,
                         temperature_relevant_partitioning,
                         MPI_COMM_WORLD,
                         true);
  temperature_solution.reinit(temperature_relevant_partitioning,
                              MPI_COMM_WORLD);
  old_temperature_solution.reinit(temperature_solution);
  old_old_temperature_solution.reinit(temperature_solution);

  rebuild_stokes_matrix              = true;
  rebuild_stokes_preconditioner      = true;
  rebuild_temperature_matrices       = true;
  rebuild_temperature_preconditioner = true;
}
<a name="TheBoussinesqFlowProblemassemblyfunctions"></a>
<h4>The BoussinesqFlowProblem assembly functions</h4>
Following the discussion in the introduction and in the @ref threads
module, we split the assembly functions into different parts (a generic
skeleton of this three-part split is sketched after the list):

<ul>
<li> The local calculations of matrices and right hand sides, given
a certain cell as input (these functions are named
<code>local_assemble_*</code> below). The resulting function is, in other
words, essentially the body of the loop over all cells in @ref step_31
"step-31". Note, however, that these functions store the result from the
local calculations in variables of classes from the CopyData namespace.

<li> These objects are then given to the second step which writes the
local data into the global data structures (these functions are named
<code>copy_local_to_global_*</code> below). These functions are pretty
trivial.

<li> These two subfunctions are then used in the respective assembly
routine (called <code>assemble_*</code> below), where a WorkStream object
is set up and runs over all the cells that belong to the processor's
subdomain.
</ul>
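To make the division of labor concrete, here is a minimal, runnable toy
example of the same split (summing squares of vector entries; the
Scratch and Copy structs are hypothetical stand-ins for the
Assembly::Scratch and Assembly::CopyData classes used below):
@code
#include <deal.II/base/work_stream.h>

#include <iostream>
#include <vector>

using namespace dealii;

struct Scratch
{
  double square = 0; // per-task temporary storage
};

struct Copy
{
  double square = 0; // result handed to the serialized copy step
};

int main()
{
  std::vector<double> values = {1, 2, 3, 4};
  double              sum    = 0;

  WorkStream::run(
    values.begin(),
    values.end(),
    // step 1: local work, may run on several threads at once
    [](const std::vector<double>::iterator &it, Scratch &scratch, Copy &copy) {
      scratch.square = (*it) * (*it);
      copy.square    = scratch.square;
    },
    // step 2: writing into the global object, serialized in order
    [&sum](const Copy &copy) { sum += copy.square; },
    Scratch(),
    Copy());

  std::cout << sum << std::endl; // prints 30
}
@endcode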
<a name="Stokespreconditionerassembly"></a>
<h5>Stokes preconditioner assembly</h5>
Let us start with the functions that build the Stokes
preconditioner. The first two of these are pretty trivial, given the
discussion above. Note in particular that the main point in using the
scratch data object is that we want to avoid allocating any objects on
the heap each time we visit a new cell. As a consequence, the
assembly function below only has automatic local variables, and
everything else is accessed through the scratch data object, which is
allocated only once before we start the loop over all cells:
template <int dim>
void BoussinesqFlowProblem<dim>::local_assemble_stokes_preconditioner(
  const typename DoFHandler<dim>::active_cell_iterator &cell,
  Assembly::Scratch::StokesPreconditioner<dim> &        scratch,
  Assembly::CopyData::StokesPreconditioner<dim> &       data)
{
  const unsigned int dofs_per_cell = stokes_fe.dofs_per_cell;
  const unsigned int n_q_points =
    scratch.stokes_fe_values.n_quadrature_points;

  const FEValuesExtractors::Vector velocities(0);
  const FEValuesExtractors::Scalar pressure(dim);

  scratch.stokes_fe_values.reinit(cell);
  cell->get_dof_indices(data.local_dof_indices);

  data.local_matrix = 0;

  for (unsigned int q = 0; q < n_q_points; ++q)
    {
      for (unsigned int k = 0; k < dofs_per_cell; ++k)
        {
          scratch.grad_phi_u[k] =
            scratch.stokes_fe_values[velocities].gradient(k, q);
          scratch.phi_p[k] = scratch.stokes_fe_values[pressure].value(k, q);
        }

      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        for (unsigned int j = 0; j < dofs_per_cell; ++j)
          data.local_matrix(i, j) +=
            (EquationData::eta *
               scalar_product(scratch.grad_phi_u[i], scratch.grad_phi_u[j]) +
             (1. / EquationData::eta) * EquationData::pressure_scaling *
               EquationData::pressure_scaling *
               (scratch.phi_p[i] * scratch.phi_p[j])) *
            scratch.stokes_fe_values.JxW(q);
    }
}
template <int dim>
void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_preconditioner(
  const Assembly::CopyData::StokesPreconditioner<dim> &data)
{
  stokes_constraints.distribute_local_to_global(data.local_matrix,
                                                data.local_dof_indices,
                                                stokes_preconditioner_matrix);
}
Now for the function that actually puts things together, using the
WorkStream functions. WorkStream::run needs a start and end iterator to
enumerate the cells it is supposed to work on. Typically, one would use
DoFHandler::begin_active() and DoFHandler::end() for that, but here we
actually only want the subset of cells that in fact are owned by the
current processor. This is where the FilteredIterator class comes into
play: you give it a range of cells and it provides an iterator that only
iterates over that subset of cells that satisfy a certain predicate (a
predicate is a function of one argument that either returns true or
false). The predicate we use here is IteratorFilters::LocallyOwnedCell,
i.e., it returns true exactly if the cell is owned by the current
processor. The resulting iterator range is then exactly what we need.
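As a small, self-contained illustration of the predicate mechanism (a
sketch, not part of the program): counting the locally owned cells of a
triangulation could be written as
@code
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

template <int dim>
unsigned int n_locally_owned_cells(const Triangulation<dim> &triangulation)
{
  using CellFilter =
    FilteredIterator<typename Triangulation<dim>::active_cell_iterator>;

  unsigned int n = 0;
  // The predicate object is evaluated for every cell in the range; only
  // cells for which it returns true are visited:
  for (CellFilter cell(IteratorFilters::LocallyOwnedCell(),
                       triangulation.begin_active());
       cell !=
       CellFilter(IteratorFilters::LocallyOwnedCell(), triangulation.end());
       ++cell)
    ++n;
  return n;
}
@endcode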
We then call the WorkStream::run
function with this set of cells, scratch and copy objects, and
with pointers to two functions: the local assembly and
copy-local-to-global function. These functions need to have very
specific signatures: three arguments in the first and one
argument in the latter case (see the documentation of the
WorkStream::run function for the meaning of these arguments).
Note how we use lambda functions to
create a function object that satisfies this requirement. It uses
function arguments for the local assembly function that specify
cell, scratch data, and copy data, as well as a function argument
for the copy function that expects the
data to be written into the global matrix (also see the discussion in
@ref step_13 "step-13"'s <code>assemble_linear_system()</code> function). On the other
hand, the implicit zeroth argument of member functions (namely
the <code>this</code> pointer of the object on which that member
function is to operate on) is <i>bound</i> to the
<code>this</code> pointer of the current function and is captured. The
WorkStream::run function, as a consequence, does not need to know
anything about the object these functions work on.
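In isolation, the capture trick looks like this (plain C++, with
hypothetical names; nothing deal.II-specific is involved):
@code
#include <iostream>

struct Assembler
{
  void local_assemble(int cell, double &scratch, double &data) const
  {
    data = scratch * cell;
  }

  void run()
  {
    // local_assemble() has an implicit zeroth argument, the 'this'
    // pointer. Capturing [this] binds it, so the lambda exposes exactly
    // the three-argument signature the caller expects:
    auto worker = [this](int cell, double &scratch, double &data) {
      this->local_assemble(cell, scratch, data);
    };

    double scratch = 2.0, data = 0.0;
    worker(21, scratch, data);
    std::cout << data << std::endl; // prints 42
  }
};

int main()
{
  Assembler().run();
}
@endcode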
When the WorkStream is executed, it will create several local assembly
routines of the first kind for several cells and let some available
processors work on them. The function that needs to be synchronized,
i.e., the write operation into the global matrix, however, is executed by
only one thread at a time in the prescribed order. Of course, this only
holds for the parallelization on a single MPI process. Different MPI
processes will have their own WorkStream objects and do that work
completely independently (and in different memory spaces). In a
distributed calculation, some data will accumulate at degrees of freedom
that are not owned by the respective processor. It would be inefficient
to send data around every time we encounter such a dof. What happens
instead is that the Trilinos sparse matrix will keep that data and send
it to the owner at the end of assembly, by calling the
<code>compress()</code> command.
template <int dim>
void BoussinesqFlowProblem<dim>::assemble_stokes_preconditioner()
{
  stokes_preconditioner_matrix = 0;

  const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);

  using CellFilter =
    FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

  auto worker =
    [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
           Assembly::Scratch::StokesPreconditioner<dim> &        scratch,
           Assembly::CopyData::StokesPreconditioner<dim> &       data) {
      this->local_assemble_stokes_preconditioner(cell, scratch, data);
    };

  auto copier =
    [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
      this->copy_local_to_global_stokes_preconditioner(data);
    };

  WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
                             stokes_dof_handler.begin_active()),
                  CellFilter(IteratorFilters::LocallyOwnedCell(),
                             stokes_dof_handler.end()),
                  worker,
                  copier,
                  Assembly::Scratch::StokesPreconditioner<dim>(
                    stokes_fe,
                    quadrature_formula,
                    mapping,
                    update_JxW_values | update_values | update_gradients),
                  Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));

  stokes_preconditioner_matrix.compress(VectorOperation::add);
}
The final function in this block initiates assembly of the Stokes
preconditioner matrix and then in fact builds the Stokes
preconditioner. It is mostly the same as in the serial case. The only
difference to @ref step_31 "step-31" is that we use a Jacobi preconditioner
for the pressure mass matrix instead of IC, as discussed in the
introduction.
template <int dim>
void BoussinesqFlowProblem<dim>::build_stokes_preconditioner()
{
  if (rebuild_stokes_preconditioner == false)
    return;

  TimerOutput::Scope timer_section(computing_timer,
                                   "   Build Stokes preconditioner");
  pcout << "   Rebuilding Stokes preconditioner..." << std::flush;

  assemble_stokes_preconditioner();

  std::vector<std::vector<bool>> constant_modes;
  FEValuesExtractors::Vector     velocity_components(0);
  DoFTools::extract_constant_modes(stokes_dof_handler,
                                   stokes_fe.component_mask(
                                     velocity_components),
                                   constant_modes);

  Mp_preconditioner  = std::make_shared<TrilinosWrappers::PreconditionJacobi>();
  Amg_preconditioner = std::make_shared<TrilinosWrappers::PreconditionAMG>();

  TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
  Amg_data.constant_modes        = constant_modes;
  Amg_data.elliptic              = true;
  Amg_data.higher_order_elements = true;
  Amg_data.smoother_sweeps       = 2;
  Amg_data.aggregation_threshold = 0.02;

  Mp_preconditioner->initialize(stokes_preconditioner_matrix.block(1, 1));
  Amg_preconditioner->initialize(stokes_preconditioner_matrix.block(0, 0),
                                 Amg_data);

  rebuild_stokes_preconditioner = false;

  pcout << std::endl;
}
<a name="Stokessystemassembly"></a>
<h5>Stokes system assembly</h5>
The next three functions implement the assembly of the Stokes system,
again split up into a part performing local calculations, one for writing
the local data into the global matrix and vector, and one for actually
running the loop over all cells with the help of the WorkStream
class. Note that the assembly of the Stokes matrix needs only to be done
in case we have changed the mesh. Otherwise, just the
(temperature-dependent) right hand side needs to be calculated
here. Since we are working with distributed matrices and vectors, we have
to call the respective <code>compress()</code> functions at the end of
the assembly in order to send non-local data to the owner process.
template <int dim>
void BoussinesqFlowProblem<dim>::local_assemble_stokes_system(
  const typename DoFHandler<dim>::active_cell_iterator &cell,
  Assembly::Scratch::StokesSystem<dim> &                scratch,
  Assembly::CopyData::StokesSystem<dim> &               data)
{
  const unsigned int dofs_per_cell =
    scratch.stokes_fe_values.get_fe().dofs_per_cell;
  const unsigned int n_q_points =
    scratch.stokes_fe_values.n_quadrature_points;

  const FEValuesExtractors::Vector velocities(0);
  const FEValuesExtractors::Scalar pressure(dim);

  scratch.stokes_fe_values.reinit(cell);

  const typename DoFHandler<dim>::active_cell_iterator temperature_cell(
    &triangulation, cell->level(), cell->index(), &temperature_dof_handler);
  scratch.temperature_fe_values.reinit(temperature_cell);

  if (rebuild_stokes_matrix)
    data.local_matrix = 0;
  data.local_rhs = 0;

  scratch.temperature_fe_values.get_function_values(
    old_temperature_solution, scratch.old_temperature_values);

  for (unsigned int q = 0; q < n_q_points; ++q)
    {
      const double old_temperature = scratch.old_temperature_values[q];

      for (unsigned int k = 0; k < dofs_per_cell; ++k)
        {
          scratch.phi_u[k] = scratch.stokes_fe_values[velocities].value(k, q);
          if (rebuild_stokes_matrix)
            {
              scratch.grads_phi_u[k] =
                scratch.stokes_fe_values[velocities].symmetric_gradient(k, q);
              scratch.div_phi_u[k] =
                scratch.stokes_fe_values[velocities].divergence(k, q);
              scratch.phi_p[k] =
                scratch.stokes_fe_values[pressure].value(k, q);
            }
        }

      if (rebuild_stokes_matrix == true)
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            data.local_matrix(i, j) +=
              (EquationData::eta * 2 *
                 (scratch.grads_phi_u[i] * scratch.grads_phi_u[j]) -
               (EquationData::pressure_scaling * scratch.div_phi_u[i] *
                scratch.phi_p[j]) -
               (EquationData::pressure_scaling * scratch.phi_p[i] *
                scratch.div_phi_u[j])) *
              scratch.stokes_fe_values.JxW(q);

      const Tensor<1, dim> gravity = EquationData::gravity_vector(
        scratch.stokes_fe_values.quadrature_point(q));

      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        data.local_rhs(i) += (EquationData::density(old_temperature) *
                              gravity * scratch.phi_u[i]) *
                             scratch.stokes_fe_values.JxW(q);
    }

  cell->get_dof_indices(data.local_dof_indices);
}
template <int dim>
void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_system(
  const Assembly::CopyData::StokesSystem<dim> &data)
{
  if (rebuild_stokes_matrix == true)
    stokes_constraints.distribute_local_to_global(data.local_matrix,
                                                  data.local_rhs,
                                                  data.local_dof_indices,
                                                  stokes_matrix,
                                                  stokes_rhs);
  else
    stokes_constraints.distribute_local_to_global(data.local_rhs,
                                                  data.local_dof_indices,
                                                  stokes_rhs);
}
template <int dim>
void BoussinesqFlowProblem<dim>::assemble_stokes_system()
{
  TimerOutput::Scope timer_section(computing_timer,
                                   "   Assemble Stokes system");

  if (rebuild_stokes_matrix == true)
    stokes_matrix = 0;

  stokes_rhs = 0;

  const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);

  using CellFilter =
    FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

  WorkStream::run(
    CellFilter(IteratorFilters::LocallyOwnedCell(),
               stokes_dof_handler.begin_active()),
    CellFilter(IteratorFilters::LocallyOwnedCell(),
               stokes_dof_handler.end()),
    [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
           Assembly::Scratch::StokesSystem<dim> &                scratch,
           Assembly::CopyData::StokesSystem<dim> &               data) {
      this->local_assemble_stokes_system(cell, scratch, data);
    },
    [this](const Assembly::CopyData::StokesSystem<dim> &data) {
      this->copy_local_to_global_stokes_system(data);
    },
    Assembly::Scratch::StokesSystem<dim>(
      stokes_fe,
      mapping,
      quadrature_formula,
      (update_values | update_quadrature_points | update_JxW_values |
       (rebuild_stokes_matrix == true ? update_gradients : UpdateFlags(0))),
      temperature_fe,
      update_values),
    Assembly::CopyData::StokesSystem<dim>(stokes_fe));

  if (rebuild_stokes_matrix == true)
    stokes_matrix.compress(VectorOperation::add);
  stokes_rhs.compress(VectorOperation::add);

  rebuild_stokes_matrix = false;

  pcout << std::endl;
}
<a name="Temperaturematrixassembly"></a>
<h5>Temperature matrix assembly</h5>
The task to be performed by the next three functions is to calculate a
mass matrix and a Laplace matrix on the temperature system. These will be
combined in order to yield the semi-implicit time stepping matrix that
consists of the mass matrix plus a time step-dependent weight factor
times the Laplace matrix. This function is again essentially the body of
the loop over all cells from @ref step_31 "step-31".

The two following functions perform similar services to the ones above.
template <int dim>
void BoussinesqFlowProblem<dim>::local_assemble_temperature_matrix(
  const typename DoFHandler<dim>::active_cell_iterator &cell,
  Assembly::Scratch::TemperatureMatrix<dim> &           scratch,
  Assembly::CopyData::TemperatureMatrix<dim> &          data)
{
  const unsigned int dofs_per_cell =
    scratch.temperature_fe_values.get_fe().dofs_per_cell;
  const unsigned int n_q_points =
    scratch.temperature_fe_values.n_quadrature_points;

  scratch.temperature_fe_values.reinit(cell);
  cell->get_dof_indices(data.local_dof_indices);

  data.local_mass_matrix      = 0;
  data.local_stiffness_matrix = 0;

  for (unsigned int q = 0; q < n_q_points; ++q)
    {
      for (unsigned int k = 0; k < dofs_per_cell; ++k)
        {
          scratch.grad_phi_T[k] =
            scratch.temperature_fe_values.shape_grad(k, q);
          scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
        }

      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        for (unsigned int j = 0; j < dofs_per_cell; ++j)
          {
            data.local_mass_matrix(i, j) +=
              (scratch.phi_T[i] * scratch.phi_T[j] *
               scratch.temperature_fe_values.JxW(q));
            data.local_stiffness_matrix(i, j) +=
              (EquationData::kappa * scratch.grad_phi_T[i] *
               scratch.grad_phi_T[j] * scratch.temperature_fe_values.JxW(q));
          }
    }
}


template <int dim>
void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_matrix(
  const Assembly::CopyData::TemperatureMatrix<dim> &data)
{
  temperature_constraints.distribute_local_to_global(data.local_mass_matrix,
                                                     data.local_dof_indices,
                                                     temperature_mass_matrix);
  temperature_constraints.distribute_local_to_global(
    data.local_stiffness_matrix,
    data.local_dof_indices,
    temperature_stiffness_matrix);
}


template <int dim>
void BoussinesqFlowProblem<dim>::assemble_temperature_matrix()
{
  if (rebuild_temperature_matrices == false)
    return;

  TimerOutput::Scope timer_section(computing_timer,
                                   "   Assemble temperature matrices");
  temperature_mass_matrix      = 0;
  temperature_stiffness_matrix = 0;

  const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);

  using CellFilter =
    FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

  WorkStream::run(
    CellFilter(IteratorFilters::LocallyOwnedCell(),
               temperature_dof_handler.begin_active()),
    CellFilter(IteratorFilters::LocallyOwnedCell(),
               temperature_dof_handler.end()),
    [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
           Assembly::Scratch::TemperatureMatrix<dim> &           scratch,
           Assembly::CopyData::TemperatureMatrix<dim> &          data) {
      this->local_assemble_temperature_matrix(cell, scratch, data);
    },
    [this](const Assembly::CopyData::TemperatureMatrix<dim> &data) {
      this->copy_local_to_global_temperature_matrix(data);
    },
    Assembly::Scratch::TemperatureMatrix<dim>(temperature_fe,
                                              mapping,
                                              quadrature_formula),
    Assembly::CopyData::TemperatureMatrix<dim>(temperature_fe));

  temperature_mass_matrix.compress(VectorOperation::add);
  temperature_stiffness_matrix.compress(VectorOperation::add);

  rebuild_temperature_matrices       = false;
  rebuild_temperature_preconditioner = true;
}
<a name="Temperaturerighthandsideassembly"></a>
<h5>Temperature right hand side assembly</h5>
This is the last assembly function. It calculates the right hand side of
the temperature system, which includes the convection and the
stabilization terms. It includes a lot of evaluations of old solutions at
the quadrature points (which are necessary for calculating the artificial
viscosity of stabilization), but is otherwise similar to the other
assembly functions. Notice, once again, how we resolve the dilemma of
having inhomogeneous boundary conditions, by just making a right hand
side at this point (compare the comments for the <code>project()</code>
function above): We create some matrix columns with exactly the values
that would be entered for the temperature stiffness matrix, in case we
have inhomogeneously constrained dofs. That will account for the correct
balance of the right hand side vector with the matrix system of the
temperature.
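Spelled out (this is a restatement of what
<code>distribute_local_to_global()</code> will do with those columns, not
new functionality): if dof @f$j@f$ is constrained to an inhomogeneous
value @f$g_j@f$, eliminating it from a system with matrix @f$A@f$
modifies the right hand side as
@f[
  b_i \;\leftarrow\; b_i - A_{ij}\, g_j
  \qquad \text{for all unconstrained } i,
@f]
and <code>data.matrix_for_bc</code> in the function below holds exactly
those columns @f$A_{ij}@f$ of the would-be temperature system matrix.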
template <int dim>
void BoussinesqFlowProblem<dim>::local_assemble_temperature_rhs(
  const std::pair<double, double> global_T_range,
  const double                    global_max_velocity,
  const double                    global_entropy_variation,
  const typename DoFHandler<dim>::active_cell_iterator &cell,
  Assembly::Scratch::TemperatureRHS<dim> &              scratch,
  Assembly::CopyData::TemperatureRHS<dim> &             data)
{
  const bool use_bdf2_scheme = (timestep_number != 0);

  const unsigned int dofs_per_cell =
    scratch.temperature_fe_values.get_fe().dofs_per_cell;
  const unsigned int n_q_points =
    scratch.temperature_fe_values.n_quadrature_points;

  const FEValuesExtractors::Vector velocities(0);

  data.local_rhs     = 0;
  data.matrix_for_bc = 0;
  cell->get_dof_indices(data.local_dof_indices);

  scratch.temperature_fe_values.reinit(cell);

  typename DoFHandler<dim>::active_cell_iterator stokes_cell(
    &triangulation, cell->level(), cell->index(), &stokes_dof_handler);
  scratch.stokes_fe_values.reinit(stokes_cell);

  scratch.temperature_fe_values.get_function_values(
    old_temperature_solution, scratch.old_temperature_values);
  scratch.temperature_fe_values.get_function_values(
    old_old_temperature_solution, scratch.old_old_temperature_values);

  scratch.temperature_fe_values.get_function_gradients(
    old_temperature_solution, scratch.old_temperature_grads);
  scratch.temperature_fe_values.get_function_gradients(
    old_old_temperature_solution, scratch.old_old_temperature_grads);

  scratch.temperature_fe_values.get_function_laplacians(
    old_temperature_solution, scratch.old_temperature_laplacians);
  scratch.temperature_fe_values.get_function_laplacians(
    old_old_temperature_solution, scratch.old_old_temperature_laplacians);

  scratch.stokes_fe_values[velocities].get_function_values(
    stokes_solution, scratch.old_velocity_values);
  scratch.stokes_fe_values[velocities].get_function_values(
    old_stokes_solution, scratch.old_old_velocity_values);
  scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
    stokes_solution, scratch.old_strain_rates);
  scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
    old_stokes_solution, scratch.old_old_strain_rates);

  const double nu =
    compute_viscosity(scratch.old_temperature_values,
                      scratch.old_old_temperature_values,
                      scratch.old_temperature_grads,
                      scratch.old_old_temperature_grads,
                      scratch.old_temperature_laplacians,
                      scratch.old_old_temperature_laplacians,
                      scratch.old_velocity_values,
                      scratch.old_old_velocity_values,
                      scratch.old_strain_rates,
                      scratch.old_old_strain_rates,
                      global_max_velocity,
                      global_T_range.second - global_T_range.first,
                      0.5 * (global_T_range.second + global_T_range.first),
                      global_entropy_variation,
                      cell->diameter());

  for (unsigned int q = 0; q < n_q_points; ++q)
    {
      for (unsigned int k = 0; k < dofs_per_cell; ++k)
        {
          scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
          scratch.grad_phi_T[k] =
            scratch.temperature_fe_values.shape_grad(k, q);
        }

      const double T_term_for_rhs =
        (use_bdf2_scheme ?
           (scratch.old_temperature_values[q] *
              (1 + time_step / old_time_step) -
            scratch.old_old_temperature_values[q] * (time_step * time_step) /
              (old_time_step * (time_step + old_time_step))) :
           scratch.old_temperature_values[q]);

      const double ext_T =
        (use_bdf2_scheme ? (scratch.old_temperature_values[q] *
                              (1 + time_step / old_time_step) -
                            scratch.old_old_temperature_values[q] *
                              time_step / old_time_step) :
                           scratch.old_temperature_values[q]);

      const Tensor<1, dim> ext_grad_T =
        (use_bdf2_scheme ? (scratch.old_temperature_grads[q] *
                              (1 + time_step / old_time_step) -
                            scratch.old_old_temperature_grads[q] * time_step /
                              old_time_step) :
                           scratch.old_temperature_grads[q]);

      const Tensor<1, dim> extrapolated_u =
        (use_bdf2_scheme ?
           (scratch.old_velocity_values[q] * (1 + time_step / old_time_step) -
            scratch.old_old_velocity_values[q] * time_step / old_time_step) :
           scratch.old_velocity_values[q]);

      const SymmetricTensor<2, dim> extrapolated_strain_rate =
        (use_bdf2_scheme ?
           (scratch.old_strain_rates[q] * (1 + time_step / old_time_step) -
            scratch.old_old_strain_rates[q] * time_step / old_time_step) :
           scratch.old_strain_rates[q]);

      const double gamma =
        ((EquationData::radiogenic_heating * EquationData::density(ext_T) +
          2 * EquationData::eta * extrapolated_strain_rate *
            extrapolated_strain_rate) /
         (EquationData::density(ext_T) * EquationData::specific_heat));

      for (unsigned int i = 0; i < dofs_per_cell; ++i)
        {
          data.local_rhs(i) +=
            (T_term_for_rhs * scratch.phi_T[i] -
             time_step * extrapolated_u * ext_grad_T * scratch.phi_T[i] -
             time_step * nu * ext_grad_T * scratch.grad_phi_T[i] +
             time_step * gamma * scratch.phi_T[i]) *
            scratch.temperature_fe_values.JxW(q);

          if (temperature_constraints.is_inhomogeneously_constrained(
                data.local_dof_indices[i]))
            {
              for (unsigned int j = 0; j < dofs_per_cell; ++j)
                data.matrix_for_bc(j, i) +=
                  (scratch.phi_T[i] * scratch.phi_T[j] *
                     (use_bdf2_scheme ? ((2 * time_step + old_time_step) /
                                         (time_step + old_time_step)) :
                                        1.) +
                   scratch.grad_phi_T[i] * scratch.grad_phi_T[j] *
                     EquationData::kappa * time_step) *
                  scratch.temperature_fe_values.JxW(q);
            }
        }
    }
}
template <int dim>
void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_rhs(
  const Assembly::CopyData::TemperatureRHS<dim> &data)
{
  temperature_constraints.distribute_local_to_global(data.local_rhs,
                                                     data.local_dof_indices,
                                                     temperature_rhs,
                                                     data.matrix_for_bc);
}
In the function that runs the WorkStream for actually calculating the
right hand side, we also generate the final matrix. As mentioned above,
it is a sum of the mass matrix and the Laplace matrix, times some time
step-dependent weight. This weight is specified by the BDF-2 time
integration scheme, see the introduction in @ref step_31 "step-31". What is
new in this tutorial program (in addition to the use of MPI
parallelization and the WorkStream class), is that we now precompute the
temperature preconditioner as well. The reason is that the setup of the
Jacobi preconditioner takes a noticeable time compared to the solver
because we usually only need between 10 and 20 iterations for solving the
temperature system (this might sound strange, as Jacobi really only
consists of a diagonal, but in Trilinos it is derived from a more general
framework for point relaxation preconditioners which is a bit
inefficient). Hence, it is more efficient to precompute the
preconditioner, even though the matrix entries may slightly change
because the time step might change. This is not too big a problem because
we remesh every few time steps (and regenerate the preconditioner then).
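For reference, the variable-step BDF-2 approximation of the time
derivative that gives rise to these weights reads (this can be read off
the coefficients used in the code; @f$k_n@f$ and @f$k_{n-1}@f$ denote the
current and previous time step lengths):
@f[
  \frac{\partial T}{\partial t} \approx \frac{1}{k_n}
  \left(
    \frac{2k_n + k_{n-1}}{k_n + k_{n-1}}\, T^{n}
    - \frac{k_n + k_{n-1}}{k_{n-1}}\, T^{n-1}
    + \frac{k_n^2}{k_{n-1}(k_n + k_{n-1})}\, T^{n-2}
  \right).
@f]
Multiplying through by @f$k_n@f$ and moving the two old time levels to
the right hand side yields precisely the factor
@f$\frac{2k_n+k_{n-1}}{k_n+k_{n-1}}@f$ applied to the mass matrix below,
and the <code>T_term_for_rhs</code> expression in the local assembly
above.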
template <int dim>
void BoussinesqFlowProblem<dim>::assemble_temperature_system(
  const double maximal_velocity)
{
  const bool use_bdf2_scheme = (timestep_number != 0);

  if (use_bdf2_scheme == true)
    {
      temperature_matrix.copy_from(temperature_mass_matrix);
      temperature_matrix *=
        (2 * time_step + old_time_step) / (time_step + old_time_step);
      temperature_matrix.add(time_step, temperature_stiffness_matrix);
    }
  else
    {
      temperature_matrix.copy_from(temperature_mass_matrix);
      temperature_matrix.add(time_step, temperature_stiffness_matrix);
    }

  if (rebuild_temperature_preconditioner == true)
    {
      T_preconditioner =
        std::make_shared<TrilinosWrappers::PreconditionJacobi>();
      T_preconditioner->initialize(temperature_matrix);
      rebuild_temperature_preconditioner = false;
    }
The next part is computing the right hand side vectors. To do so, we
first compute the average temperature @f$T_m@f$ that we use for evaluating
the artificial viscosity stabilization through the residual @f$E(T) =
(T-T_m)^2@f$. We do this by defining the midpoint between maximum and
minimum temperature as average temperature in the definition of the
entropy viscosity. An alternative would be to use the integral average,
but the results are not very sensitive to this choice. The rest then
only requires calling WorkStream::run again, binding the arguments to
the <code>local_assemble_temperature_rhs</code> function that are the
same in every call to the correct values:
  temperature_rhs = 0;

  const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);
  const std::pair<double, double> global_T_range =
    get_extrapolated_temperature_range();

  const double average_temperature =
    0.5 * (global_T_range.first + global_T_range.second);
  const double global_entropy_variation =
    get_entropy_variation(average_temperature);

  using CellFilter =
    FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

  auto worker =
    [this, global_T_range, maximal_velocity, global_entropy_variation](
      const typename DoFHandler<dim>::active_cell_iterator &cell,
      Assembly::Scratch::TemperatureRHS<dim> &              scratch,
      Assembly::CopyData::TemperatureRHS<dim> &             data) {
      this->local_assemble_temperature_rhs(global_T_range,
                                           maximal_velocity,
                                           global_entropy_variation,
                                           cell,
                                           scratch,
                                           data);
    };

  auto copier = [this](const Assembly::CopyData::TemperatureRHS<dim> &data) {
    this->copy_local_to_global_temperature_rhs(data);
  };

  WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
                             temperature_dof_handler.begin_active()),
                  CellFilter(IteratorFilters::LocallyOwnedCell(),
                             temperature_dof_handler.end()),
                  worker,
                  copier,
                  Assembly::Scratch::TemperatureRHS<dim>(
                    temperature_fe, stokes_fe, mapping, quadrature_formula),
                  Assembly::CopyData::TemperatureRHS<dim>(temperature_fe));

  temperature_rhs.compress(VectorOperation::add);
}
<a name="BoussinesqFlowProblemsolve"></a>
<h4>BoussinesqFlowProblem::solve</h4>
This function solves the linear systems in each time step of the
Boussinesq problem. First, we work on the Stokes system and then on the
temperature system. In essence, it does the same things as the respective
function in @ref step_31 "step-31". However, there are a few changes here.

The first change is related to the way we store our solution: we keep the
vectors with locally owned degrees of freedom plus ghost nodes on each
MPI node. When we enter a solver which is supposed to perform
matrix-vector products with a distributed matrix, this is not the
appropriate form, though. There, we will want to have the solution vector
to be distributed in the same way as the matrix, i.e. without any
ghosts. So what we do first is to generate a distributed vector called
<code>distributed_stokes_solution</code> and put only the locally owned
dofs into that, which is neatly done by the <code>operator=</code> of the
TrilinosWrappers::MPI::BlockVector class.
Next, we scale the pressure solution (or rather, the initial guess) for
the solver so that it matches with the length scales in the matrices, as
discussed in the introduction. We also immediately scale the pressure
solution back to the correct units after the solution is completed. We
also need to set the pressure values at hanging nodes to zero. This we
also did in @ref step_31 "step-31" in order not to disturb the Schur
complement by some vector entries that actually are irrelevant during the
solve stage. As a difference to @ref step_31 "step-31", here we do it only
for the locally owned pressure dofs. After solving for the Stokes
solution, each processor copies the distributed solution back into the
solution vector that also includes ghost elements.
The third and most obvious change is that we have two variants for the
Stokes solver: A fast solver that sometimes breaks down, and a robust
solver that is slower. This is what we already discussed in the
introduction. Here is how we realize it: First, we perform 30 iterations
with the fast solver based on the simple preconditioner based on the AMG
V-cycle instead of an approximate solve (this is indicated by the
<code>false</code> argument to the
<code>LinearSolvers::BlockSchurPreconditioner</code> object). If we
converge, everything is fine. If we do not converge, the solver control
object will throw an exception of type SolverControl::NoConvergence.
Usually, this would abort the program because we don't catch them in our
usual <code>solve()</code> functions. This is certainly not what we want
to happen here. Rather, we want to switch to the strong solver and
continue the solution process with whatever vector we got so far. Hence,
we catch the exception with the C++ try/catch mechanism. We then simply
go through the same solver sequence again in the <code>catch</code>
clause, this time passing the @p true flag to the preconditioner for the
strong solver, signaling an approximate CG solve.
template <int dim>
void BoussinesqFlowProblem<dim>::solve()
{
  {
    TimerOutput::Scope timer_section(computing_timer,
                                     "   Solve Stokes system");

    pcout << "   Solving Stokes system... " << std::flush;

    TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(
      stokes_rhs);
    distributed_stokes_solution = stokes_solution;

    distributed_stokes_solution.block(1) /= EquationData::pressure_scaling;

    const unsigned int
      start = (distributed_stokes_solution.block(0).size() +
               distributed_stokes_solution.block(1).local_range().first),
      end   = (distributed_stokes_solution.block(0).size() +
             distributed_stokes_solution.block(1).local_range().second);
    for (unsigned int i = start; i < end; ++i)
      if (stokes_constraints.is_constrained(i))
        distributed_stokes_solution(i) = 0;

    PrimitiveVectorMemory<TrilinosWrappers::MPI::BlockVector> mem;

    unsigned int  n_iterations     = 0;
    const double  solver_tolerance = 1e-8 * stokes_rhs.l2_norm();
    SolverControl solver_control(30, solver_tolerance);

    try
      {
        const LinearSolvers::BlockSchurPreconditioner<
          TrilinosWrappers::PreconditionAMG,
          TrilinosWrappers::PreconditionJacobi>
          preconditioner(stokes_matrix,
                         stokes_preconditioner_matrix,
                         *Mp_preconditioner,
                         *Amg_preconditioner,
                         false);

        SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
          solver_control,
          mem,
          SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
            30));
        solver.solve(stokes_matrix,
                     distributed_stokes_solution,
                     stokes_rhs,
                     preconditioner);

        n_iterations = solver_control.last_step();
      }
    catch (SolverControl::NoConvergence &)
      {
        const LinearSolvers::BlockSchurPreconditioner<
          TrilinosWrappers::PreconditionAMG,
          TrilinosWrappers::PreconditionJacobi>
          preconditioner(stokes_matrix,
                         stokes_preconditioner_matrix,
                         *Mp_preconditioner,
                         *Amg_preconditioner,
                         true);

        SolverControl solver_control_refined(stokes_matrix.m(),
                                             solver_tolerance);
        SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
          solver_control_refined,
          mem,
          SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
            50));
        solver.solve(stokes_matrix,
                     distributed_stokes_solution,
                     stokes_rhs,
                     preconditioner);

        n_iterations =
          (solver_control.last_step() + solver_control_refined.last_step());
      }

    stokes_constraints.distribute(distributed_stokes_solution);

    distributed_stokes_solution.block(1) *= EquationData::pressure_scaling;

    stokes_solution = distributed_stokes_solution;
    pcout << n_iterations << " iterations." << std::endl;
  }
Now let's turn to the temperature part: First, we compute the time step
size. We found that we need smaller time steps for 3d than for 2d for
the shell geometry. This is because the cells are more distorted in
that case (it is the smallest edge length that determines the CFL
number). Instead of computing the time step from maximum velocity and
minimal mesh size as in @ref step_31 "step-31", we compute local CFL
numbers, i.e., on each cell we compute the maximum velocity divided by
the mesh size, and compute the maximum of them. Hence, we need to choose
the factor in front of the time step slightly smaller.

After temperature right hand side assembly, we solve the linear system
for temperature (with fully distributed vectors without any ghosts),
apply constraints and copy the vector back to one with ghosts.

In the end, we extract the temperature range similarly to @ref step_31
"step-31" to produce some output (for example in order to help us choose
the stabilization constants, as discussed in the introduction). The only
difference is that we need to exchange maxima over all processors.
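In formula form, the time step chosen below is (read off the code that
follows, under the assumption that <code>get_cfl_number()</code> returns
the global maximum over all cells of @f$\|\mathbf u\|_{\infty,K}/h_K@f$;
@f$q_T@f$ denotes the temperature polynomial degree):
@f[
  k = \frac{c_k}{2.1\, d\sqrt{d}\;\; q_T\;
      \max_K \frac{\|\mathbf u\|_{\infty,K}}{h_K}},
  \qquad
  c_k = \begin{cases} 1 & \text{in 2d}, \\ 0.25 & \text{in 3d}. \end{cases}
@f]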
  {
    TimerOutput::Scope timer_section(computing_timer,
                                     "   Assemble temperature rhs");

    old_time_step = time_step;

    const double scaling = (dim == 3 ? 0.25 : 1.0);
    time_step = (scaling / (2.1 * dim * std::sqrt(1. * dim)) /
                 (parameters.temperature_degree * get_cfl_number()));

    const double maximal_velocity = get_maximal_velocity();
    pcout << "   Maximal velocity: "
          << maximal_velocity * EquationData::year_in_seconds * 100
          << " cm/year" << std::endl;
    pcout << "   "
          << "Time step: " << time_step / EquationData::year_in_seconds
          << " years" << std::endl;

    temperature_solution = old_temperature_solution;
    assemble_temperature_system(maximal_velocity);
  }

  {
    TimerOutput::Scope timer_section(computing_timer,
                                     "   Solve temperature system");

    SolverControl solver_control(temperature_matrix.m(),
                                 1e-12 * temperature_rhs.l2_norm());
    SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);

    TrilinosWrappers::MPI::Vector distributed_temperature_solution(
      temperature_rhs);
    distributed_temperature_solution = temperature_solution;

    cg.solve(temperature_matrix,
             distributed_temperature_solution,
             temperature_rhs,
             *T_preconditioner);

    temperature_constraints.distribute(distributed_temperature_solution);
    temperature_solution = distributed_temperature_solution;

    pcout << "   " << solver_control.last_step()
          << " CG iterations for temperature" << std::endl;

    double temperature[2] = {std::numeric_limits<double>::max(),
                             -std::numeric_limits<double>::max()};
    double global_temperature[2];

    for (unsigned int i =
           distributed_temperature_solution.local_range().first;
         i < distributed_temperature_solution.local_range().second;
         ++i)
      {
        temperature[0] =
          std::min<double>(temperature[0],
                           distributed_temperature_solution(i));
        temperature[1] =
          std::max<double>(temperature[1],
                           distributed_temperature_solution(i));
      }

    temperature[0] *= -1.0;
    Utilities::MPI::max(temperature, MPI_COMM_WORLD, global_temperature);
    global_temperature[0] *= -1.0;

    pcout << "   Temperature range: " << global_temperature[0] << ' '
          << global_temperature[1] << std::endl;
  }
}
<a name="BoussinesqFlowProblemoutput_results"></a>
<h4>BoussinesqFlowProblem::output_results</h4>
Next comes the function that generates the output. The quantities to
output could be introduced manually like we did in @ref step_31
"step-31". An alternative is to hand this task over to a class
PostProcessor, derived from the class DataPostprocessor, which can be
attached to DataOut. This allows us to output derived quantities from the
solution, like the friction heating included in this example. It
overloads the virtual function DataPostprocessor::evaluate_vector_field(),
which is then internally called from DataOut::build_patches(). We have to
give it values of the numerical solution, its derivatives, normals to the
cell, the actual evaluation points and any additional quantities. This
follows the same procedure as discussed in @ref step_29 "step-29" and
other programs.
template <int dim>
class BoussinesqFlowProblem<dim>::Postprocessor
  : public DataPostprocessor<dim>
{
public:
  Postprocessor(const unsigned int partition, const double minimal_pressure);

  virtual void evaluate_vector_field(
    const DataPostprocessorInputs::Vector<dim> &inputs,
    std::vector<Vector<double>> &computed_quantities) const override;

  virtual std::vector<std::string> get_names() const override;

  virtual std::vector<
    DataComponentInterpretation::DataComponentInterpretation>
  get_data_component_interpretation() const override;

  virtual UpdateFlags get_needed_update_flags() const override;

private:
  const unsigned int partition;
  const double       minimal_pressure;
};


template <int dim>
BoussinesqFlowProblem<dim>::Postprocessor::Postprocessor(
  const unsigned int partition,
  const double       minimal_pressure)
  : partition(partition)
  , minimal_pressure(minimal_pressure)
{}
Here we define the names for the variables we want to output. These are
the actual solution values for velocity, pressure, and temperature, as
well as the friction heating and to each cell the number of the processor
that owns it. This allows us to visualize the partitioning of the domain
among the processors. Except for the velocity, which is vector-valued,
all other quantities are scalar.
template <int dim>
std::vector<std::string>
BoussinesqFlowProblem<dim>::Postprocessor::get_names() const
{
  std::vector<std::string> solution_names(dim, "velocity");
  solution_names.emplace_back("p");
  solution_names.emplace_back("T");
  solution_names.emplace_back("friction_heating");
  solution_names.emplace_back("partition");

  return solution_names;
}


template <int dim>
std::vector<DataComponentInterpretation::DataComponentInterpretation>
BoussinesqFlowProblem<dim>::Postprocessor::get_data_component_interpretation()
  const
{
  std::vector<DataComponentInterpretation::DataComponentInterpretation>
    interpretation(dim,
                   DataComponentInterpretation::component_is_part_of_vector);

  interpretation.push_back(DataComponentInterpretation::component_is_scalar);
  interpretation.push_back(DataComponentInterpretation::component_is_scalar);
  interpretation.push_back(DataComponentInterpretation::component_is_scalar);
  interpretation.push_back(DataComponentInterpretation::component_is_scalar);

  return interpretation;
}


template <int dim>
UpdateFlags
BoussinesqFlowProblem<dim>::Postprocessor::get_needed_update_flags() const
{
  return update_values | update_gradients | update_quadrature_points;
}
Now we implement the function that computes the derived quantities. As we
also did for the output, we rescale the velocity from its SI units to
something more readable, namely cm/year. Next, the pressure is scaled to
be between 0 and the maximum pressure. This makes it more easily
comparable -- in essence making all pressure variables positive or
zero. Temperature is taken as is, and the friction heating is computed as
@f$2 \eta \varepsilon(\mathbf{u}) \cdot \varepsilon(\mathbf{u})@f$.

The quantities we output here are more for illustration, rather than for
actual scientific value. We come back to this briefly in the results
section of this program and explain what one may in fact be interested
in.
template <int dim>
void BoussinesqFlowProblem<dim>::Postprocessor::evaluate_vector_field(
  const DataPostprocessorInputs::Vector<dim> &inputs,
  std::vector<Vector<double>> &               computed_quantities) const
{
  const unsigned int n_quadrature_points = inputs.solution_values.size();
  Assert(inputs.solution_gradients.size() == n_quadrature_points,
         ExcInternalError());
  Assert(computed_quantities.size() == n_quadrature_points,
         ExcInternalError());
  Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());

  for (unsigned int q = 0; q < n_quadrature_points; ++q)
    {
      for (unsigned int d = 0; d < dim; ++d)
        computed_quantities[q](d) = (inputs.solution_values[q](d) *
                                     EquationData::year_in_seconds * 100);

      const double pressure =
        (inputs.solution_values[q](dim) - minimal_pressure);
      computed_quantities[q](dim) = pressure;

      const double temperature        = inputs.solution_values[q](dim + 1);
      computed_quantities[q](dim + 1) = temperature;

      Tensor<2, dim> grad_u;
      for (unsigned int d = 0; d < dim; ++d)
        grad_u[d] = inputs.solution_gradients[q][d];
      const SymmetricTensor<2, dim> strain_rate = symmetrize(grad_u);
      computed_quantities[q](dim + 2) =
        2 * EquationData::eta * strain_rate * strain_rate;

      computed_quantities[q](dim + 3) = partition;
    }
}
The <code>output_results()</code> function has a similar task to the one
in @ref step_31 "step-31". However, here we are going to demonstrate a
different technique on how to merge output from different DoFHandler
objects. The way we're going to achieve this recombination is to create a
joint DoFHandler that collects both components, the Stokes solution and
the temperature solution. This can be nicely done by combining the finite
elements from the two systems to form one FESystem, and let this
collective system define a new DoFHandler object. To be sure that
everything was done correctly, we perform a sanity check that ensures
that we got all the dofs from both Stokes and temperature even in the
combined system. We then combine the data vectors. Unfortunately, there
is no straight-forward relation that tells us how to sort Stokes and
temperature vector into the joint vector. The way we can get around this
trouble is to rely on the information collected in the FESystem. For each
dof on a cell, the joint finite element knows to which equation component
(velocity component, pressure, or temperature) it belongs; that's the
information we need! So we step through all cells (with iterators into
all three DoFHandlers moving in sync), and for each joint cell dof, we
use the FiniteElement::system_to_base_index function (see there for a
description of what the various parts of its return value contain). We
also need to keep track whether we're on a Stokes dof or a temperature
dof, which is contained in joint_fe.system_to_base_index(i).first.first.
Eventually, the dof_indices data structures on either of the three
systems tell us how the relation between global vector and local dofs
looks like on the present cell, which concludes this tedious work. We
make sure that each processor only works on the subdomain it owns locally
(and not on ghost or artificial cells) when building the joint solution
vector. The same will then have to be done in DataOut::build_patches(),
but that function does so automatically.

What we end up with is a set of patches that we can write using the
functions in DataOutBase in a variety of output formats. Here, we then
have to pay attention that what each processor writes is really only its
own part of the domain, i.e. we will want to write each processor's
contribution into a separate file. This we do by adding an additional
number to the filename when we write the solution. This is not really
new, we did it similarly in @ref step_40 "step-40". Note that we write in
the compressed format @p .vtu instead of plain vtk files, which saves
quite some storage.

All the rest of the work is done in the PostProcessor class.
template <int dim>
void BoussinesqFlowProblem<dim>::output_results()
{
  TimerOutput::Scope timer_section(computing_timer, "Postprocessing");

  const FESystem<dim> joint_fe(stokes_fe, 1, temperature_fe, 1);

  DoFHandler<dim> joint_dof_handler(triangulation);
  joint_dof_handler.distribute_dofs(joint_fe);
  Assert(joint_dof_handler.n_dofs() ==
           stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(),
         ExcInternalError());

  TrilinosWrappers::MPI::Vector joint_solution;
  joint_solution.reinit(joint_dof_handler.locally_owned_dofs(),
                        MPI_COMM_WORLD);

  {
    std::vector<types::global_dof_index> local_joint_dof_indices(
      joint_fe.dofs_per_cell);
    std::vector<types::global_dof_index> local_stokes_dof_indices(
      stokes_fe.dofs_per_cell);
    std::vector<types::global_dof_index> local_temperature_dof_indices(
      temperature_fe.dofs_per_cell);

    typename DoFHandler<dim>::active_cell_iterator
      joint_cell       = joint_dof_handler.begin_active(),
      joint_endc       = joint_dof_handler.end(),
      stokes_cell      = stokes_dof_handler.begin_active(),
      temperature_cell = temperature_dof_handler.begin_active();
    for (; joint_cell != joint_endc;
         ++joint_cell, ++stokes_cell, ++temperature_cell)
      if (joint_cell->is_locally_owned())
        {
          joint_cell->get_dof_indices(local_joint_dof_indices);
          stokes_cell->get_dof_indices(local_stokes_dof_indices);
          temperature_cell->get_dof_indices(local_temperature_dof_indices);

          for (unsigned int i = 0; i < joint_fe.dofs_per_cell; ++i)
            if (joint_fe.system_to_base_index(i).first.first == 0)
              {
                Assert(joint_fe.system_to_base_index(i).second <
                         local_stokes_dof_indices.size(),
                       ExcInternalError());

                joint_solution(local_joint_dof_indices[i]) = stokes_solution(
                  local_stokes_dof_indices[joint_fe.system_to_base_index(i)
                                             .second]);
              }
            else
              {
                Assert(joint_fe.system_to_base_index(i).first.first == 1,
                       ExcInternalError());
                Assert(joint_fe.system_to_base_index(i).second <
                         local_temperature_dof_indices.size(),
                       ExcInternalError());
                joint_solution(local_joint_dof_indices[i]) =
                  temperature_solution(
                    local_temperature_dof_indices
                      [joint_fe.system_to_base_index(i).second]);
              }
        }
  }

  joint_solution.compress(VectorOperation::insert);

  IndexSet locally_relevant_joint_dofs(joint_dof_handler.n_dofs());
  DoFTools::extract_locally_relevant_dofs(joint_dof_handler,
                                          locally_relevant_joint_dofs);
  TrilinosWrappers::MPI::Vector locally_relevant_joint_solution;
  locally_relevant_joint_solution.reinit(locally_relevant_joint_dofs,
                                         MPI_COMM_WORLD);
  locally_relevant_joint_solution = joint_solution;

  Postprocessor postprocessor(Utilities::MPI::this_mpi_process(
                                MPI_COMM_WORLD),
                              stokes_solution.block(1).min());

  DataOut<dim> data_out;
  data_out.attach_dof_handler(joint_dof_handler);
  data_out.add_data_vector(locally_relevant_joint_solution, postprocessor);
  data_out.build_patches();

  static int out_index = 0;
  data_out.write_vtu_with_pvtu_record(
    "./", "solution", out_index, MPI_COMM_WORLD, 5);

  ++out_index;
}
 * <a name="BoussinesqFlowProblemrefine_mesh"></a>
 * <h4>BoussinesqFlowProblem::refine_mesh</h4>
 *
 * This function isn't really new either. Since the <code>setup_dofs</code>
 * function that we call in the middle has its own timer section, we split
 * the timing of this function into two sections. This will also allow us to
 * easily identify which of the two is more expensive.
 *
 * One thing of note, however, is that we only want to compute error
 * indicators on the locally owned subdomain. In order to achieve this, we
 * pass one additional argument to the KellyErrorEstimator::estimate
 * function. Note that the vector for error estimates is resized to the
 * number of active cells present on the current process, which is less than
 * the total number of active cells on all processors (but more than the
 * number of locally owned active cells); each processor only has a few
 * coarse cells around the locally owned ones, as also explained in @ref step_40 "step-40".
 *
 * The local error estimates are then handed to a %parallel version of
 * GridRefinement (in namespace parallel::distributed::GridRefinement, see
 * also @ref step_40 "step-40") which looks at the errors and finds the cells that need
 * refinement by comparing the error values across processors. As in
 * @ref step_31 "step-31", we want to limit the maximum grid level. So in case some cells
 * have been marked that are already at the finest level, we simply clear
 * their refine flags:
 *   template <int dim>
 *   void
 *   BoussinesqFlowProblem<dim>::refine_mesh(const unsigned int max_grid_level)
 *   {
 *     parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
 *       temperature_trans(temperature_dof_handler);
 *     parallel::distributed::SolutionTransfer<dim,
 *                                             TrilinosWrappers::MPI::BlockVector>
 *       stokes_trans(stokes_dof_handler);
 *
 *     {
 *       TimerOutput::Scope timer_section(computing_timer,
 *                                        "Refine mesh structure, part 1");
 *
 *       Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
 *
 *       KellyErrorEstimator<dim>::estimate(
 *         temperature_dof_handler,
 *         QGauss<dim - 1>(parameters.temperature_degree + 1),
 *         std::map<types::boundary_id, const Function<dim> *>(),
 *         temperature_solution,
 *         estimated_error_per_cell,
 *         ComponentMask(),
 *         nullptr,
 *         0,
 *         triangulation.locally_owned_subdomain());
 *
 *       parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
 *         triangulation, estimated_error_per_cell, 0.3, 0.1);
 *
 *       if (triangulation.n_levels() > max_grid_level)
 *         for (typename Triangulation<dim>::active_cell_iterator cell =
 *                triangulation.begin_active(max_grid_level);
 *              cell != triangulation.end();
 *              ++cell)
 *           cell->clear_refine_flag();
 * With all flags marked as necessary, we can then tell the
 * parallel::distributed::SolutionTransfer objects to get ready to
 * transfer data from one mesh to the next, which they will do when
 * notified by the Triangulation as part of the @p execute_coarsening_and_refinement() call.
 * The syntax is similar to the non-%parallel solution transfer (with the
 * exception that here a pointer to the vector entries is enough). The
 * remainder of the function further down below is then concerned with
 * setting up the data structures again after mesh refinement and
 * restoring the solution vectors on the new mesh.
 *       std::vector<const TrilinosWrappers::MPI::Vector *> x_temperature(2);
 *       x_temperature[0] = &temperature_solution;
 *       x_temperature[1] = &old_temperature_solution;
 *       std::vector<const TrilinosWrappers::MPI::BlockVector *> x_stokes(2);
 *       x_stokes[0] = &stokes_solution;
 *       x_stokes[1] = &old_stokes_solution;
 *
 *       triangulation.prepare_coarsening_and_refinement();
 *
 *       temperature_trans.prepare_for_coarsening_and_refinement(x_temperature);
 *       stokes_trans.prepare_for_coarsening_and_refinement(x_stokes);
 *
 *       triangulation.execute_coarsening_and_refinement();
 *     }
 *
 *     setup_dofs();
 *
 *     {
 *       TimerOutput::Scope timer_section(computing_timer,
 *                                        "Refine mesh structure, part 2");
 *
 *       {
 *         TrilinosWrappers::MPI::Vector distributed_temp1(temperature_rhs);
 *         TrilinosWrappers::MPI::Vector distributed_temp2(temperature_rhs);
 *
 *         std::vector<TrilinosWrappers::MPI::Vector *> tmp(2);
 *         tmp[0] = &(distributed_temp1);
 *         tmp[1] = &(distributed_temp2);
 *         temperature_trans.interpolate(tmp);
 *
 * Enforce constraints to make the interpolated solution conforming on
 * the new mesh:
 *
 *         temperature_constraints.distribute(distributed_temp1);
 *         temperature_constraints.distribute(distributed_temp2);
 *
 *         temperature_solution     = distributed_temp1;
 *         old_temperature_solution = distributed_temp2;
 *       }
 *
 *       {
 *         TrilinosWrappers::MPI::BlockVector distributed_stokes(stokes_rhs);
 *         TrilinosWrappers::MPI::BlockVector old_distributed_stokes(stokes_rhs);
 *
 *         std::vector<TrilinosWrappers::MPI::BlockVector *> stokes_tmp(2);
 *         stokes_tmp[0] = &(distributed_stokes);
 *         stokes_tmp[1] = &(old_distributed_stokes);
 *
 *         stokes_trans.interpolate(stokes_tmp);
 *
 * Enforce constraints to make the interpolated solution conforming on
 * the new mesh:
 *
 *         stokes_constraints.distribute(distributed_stokes);
 *         stokes_constraints.distribute(old_distributed_stokes);
 *
 *         stokes_solution     = distributed_stokes;
 *         old_stokes_solution = old_distributed_stokes;
 *       }
 *     }
 *   }
 * <a name="BoussinesqFlowProblemrun"></a>
 * <h4>BoussinesqFlowProblem::run</h4>
 *
 * This is the final and controlling function in this class. It, in fact,
 * runs the entire rest of the program and is, once more, very similar to
 * @ref step_31 "step-31". The only substantial difference is that we now use a different
 * mesh (a GridGenerator::hyper_shell instead of a simple cube geometry).
 *
 *   template <int dim>
 *   void BoussinesqFlowProblem<dim>::run()
 *   {
 *     GridGenerator::hyper_shell(triangulation,
 *                                Point<dim>(),
 *                                EquationData::R0,
 *                                EquationData::R1,
 *                                (dim == 3) ? 96 : 12,
 *                                true);
 *     global_Omega_diameter = GridTools::diameter(triangulation);
 *
 *     triangulation.refine_global(parameters.initial_global_refinement);
 *
 *     setup_dofs();
 *
 *     unsigned int pre_refinement_step = 0;
 *
 *   start_time_iteration:
 *
 *     {
 *       TrilinosWrappers::MPI::Vector solution(
 *         temperature_dof_handler.locally_owned_dofs());
 *
 * VectorTools::project supports parallel vector classes with most
 * standard finite elements via deal.II's own native MatrixFree framework:
 * since we use standard Lagrange elements of moderate order, this function
 * works well here.
 *
 *       VectorTools::project(temperature_dof_handler,
 *                            temperature_constraints,
 *                            QGauss<dim>(parameters.temperature_degree + 2),
 *                            EquationData::TemperatureInitialValues<dim>(),
 *                            solution);
 * Having so computed the current temperature field, let us set the member
 * variable that holds the temperature nodes. Strictly speaking, we really
 * only need to set <code>old_temperature_solution</code> since the first
 * thing we will do is to compute the Stokes solution that only requires
 * the previous time step's temperature field. That said, nothing good can
 * come from not initializing the other vectors as well (especially since
 * it's a relatively cheap operation and we only have to do it once at the
 * beginning of the program) if we ever want to extend our numerical
 * method or physical model, and so we initialize
 * <code>old_temperature_solution</code> and
 * <code>old_old_temperature_solution</code> as well. The assignment makes
 * sure that the vectors on the left hand side (which were initialized to
 * contain ghost elements as well) also get the correct ghost elements. In
 * other words, the assignment here requires communication between
 * processors:
 *
 *       temperature_solution         = solution;
 *       old_temperature_solution     = solution;
 *       old_old_temperature_solution = solution;
 *     }
 *     timestep_number = 0;
 *     time_step = old_time_step = 0;
 *
 *     double time = 0;
 *
 *     do
 *       {
 *         pcout << "Timestep " << timestep_number
 *               << ":  t=" << time / EquationData::year_in_seconds << " years"
 *               << std::endl;
 *
 *         assemble_stokes_system();
 *         build_stokes_preconditioner();
 *         assemble_temperature_matrix();
 *
 *         solve();
 *
 *         pcout << std::endl;
 *         if ((timestep_number == 0) &&
 *             (pre_refinement_step < parameters.initial_adaptive_refinement))
 *           {
 *             refine_mesh(parameters.initial_global_refinement +
 *                         parameters.initial_adaptive_refinement);
 *             ++pre_refinement_step;
 *             goto start_time_iteration;
 *           }
 *         else if ((timestep_number > 0) &&
 *                  (timestep_number % parameters.adaptive_refinement_interval ==
 *                   0))
 *           refine_mesh(parameters.initial_global_refinement +
 *                       parameters.initial_adaptive_refinement);
 *
 *         if ((parameters.generate_graphical_output == true) &&
 *             (timestep_number % parameters.graphical_output_interval == 0))
 *           output_results();
 * In order to speed up linear solvers, we extrapolate the solutions
 * from the old time levels to the new one. This gives a very good
 * initial guess, cutting the number of iterations needed in solvers
 * by more than one half. We do not need to extrapolate in the last
 * iteration, so if we reached the final time, we stop here.
 *
 * As the last thing during a time step (before actually bumping up
 * the number of the time step), we check whether the current time
 * step number is divisible by 100, and if so we let the computing
 * timer print a summary of CPU times spent so far.
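 *
 * As an aside, the extrapolation carried out by the <code>sadd()</code>
 * calls in the code below can be written out explicitly. (The formula is
 * implied by the coefficients in the code rather than stated there; we
 * spell it out here for clarity.) With the current time step
 * @f$\Delta t_n@f$ and the previous one @f$\Delta t_{n-1}@f$, the
 * extrapolated start value for either solution vector is
 * @f[
 *   \mathbf x^{*}
 *   =
 *   \left(1+\frac{\Delta t_n}{\Delta t_{n-1}}\right) \mathbf x^{n}
 *   -
 *   \frac{\Delta t_n}{\Delta t_{n-1}} \, \mathbf x^{n-1},
 * @f]
 * which is exactly what the coefficients
 * <code>1. + time_step / old_time_step</code> and
 * <code>-time_step / old_time_step</code> encode.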
 *         if (time > parameters.end_time * EquationData::year_in_seconds)
 *           break;
 *
 *         TrilinosWrappers::MPI::BlockVector old_old_stokes_solution;
 *         old_old_stokes_solution      = old_stokes_solution;
 *         old_stokes_solution          = stokes_solution;
 *         old_old_temperature_solution = old_temperature_solution;
 *         old_temperature_solution     = temperature_solution;
 *         if (old_time_step > 0)
 *           {
 *
 * Trilinos sadd does not like ghost vectors even as input. Copy
 * into distributed vectors for now:
 *
 *             {
 *               TrilinosWrappers::MPI::BlockVector distr_solution(stokes_rhs);
 *               distr_solution = stokes_solution;
 *               TrilinosWrappers::MPI::BlockVector distr_old_solution(stokes_rhs);
 *               distr_old_solution = old_old_stokes_solution;
 *               distr_solution.sadd(1. + time_step / old_time_step,
 *                                   -time_step / old_time_step,
 *                                   distr_old_solution);
 *               stokes_solution = distr_solution;
 *             }
 *             {
 *               TrilinosWrappers::MPI::Vector distr_solution(temperature_rhs);
 *               distr_solution = temperature_solution;
 *               TrilinosWrappers::MPI::Vector distr_old_solution(temperature_rhs);
 *               distr_old_solution = old_old_temperature_solution;
 *               distr_solution.sadd(1. + time_step / old_time_step,
 *                                   -time_step / old_time_step,
 *                                   distr_old_solution);
 *               temperature_solution = distr_solution;
 *             }
 *           }
 *
 *         if ((timestep_number > 0) && (timestep_number % 100 == 0))
 *           computing_timer.print_summary();
 *
 *         time += time_step;
 *         ++timestep_number;
 *       }
 *     while (true);
 * If we are generating graphical output, do so also for the last time
 * step unless we had just done so before we left the do-while loop
 * above:
 *
 *     if ((parameters.generate_graphical_output == true) &&
 *         !((timestep_number - 1) % parameters.graphical_output_interval == 0))
 *       output_results();
 *   }
 * <a name="Thecodemaincodefunction"></a>
 * <h3>The <code>main</code> function</h3>
 *
 * The main function is short as usual and very similar to the one in
 * @ref step_31 "step-31". Since we use a parameter file which is specified as an argument on
 * the command line, we have to read it in here and pass it on to the
 * Parameters class for parsing. If no filename is given on the command line,
 * we simply use the <code>step-32.prm</code> file which is distributed
 * together with the program.
 *
 * Because 3d computations are simply very slow unless you throw a lot of
 * processors at them, the program defaults to 2d. You can get the 3d version
 * by changing the constant dimension below to 3.
 * int main(int argc, char *argv[])
 * {
 *   try
 *     {
 *       using namespace Step32;
 *       using namespace dealii;
 *
 *       Utilities::MPI::MPI_InitFinalize mpi_initialization(
 *         argc, argv, numbers::invalid_unsigned_int);
 *
 *       std::string parameter_filename;
 *       if (argc >= 2)
 *         parameter_filename = argv[1];
 *       else
 *         parameter_filename = "step-32.prm";
 *
 *       const int dim = 2;
 *       BoussinesqFlowProblem<dim>::Parameters parameters(parameter_filename);
 *       BoussinesqFlowProblem<dim> flow_problem(parameters);
 *       flow_problem.run();
 *     }
 *   catch (std::exception &exc)
 *     {
 *       std::cerr << std::endl
 *                 << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *       std::cerr << "Exception on processing: " << std::endl
 *                 << exc.what() << std::endl
 *                 << "Aborting!" << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *
 *       return 1;
 *     }
 *   catch (...)
 *     {
 *       std::cerr << std::endl
 *                 << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *       std::cerr << "Unknown exception!" << std::endl
 *                 << "Aborting!" << std::endl
 *                 << "----------------------------------------------------"
 *                 << std::endl;
 *       return 1;
 *     }
 *
 *   return 0;
 * }
<a name="Results"></a><h1>Results</h1>

When run, the program simulates convection in 3d in much the same way
as @ref step_31 "step-31" did, though with an entirely different testcase.
<a name="Comparisonofresultswithstep31"></a><h3>Comparison of results with step-31</h3>

Before we go to this testcase, however, let us show a few results from a
slightly earlier version of this program that was solving exactly the
testcase we used in @ref step_31 "step-31", except that we now solve it in parallel and with
much higher resolution. We show these results mainly for comparison.

Here are two images that show this higher resolution if we choose a 3d
computation in <code>main()</code> and if we set
<code>initial_refinement=3</code> and
<code>n_pre_refinement_steps=4</code>. At the time steps shown, the
meshes had around 72,000 and 236,000 cells, for a total of 2,680,000
and 8,250,000 degrees of freedom, respectively, more than an order of
magnitude more than we had available in @ref step_31 "step-31":

<table align="center" class="doxtable">
  <tr>
    <td>
      <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.0.png" alt="">
    </td>
  </tr>
  <tr>
    <td>
      <img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.1.png" alt="">
    </td>
  </tr>
</table>

The computation was done on a subset of 50 processors of the Brazos
cluster at Texas A&M University.
<a name="Resultsfora2dcircularshelltestcase"></a><h3>Results for a 2d circular shell testcase</h3>

Next, we will run @ref step_32 "step-32" with the parameter file in the directory with one
change: we increase the final time to 1e9. Here we are using 16 processors. The
command to launch is (note that @ref step_32 "step-32".prm is the default):

@code
$ mpirun -np 16 ./step-32
@endcode

Note that running a job on a cluster typically requires going through a job
scheduler, which we won't discuss here. The output will look roughly like
this:
@code
$ mpirun -np 16 ./step-32
Number of active cells: 12,288 (on 6 levels)
Number of degrees of freedom: 186,624 (99,840+36,864+49,920)

Timestep 0:  t=0 years

   Rebuilding Stokes preconditioner...
   Solving Stokes system... 41 iterations.
   Maximal velocity: 60.4935 cm/year
   Time step: 18166.9 years
   17 CG iterations for temperature
   Temperature range: 973 4273.16

Number of active cells: 15,921 (on 7 levels)
Number of degrees of freedom: 252,723 (136,640+47,763+68,320)

Timestep 0:  t=0 years

   Rebuilding Stokes preconditioner...
   Solving Stokes system... 50 iterations.
   Maximal velocity: 60.3223 cm/year
   Time step: 10557.6 years
   19 CG iterations for temperature
   Temperature range: 973 4273.16

Number of active cells: 19,926 (on 8 levels)
Number of degrees of freedom: 321,246 (174,312+59,778+87,156)

Timestep 0:  t=0 years

   Rebuilding Stokes preconditioner...
   Solving Stokes system... 50 iterations.
   Maximal velocity: 57.8396 cm/year
   Time step: 5453.78 years
   18 CG iterations for temperature
   Temperature range: 973 4273.16

Timestep 1:  t=5453.78 years

   Solving Stokes system... 49 iterations.
   Maximal velocity: 59.0231 cm/year
   Time step: 5345.86 years
   18 CG iterations for temperature
   Temperature range: 973 4273.16

Timestep 2:  t=10799.6 years

   Solving Stokes system... 24 iterations.
   Maximal velocity: 60.2139 cm/year
   Time step: 5241.51 years
   17 CG iterations for temperature
   Temperature range: 973 4273.16

[...]

Timestep 100:  t=272151 years

   Solving Stokes system... 21 iterations.
   Maximal velocity: 161.546 cm/year
   Time step: 1672.96 years
   17 CG iterations for temperature
   Temperature range: 973 4282.57

Number of active cells: 56,085 (on 8 levels)
Number of degrees of freedom: 903,408 (490,102+168,255+245,051)


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |       115s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble Stokes system          |       103 |      2.82s |       2.5% |
| Assemble temperature matrices   |        12 |     0.452s |      0.39% |
| Assemble temperature rhs        |       103 |      11.5s |        10% |
| Build Stokes preconditioner     |        12 |      2.09s |       1.8% |
| Solve Stokes system             |       103 |      90.4s |        79% |
| Solve temperature system        |       103 |      1.53s |       1.3% |
| Postprocessing                  |         3 |     0.532s |      0.46% |
| Refine mesh structure, part 1   |        12 |      0.93s |      0.81% |
| Refine mesh structure, part 2   |        12 |     0.384s |      0.33% |
| Setup dof systems               |        13 |      2.96s |       2.6% |
+---------------------------------+-----------+------------+------------+
@endcode
The summary above was printed after the first hundred time steps; by the
end of the run, the accumulated timings look like this:

@code
+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |  9.14e+04s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble Stokes system          |     47045 |  2.05e+03s |       2.2% |
| Assemble temperature matrices   |      4707 |       310s |      0.34% |
| Assemble temperature rhs        |     47045 |   8.7e+03s |       9.5% |
| Build Stokes preconditioner     |      4707 |  1.48e+03s |       1.6% |
| Solve Stokes system             |     47045 |  7.34e+04s |        80% |
| Solve temperature system        |     47045 |  1.46e+03s |       1.6% |
| Postprocessing                  |      1883 |       222s |      0.24% |
| Refine mesh structure, part 1   |      4706 |       641s |       0.7% |
| Refine mesh structure, part 2   |      4706 |       259s |      0.28% |
| Setup dof systems               |      4707 |  1.86e+03s |         2% |
+---------------------------------+-----------+------------+------------+
@endcode
The simulation terminates when the time reaches the 1 billion years
selected in the input file. You can extrapolate from this how long a
simulation would take for a different final time (the time step size
ultimately settles on somewhere around 20,000 years, so computing for
two billion years will take 100,000 time steps, give or take 20%). As
can be seen here, we spend most of the compute time in assembling
linear systems and, above all, in solving Stokes systems.

To demonstrate the output, we show the solution from every 1250th time step here:
<table align="center" class="doxtable">
  <tr>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-000.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-050.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-100.png" alt=""> </td>
  </tr>
  <tr>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-150.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-200.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-250.png" alt=""> </td>
  </tr>
  <tr>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-300.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-350.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-400.png" alt=""> </td>
  </tr>
  <tr>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-450.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-500.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-550.png" alt=""> </td>
  </tr>
  <tr>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-600.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-cells.png" alt=""> </td>
    <td> <img src="https://www.dealii.org/images/steps/developer/step-32-2d-partition.png" alt=""> </td>
  </tr>
</table>
The last two images show the grid as well as the partitioning of the mesh for
the same computation with 16 subdomains and 16 processors. The full dynamics of
this simulation are really only visible by looking at an animation, for example
the one <a
href="https://www.dealii.org/images/steps/developer/step-32-2d-temperature.webm">shown
on this site</a>. This animation is well worth watching due to its artistic quality
and entrancing depiction of the evolution of the magma plumes.

If you watch the movie, you'll see that the convection pattern goes
through several stages: First, it gets rid of the unstable temperature
layering in which the hot material is overlain by the dense cold
material. After this great driver is removed and we have a sort of
stable situation, a few blobs start to separate from the hot boundary
layer at the inner ring and rise up, with a few cold fingers also
dropping down from the outer boundary layer. During this phase, the solution
remains mostly symmetric, reflecting the 12-fold symmetry of the
original mesh. In a final phase, the fluid enters vigorous chaotic
stirring in which all symmetries are lost. This is a pattern that then
continues to dominate the flow.
These different phases can also be identified if we look at the
maximal velocity as a function of time in the simulation:

<img src="https://www.dealii.org/images/steps/developer/step-32.2d.t_vs_vmax.png" alt="">

Here, the velocity (shown in centimeters per year) becomes very large
(on the order of several meters per year) at the beginning when the
temperature layering is unstable. It then calms down to relatively
small values before picking up again in the chaotic stirring
regime. There, it remains in the range of 10-40 centimeters per year,
well within the physically expected range.
<a name="Resultsfora3dsphericalshelltestcase"></a><h3>Results for a 3d spherical shell testcase</h3>

3d computations are computationally very expensive. Furthermore, as
seen above, interesting behavior only starts after quite a long time,
requiring more CPU hours than is available on a typical
cluster. Consequently, rather than showing a complete simulation here,
let us simply show a couple of pictures we have obtained using the
successor to this program, called <i>ASPECT</i> (short for <i>Advanced
%Solver for Problems in Earth's ConvecTion</i>), that is being
developed independently of deal.II and that already incorporates some
of the extensions discussed below. The following two pictures show
isocontours of the temperature and the partition of the domain (along
with the mesh) onto 512 processors:

<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.solution.png" alt="">

<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.partition.png" alt="">
<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

There are many directions in which this program could be extended. As
mentioned at the end of the introduction, most of these are under active
development in the <i>ASPECT</i> (short for <i>Advanced %Solver for Problems
in Earth's ConvecTion</i>) code at the time this tutorial program is being
finished. Specifically, the following are certainly topics that one should
address to make the program more useful:

<ul>
<li> <b>Adiabatic heating/cooling:</b>
The temperature field we get in our simulations after a while
is mostly constant with boundary layers at the inner and outer
boundary, and streamers of cold and hot material mixing
everything. Yet, this doesn't match our expectation that things
closer to the earth core should be hotter than closer to the
surface. The reason is that the energy equation we have used does
not include a term that describes adiabatic cooling and heating:
rock, like gas, heats up as you compress it. Consequently, material
that rises up cools adiabatically, and cold material that sinks down
heats adiabatically. The correct temperature equation would
therefore look somewhat like this:
@f{eqnarray*}{
  \frac{DT}{Dt}
  -
  \nabla \cdot \kappa \nabla T &=& \gamma + \tau\frac{Dp}{Dt},
@f}
or, expanding the advected derivative @f$\frac{D}{Dt} =
\frac{\partial}{\partial t} + \mathbf u \cdot \nabla@f$:
@f{eqnarray*}{
  \frac{\partial T}{\partial t}
  +
  {\mathbf u} \cdot \nabla T
  -
  \nabla \cdot \kappa \nabla T &=& \gamma +
  \tau\left\{\frac{\partial
  p}{\partial t} + \mathbf u \cdot \nabla p \right\}.
@f}
In other words, as pressure increases in a rock volume
(@f$\frac{Dp}{Dt}>0@f$) we get an additional heat source, and vice
versa.

The time derivative of the pressure is a bit awkward to
implement. If necessary, one could approximate it using the fact
outlined in the introduction that the pressure can be decomposed
into a dynamic component due to temperature differences and the
resulting flow, and a static component that results solely from the
static pressure of the overlying rock. Since the latter is much
bigger, one may approximate @f$p\approx p_{\text{static}}=-\rho_{\text{ref}}
[1+\beta T_{\text{ref}}] \varphi@f$, and consequently
@f$\frac{Dp}{Dt} \approx \left\{- \mathbf u \cdot \nabla \rho_{\text{ref}}
[1+\beta T_{\text{ref}}]\varphi\right\} = \rho_{\text{ref}}
[1+\beta T_{\text{ref}}] \mathbf u \cdot \mathbf g@f$.
In other words, if the fluid is moving in the direction of gravity
(downward) it will be compressed and, because in that case @f$\mathbf u
\cdot \mathbf g > 0@f$, we get a positive heat source. Conversely, the
fluid will cool down if it moves against the direction of gravity.
<li> <b>Compressibility:</b>
As already hinted at in the temperature model above,
mantle rocks are not incompressible. Rather, given the enormous pressures in
the earth mantle (at the core-mantle boundary, the pressure is approximately
140 GPa, equivalent to 1,400,000 times atmospheric pressure), rock actually
does compress to something around 1.5 times the density it would have
at surface pressure. Modeling this presents any number of
difficulties. Primarily, the mass conservation equation is no longer
@f$\textrm{div}\;\mathbf u=0@f$ but should read
@f$\textrm{div}(\rho\mathbf u)=0@f$ where the density @f$\rho@f$ is now no longer
spatially constant but depends on temperature and pressure. A consequence is
that the model is now no longer linear; a linearized version of the Stokes
equation is also no longer symmetric, requiring us to rethink preconditioners
and, possibly, even the discretization. We won't go into detail here as to
how this can be resolved.
<li> <b>Nonlinear material models:</b> As already hinted at in various places,
material parameters such as the density, the viscosity, and the various
thermal parameters are not constant throughout the earth mantle. Rather,
they nonlinearly depend on the pressure and temperature, and in the case of
the viscosity on the strain rate @f$\varepsilon(\mathbf u)@f$. For complicated
models, the only way to solve such models accurately may be to actually
iterate this dependence out in each time step, rather than simply freezing
coefficients at values extrapolated from the previous time step(s).
<li> <b>Checkpoint/restart:</b> Running this program in 2d on a number of
processors allows solving realistic models in a day or two. However, in 3d,
compute times are so large that one runs into two typical problems: (i) On
most compute clusters, the queuing system limits run times for individual
jobs to 2 or 3 days; (ii) losing the results of a computation due to
hardware failures, misconfigurations, or power outages is a shame when
running on hundreds of processors for a couple of days. Both of these
problems can be addressed by periodically saving the state of the program
and, if necessary, restarting the program at this point. This technique is
commonly called <i>checkpoint/restart</i> and it requires that the entire
state of the program be written to a permanent storage location (e.g. a hard
drive). Given the complexity of the data structures of this program, this is
not entirely trivial (it may also involve writing gigabytes or more of
data), but it can be made easier by realizing that one can save the state
between two time steps where it essentially only consists of the mesh and
solution vectors; during restart one would then first re-enumerate degrees
of freedom in the same way as done before and then re-assemble
matrices. Nevertheless, given the distributed nature of the data structures
involved here, saving and restoring the state of a program is not
trivial. An additional complexity is introduced by the fact that one may
want to change the number of processors between runs, for example because
one may wish to continue computing on a mesh that is finer than the one used
to precompute a starting temperature field at an intermediate time. (A
sketch of how such a checkpoint could be written is shown after this list.)
<li> <b>Predictive postprocessing:</b> The point of computations like this is
not simply to solve the equations. Rather, it is typically the exploration
of different physical models and their comparison with things that we can
measure at the earth surface, in order to find which models are realistic
and which are contradicted by reality. To this end, we need to compute
quantities from our solution vectors that are related to what we can
observe. Among these are, for example, heat fluxes at the surface of the
earth, as well as seismic velocities throughout the mantle, as these affect
earthquake waves that are recorded by seismographs. (A sketch of a surface
heat flux computation is likewise shown after this list.)
<li> <b>Better refinement criteria:</b> As can be seen above for the
3d case, the mesh in 3d is primarily refined along the inner
boundary. This is because the boundary layer there is stronger than
any other transition in the domain, leading us to refine there almost
exclusively and basically not at all following the plumes. One
certainly needs better refinement criteria to track the parts of the
solution we are really interested in better than the criterion used
here (the KellyErrorEstimator applied to the temperature field).
</ul>
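
To make the checkpoint/restart item above a little more concrete, the
following is a minimal sketch, not code from the original program, of how
the temperature field of this program could be saved and restored using
parallel::distributed::Triangulation::save()/load() together with the
serialization support in parallel::distributed::SolutionTransfer. The file
name <code>checkpoint.mesh</code> is made up, the member variables are those
of the BoussinesqFlowProblem class above, and everything else that a real
checkpoint would have to store (the Stokes vectors, <code>time</code>,
<code>timestep_number</code>, ...) is glossed over:

@code
// Checkpoint: attach the temperature vector to the triangulation and
// write mesh and vector together into a checkpoint file. (A complete
// implementation would also serialize the Stokes vectors and the
// scalar time stepping state.)
{
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    checkpoint_trans(temperature_dof_handler);
  checkpoint_trans.prepare_for_serialization(temperature_solution);
  triangulation.save("checkpoint.mesh");
}

// Restart: after re-creating the same coarse mesh as in run(), load the
// refined mesh, re-enumerate degrees of freedom exactly as before, and
// deserialize into a fully distributed (non-ghosted) vector:
{
  triangulation.load("checkpoint.mesh");
  setup_dofs();

  TrilinosWrappers::MPI::Vector distributed_temperature(
    temperature_dof_handler.locally_owned_dofs(), MPI_COMM_WORLD);
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    restart_trans(temperature_dof_handler);
  restart_trans.deserialize(distributed_temperature);

  temperature_solution = distributed_temperature;
}
@endcode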
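Similarly, for the predictive postprocessing item, here is a sketch of how
one might compute the total conductive heat flux
@f$-\int_{\Gamma_{\text{outer}}} \kappa \nabla T \cdot \mathbf n \, ds@f$
through the outer boundary. The function name
<code>compute_outer_heat_flux()</code> is hypothetical, and we assume that
the outer boundary carries boundary indicator 1, as set by the colorized
GridGenerator::hyper_shell call in <code>run()</code>; the rest is standard
deal.II face integration:

@code
template <int dim>
double BoussinesqFlowProblem<dim>::compute_outer_heat_flux() const
{
  const QGauss<dim - 1> quadrature_formula(parameters.temperature_degree + 1);
  FEFaceValues<dim>     fe_face_values(temperature_fe,
                                       quadrature_formula,
                                       update_gradients | update_normal_vectors |
                                         update_JxW_values);
  std::vector<Tensor<1, dim>> temperature_gradients(quadrature_formula.size());

  double local_flux = 0;
  for (const auto &cell : temperature_dof_handler.active_cell_iterators())
    if (cell->is_locally_owned())
      for (unsigned int f = 0; f < GeometryInfo<dim>::faces_per_cell; ++f)
        if (cell->face(f)->at_boundary() &&
            (cell->face(f)->boundary_id() == 1))
          {
            fe_face_values.reinit(cell, f);
            fe_face_values.get_function_gradients(temperature_solution,
                                                  temperature_gradients);

            // Accumulate -kappa * grad T . n over this boundary face:
            for (unsigned int q = 0; q < quadrature_formula.size(); ++q)
              local_flux -=
                EquationData::kappa *
                (temperature_gradients[q] * fe_face_values.normal_vector(q)) *
                fe_face_values.JxW(q);
          }

  // Each processor has only integrated over its own cells, so sum up:
  return Utilities::MPI::sum(local_flux, MPI_COMM_WORLD);
}
@endcode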
There are many other ways to extend the current program. However, rather than
discussing them here, let us point to the much larger open
source code ASPECT (see https://aspect.geodynamics.org/) that constitutes the
further development of @ref step_32 "step-32" and that already includes many such possible
extensions.
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-32.cc"