1514 * constexpr double kappa = 1e-6;
1515 * constexpr double reference_density = 3300;
1516 * constexpr double reference_temperature = 293;
1517 * constexpr double expansion_coefficient = 2e-5;
1518 * constexpr double specific_heat = 1250;
1519 * constexpr double radiogenic_heating = 7.4e-12;
1522 * constexpr double R0 = 6371000. - 2890000.;
1523 * constexpr double R1 = 6371000. - 35000.;
1525 * constexpr double T0 = 4000 + 273;
1526 * constexpr double T1 = 700 + 273;
1531 * The next set of definitions is for functions that encode the density
1532 * as a function of temperature, the gravity vector, and the initial
1533 * values for the temperature. Again, all of these (along with the values
1534 * they compute) are discussed in the introduction:
1537 * double density(const double temperature)
1540 * reference_density *
1541 * (1 - expansion_coefficient * (temperature - reference_temperature)));
1545 * template <int dim>
1548 * const double r = p.norm();
1549 * return -(1.245e-6 * r + 7.714e13 / r / r) * p / r;
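As a quick sanity check (an illustration added here, not part of the tutorial program), the magnitude of this gravity vector can be evaluated at the two radii defined above; it comes out to roughly 9.81 m/s^2 at the top of the mantle and roughly 10.7 m/s^2 at the core-mantle boundary, which are physically sensible values for the Earth:

@code
#include <iostream>

int main()
{
  // Same radii as in EquationData above (in meters):
  const double R0 = 6371000. - 2890000.; // core-mantle boundary
  const double R1 = 6371000. - 35000.;   // bottom of the crust

  // Magnitude of the gravity vector used above: |g(r)| = 1.245e-6 r + 7.714e13 / r^2
  const auto g = [](const double r) { return 1.245e-6 * r + 7.714e13 / r / r; };

  std::cout << "g(R1) = " << g(R1) << " m/s^2\n"  // approximately 9.81
            << "g(R0) = " << g(R0) << " m/s^2\n"; // approximately 10.7
}
@endcode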
1554 * template <int dim>
1555 * class TemperatureInitialValues : public Function<dim>
1558 * TemperatureInitialValues()
1563 * const unsigned int component = 0) const override;
1571 * template <int dim>
1572 * double TemperatureInitialValues<dim>::value(const Point<dim> &p,
1573 * const unsigned int) const
1575 * const double r = p.norm();
1576 * const double h = R1 - R0;
1578 * const double s = (r - R0) / h;
1582 * const double tau = s + 0.2 * s * (1 - s) * std::sin(6 * phi) * q;
1584 * return T0 * (1.0 - tau) + T1 * tau;
1588 * template <int dim>
1590 * TemperatureInitialValues<dim>::vector_value(const Point<dim> &p,
1593 * for (unsigned int c = 0; c < this->n_components; ++c)
1594 * values(c) = TemperatureInitialValues<dim>::value(p, c);
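Note that at @f$r=R_0@f$ we have @f$s=0@f$ and therefore @f$\tau=0@f$, so the function returns @f$T_0@f$; at @f$r=R_1@f$ it returns @f$T_1@f$; in between, the linear profile is perturbed by the @f$\sin(6\varphi)@f$ term. A minimal, hedged sketch of how such a Function object would typically be transferred onto a finite element field is shown below; the program itself may use a projection rather than an interpolation, <code>mapping</code> and <code>temperature_dof_handler</code> refer to members declared further down, and <code>solution</code> is assumed to be a vector holding only locally owned temperature degrees of freedom:

@code
VectorTools::interpolate(mapping,
                         temperature_dof_handler,
                         EquationData::TemperatureInitialValues<dim>(),
                         solution);
@endcode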
1600 * As mentioned in the introduction, we need to rescale the pressure to
1601 * avoid the relative ill-conditioning of the momentum and mass
1602 * conservation equations. The scaling factor is @f$\frac{\eta}{L}@f$ where
1603 * @f$L@f$ is a typical length scale. Experimentation shows that a
1604 * good length scale is the diameter of plumes, which is around 10 km:
1607 * constexpr double pressure_scaling = eta / 10000;
1611 * The final number in this namespace is a constant that denotes the
1612 * number of seconds per (average, tropical) year. We use this only when
1613 * generating screen output: internally, all computations of this program
1614 * happen in SI units (kilogram, meter, seconds) but writing geological
1615 * times in seconds yields numbers that one can't relate to reality, and
1616 * so we convert to years using the factor defined here:
1619 * const double year_in_seconds = 60 * 60 * 24 * 365.2425;
1621 * } // namespace EquationData
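As a minimal illustration of how this constant would typically be used (a sketch added here, not a line of the program): when printing the current simulation time, which is kept internally in seconds, one divides by it.

@code
// 'time' is assumed to be the current simulation time in seconds.
std::cout << "   Time: " << time / EquationData::year_in_seconds << " years"
          << std::endl;
@endcode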
1628 * <a name="PreconditioningtheStokessystem"></a>
1629 * <h3>Preconditioning the Stokes system</h3>
1633 * This namespace implements the preconditioner. As discussed in the
1634 * introduction, this preconditioner differs in a number of key portions
1635 * from the one used in @ref step_31 "step-31". Specifically, it is a right preconditioner,
1636 * implementing the matrix
1638 * \left(\begin{array}{cc}A^{-1} & B^T \\ 0 & S^{-1}
1640 * \end{array}\right)
1642 * where the two inverse matrix operations
1643 * are approximated by linear solvers or, if the right flag is given to the
1644 * constructor of this class, by a single AMG V-cycle for the velocity
1645 * block. The three code blocks of the <code>vmult</code> function implement
1646 * the multiplications with the three blocks of this preconditioner matrix
1647 * and should be self explanatory if you have read through @ref step_31 "step-31" or the
1648 * discussion of composing solvers in @ref step_20 "step-20".
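To give an idea of how this class is meant to be used, here is a hedged sketch of plugging it into a flexible GMRES solver. The program's actual solver setup lives in its <code>solve()</code> function, which is not part of this excerpt; <code>distributed_stokes_solution</code> is an assumed name for a fully distributed solution vector, and the tolerance is only an example:

@code
const LinearSolvers::BlockSchurPreconditioner<TrilinosWrappers::PreconditionAMG,
                                              TrilinosWrappers::PreconditionJacobi>
  preconditioner(stokes_matrix,
                 stokes_preconditioner_matrix,
                 *Mp_preconditioner,
                 *Amg_preconditioner,
                 /* do_solve_A = */ false);

SolverControl solver_control(stokes_matrix.m(), 1e-6 * stokes_rhs.l2_norm());
SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(solver_control);
solver.solve(stokes_matrix,
             distributed_stokes_solution,
             stokes_rhs,
             preconditioner);
@endcode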
1651 * namespace LinearSolvers
1653 * template <class PreconditionerTypeA, class PreconditionerTypeMp>
1654 * class BlockSchurPreconditioner : public Subscriptor
1657 * BlockSchurPreconditioner(const TrilinosWrappers::BlockSparseMatrix &S,
1658 * const TrilinosWrappers::BlockSparseMatrix &Spre,
1659 * const PreconditionerTypeMp &Mppreconditioner,
1660 * const PreconditionerTypeA & Apreconditioner,
1661 * const bool do_solve_A)
1662 * : stokes_matrix(&S)
1663 * , stokes_preconditioner_matrix(&Spre)
1664 * , mp_preconditioner(Mppreconditioner)
1665 * , a_preconditioner(Apreconditioner)
1666 * , do_solve_A(do_solve_A)
1669 * void vmult(TrilinosWrappers::MPI::BlockVector & dst,
1670 * const TrilinosWrappers::MPI::BlockVector &src) const
1672 * TrilinosWrappers::MPI::Vector utmp(src.block(0));
1675 * SolverControl solver_control(5000, 1e-6 * src.block(1).l2_norm());
1677 * SolverCG<TrilinosWrappers::MPI::Vector> solver(solver_control);
1679 * solver.solve(stokes_preconditioner_matrix->block(1, 1),
1682 * mp_preconditioner);
1684 * dst.block(1) *= -1.0;
1688 * stokes_matrix->block(0, 1).vmult(utmp, dst.block(1));
1690 * utmp.add(src.block(0));
1693 * if (do_solve_A == true)
1695 * SolverControl solver_control(5000, utmp.l2_norm() * 1e-2);
1696 * TrilinosWrappers::SolverCG solver(solver_control);
1697 * solver.solve(stokes_matrix->block(0, 0),
1700 * a_preconditioner);
1703 * a_preconditioner.vmult(dst.block(0), utmp);
1707 * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1709 * const SmartPointer<const TrilinosWrappers::BlockSparseMatrix>
1710 * stokes_preconditioner_matrix;
1711 * const PreconditionerTypeMp &mp_preconditioner;
1712 * const PreconditionerTypeA & a_preconditioner;
1713 * const bool do_solve_A;
1715 * } // namespace LinearSolvers
1722 * <a name="Definitionofassemblydatastructures"></a>
1723 * <h3>Definition of assembly data structures</h3>
1727 * As described in the introduction, we will use the WorkStream mechanism
1728 * discussed in the @ref threads module to parallelize operations among the
1729 * processors of a single machine. The WorkStream class requires that data
1730 * is passed around in two kinds of data structures, one for scratch data
1731 * and one to pass data from the assembly function to the function that
1732 * copies local contributions into global objects.
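The following is a minimal, self-contained sketch of this pattern with hypothetical <code>MyScratch</code>/<code>MyCopy</code> types (deliberately unrelated to the structures defined below): the worker may run on many threads concurrently and touches only its scratch and copy data, while the copier runs sequentially and is the only place that writes into the shared result.

@code
#include <deal.II/base/work_stream.h>
#include <vector>

struct MyScratch
{
  std::vector<double> tmp; // per-thread workspace, reused from item to item
};

struct MyCopy
{
  double local_contribution = 0;
};

void example()
{
  std::vector<int> cells(100, 1); // stand-ins for cell iterators
  double           global_sum = 0;

  dealii::WorkStream::run(
    cells.begin(),
    cells.end(),
    // Worker: computes a local contribution for one "cell".
    [](const std::vector<int>::iterator &cell, MyScratch &, MyCopy &copy) {
      copy.local_contribution = *cell;
    },
    // Copier: accumulates local contributions into the global object.
    [&global_sum](const MyCopy &copy) { global_sum += copy.local_contribution; },
    MyScratch(),
    MyCopy());
}
@endcode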
1736 * The following namespace (and the two sub-namespaces) contains a
1737 * collection of data structures that serve this purpose, one pair for each
1738 * of the four operations discussed in the introduction that we will want to
1739 * parallelize. Each assembly routine gets two sets of data: a Scratch array
1740 * that collects all the classes and arrays that are used for the
1741 * calculation of the cell contribution, and a CopyData array that keeps
1742 * local matrices and vectors which will be written into the global
1743 * matrix. Whereas CopyData is a container for the final data that is
1744 * written into the global matrices and vector (and, thus, absolutely
1745 * necessary), the Scratch arrays are merely there for performance reasons
1746 * — it would be much more expensive to set up an FEValues object on
1747 * each cell than to create it only once and update some derivative data.
1751 * @ref step_31 "step-31" had four assembly routines: One for the preconditioner matrix of
1752 * the Stokes system, one for the Stokes matrix and right hand side, one for
1753 * the temperature matrices and one for the right hand side of the
1754 * temperature equation. We here organize the scratch arrays and CopyData
1755 * objects for each of those four assembly components using a
1756 * <code>struct</code> environment (since we consider these as temporary
1757 * objects we pass around, rather than classes that implement functionality
1758 * of their own, though this is a more subjective point of view to
1759 * distinguish between <code>struct</code>s and <code>class</code>es).
1763 * Regarding the Scratch objects, each struct is equipped with a constructor
1764 * that creates an @ref FEValues object using the @ref FiniteElement,
1765 * Quadrature, @ref Mapping (which describes the interpolation of curved
1766 * boundaries), and @ref UpdateFlags instances. Moreover, we manually
1767 * implement a copy constructor (since the FEValues class is not copyable by
1768 * itself), and provide some additional vector fields that are used to hold
1769 * intermediate data during the computation of local contributions.
1773 * Let us start with the scratch arrays and, specifically, the one used for
1774 * assembly of the Stokes preconditioner:
1777 * namespace Assembly
1781 * template <int dim>
1782 * struct StokesPreconditioner
1784 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe,
1785 * const Quadrature<dim> & stokes_quadrature,
1786 * const Mapping<dim> & mapping,
1787 * const UpdateFlags update_flags);
1789 * StokesPreconditioner(const StokesPreconditioner &data);
1792 * FEValues<dim> stokes_fe_values;
1794 * std::vector<Tensor<2, dim>> grad_phi_u;
1795 * std::vector<double> phi_p;
1798 * template <int dim>
1799 * StokesPreconditioner<dim>::StokesPreconditioner(
1800 * const FiniteElement<dim> &stokes_fe,
1801 * const Quadrature<dim> & stokes_quadrature,
1802 * const Mapping<dim> & mapping,
1803 * const UpdateFlags update_flags)
1804 * : stokes_fe_values(mapping, stokes_fe, stokes_quadrature, update_flags)
1805 * , grad_phi_u(stokes_fe.n_dofs_per_cell())
1806 * , phi_p(stokes_fe.n_dofs_per_cell())
1811 * template <int dim>
1812 * StokesPreconditioner<dim>::StokesPreconditioner(
1813 * const StokesPreconditioner &scratch)
1814 * : stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
1815 * scratch.stokes_fe_values.get_fe(),
1816 * scratch.stokes_fe_values.get_quadrature(),
1817 * scratch.stokes_fe_values.get_update_flags())
1818 * , grad_phi_u(scratch.grad_phi_u)
1819 * , phi_p(scratch.phi_p)
1826 * The next one is the scratch object used for the assembly of the full
1827 * Stokes system. Observe that we derive the StokesSystem scratch class
1828 * from the StokesPreconditioner class above. We do this because all the
1829 * objects that are necessary for the assembly of the preconditioner are
1830 * also needed for the actual matrix system and right hand side, plus
1831 * some extra data. This makes the program more compact. Note also that
1832 * the assembly of the Stokes system and the temperature right hand side
1833 * further down requires data from temperature and velocity,
1834 * respectively, so we actually need two FEValues objects for those two
1838 * template <int dim>
1839 * struct StokesSystem : public StokesPreconditioner<dim>
1841 * StokesSystem(const FiniteElement<dim> &stokes_fe,
1842 * const Mapping<dim> & mapping,
1843 * const Quadrature<dim> & stokes_quadrature,
1844 * const UpdateFlags stokes_update_flags,
1845 * const FiniteElement<dim> &temperature_fe,
1846 * const UpdateFlags temperature_update_flags);
1848 * StokesSystem(const StokesSystem<dim> &data);
1851 * FEValues<dim> temperature_fe_values;
1853 * std::vector<Tensor<1, dim>> phi_u;
1854 * std::vector<SymmetricTensor<2, dim>> grads_phi_u;
1855 * std::vector<double> div_phi_u;
1857 * std::vector<double> old_temperature_values;
1861 * template <int dim>
1862 * StokesSystem<dim>::StokesSystem(
1863 * const FiniteElement<dim> &stokes_fe,
1864 * const Mapping<dim> & mapping,
1865 * const Quadrature<dim> & stokes_quadrature,
1866 * const UpdateFlags stokes_update_flags,
1867 * const FiniteElement<dim> &temperature_fe,
1868 * const UpdateFlags temperature_update_flags)
1869 * : StokesPreconditioner<dim>(stokes_fe,
1870 * stokes_quadrature,
1872 * stokes_update_flags)
1873 * , temperature_fe_values(mapping,
1875 * stokes_quadrature,
1876 * temperature_update_flags)
1877 * , phi_u(stokes_fe.n_dofs_per_cell())
1878 * , grads_phi_u(stokes_fe.n_dofs_per_cell())
1879 * , div_phi_u(stokes_fe.n_dofs_per_cell())
1880 * , old_temperature_values(stokes_quadrature.size())
1884 * template <int dim>
1885 * StokesSystem<dim>::StokesSystem(const StokesSystem<dim> &scratch)
1886 * : StokesPreconditioner<dim>(scratch)
1887 * , temperature_fe_values(
1888 * scratch.temperature_fe_values.get_mapping(),
1889 * scratch.temperature_fe_values.get_fe(),
1890 * scratch.temperature_fe_values.get_quadrature(),
1891 * scratch.temperature_fe_values.get_update_flags())
1892 * , phi_u(scratch.phi_u)
1893 * , grads_phi_u(scratch.grads_phi_u)
1894 * , div_phi_u(scratch.div_phi_u)
1895 * , old_temperature_values(scratch.old_temperature_values)
1901 * After defining the objects used in the assembly of the Stokes system,
1902 * we do the same for the assembly of the matrices necessary for the
1903 * temperature system. The general structure is very similar:
1906 * template <int dim>
1907 * struct TemperatureMatrix
1909 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe,
1910 * const Mapping<dim> & mapping,
1911 * const Quadrature<dim> & temperature_quadrature);
1913 * TemperatureMatrix(const TemperatureMatrix &data);
1916 * FEValues<dim> temperature_fe_values;
1918 * std::vector<double> phi_T;
1919 * std::vector<Tensor<1, dim>> grad_phi_T;
1923 * template <int dim>
1924 * TemperatureMatrix<dim>::TemperatureMatrix(
1925 * const FiniteElement<dim> &temperature_fe,
1926 * const Mapping<dim> & mapping,
1927 * const Quadrature<dim> & temperature_quadrature)
1928 * : temperature_fe_values(mapping,
1930 * temperature_quadrature,
1931 * update_values | update_gradients |
1932 * update_JxW_values)
1933 * , phi_T(temperature_fe.n_dofs_per_cell())
1934 * , grad_phi_T(temperature_fe.n_dofs_per_cell())
1938 * template <int dim>
1939 * TemperatureMatrix<dim>::TemperatureMatrix(
1940 * const TemperatureMatrix &scratch)
1941 * : temperature_fe_values(
1942 * scratch.temperature_fe_values.get_mapping(),
1943 * scratch.temperature_fe_values.get_fe(),
1944 * scratch.temperature_fe_values.get_quadrature(),
1945 * scratch.temperature_fe_values.get_update_flags())
1946 * , phi_T(scratch.phi_T)
1947 * , grad_phi_T(scratch.grad_phi_T)
1953 * The final scratch object is used in the assembly of the right hand
1954 * side of the temperature system. This object is significantly larger
1955 * than the ones above because a lot more quantities enter the
1956 * computation of the right hand side of the temperature equation. In
1957 * particular, the temperature values and gradients of the previous two
1958 * time steps need to be evaluated at the quadrature points, as well as
1959 * the velocities and the strain rates (i.e. the symmetric gradients of
1960 * the velocity) that enter the right hand side as friction heating
1961 * terms. Despite the number of terms, the following should be rather
1965 * template <int dim>
1966 * struct TemperatureRHS
1968 * TemperatureRHS(const FiniteElement<dim> &temperature_fe,
1969 * const FiniteElement<dim> &stokes_fe,
1970 * const Mapping<dim> & mapping,
1971 * const Quadrature<dim> & quadrature);
1973 * TemperatureRHS(const TemperatureRHS &data);
1976 * FEValues<dim> temperature_fe_values;
1977 * FEValues<dim> stokes_fe_values;
1979 * std::vector<double> phi_T;
1980 * std::vector<Tensor<1, dim>> grad_phi_T;
1982 * std::vector<Tensor<1, dim>> old_velocity_values;
1983 * std::vector<Tensor<1, dim>> old_old_velocity_values;
1985 * std::vector<SymmetricTensor<2, dim>> old_strain_rates;
1986 * std::vector<SymmetricTensor<2, dim>> old_old_strain_rates;
1988 * std::vector<double> old_temperature_values;
1989 * std::vector<double> old_old_temperature_values;
1990 * std::vector<Tensor<1, dim>> old_temperature_grads;
1991 * std::vector<Tensor<1, dim>> old_old_temperature_grads;
1992 * std::vector<double> old_temperature_laplacians;
1993 * std::vector<double> old_old_temperature_laplacians;
1997 * template <int dim>
1998 * TemperatureRHS<dim>::TemperatureRHS(
1999 * const FiniteElement<dim> &temperature_fe,
2000 * const FiniteElement<dim> &stokes_fe,
2001 * const Mapping<dim> & mapping,
2002 * const Quadrature<dim> & quadrature)
2003 * : temperature_fe_values(mapping,
2006 * update_values | update_gradients |
2007 * update_hessians | update_quadrature_points |
2008 * update_JxW_values)
2009 * , stokes_fe_values(mapping,
2012 * update_values | update_gradients)
2013 * , phi_T(temperature_fe.n_dofs_per_cell())
2014 * , grad_phi_T(temperature_fe.n_dofs_per_cell())
2017 * old_velocity_values(quadrature.size())
2018 * , old_old_velocity_values(quadrature.size())
2019 * , old_strain_rates(quadrature.size())
2020 * , old_old_strain_rates(quadrature.size())
2023 * old_temperature_values(quadrature.size())
2024 * , old_old_temperature_values(quadrature.size())
2025 * , old_temperature_grads(quadrature.size())
2026 * , old_old_temperature_grads(quadrature.size())
2027 * , old_temperature_laplacians(quadrature.size())
2028 * , old_old_temperature_laplacians(quadrature.size())
2032 * template <int dim>
2033 * TemperatureRHS<dim>::TemperatureRHS(const TemperatureRHS &scratch)
2034 * : temperature_fe_values(
2035 * scratch.temperature_fe_values.get_mapping(),
2036 * scratch.temperature_fe_values.get_fe(),
2037 * scratch.temperature_fe_values.get_quadrature(),
2038 * scratch.temperature_fe_values.get_update_flags())
2039 * , stokes_fe_values(scratch.stokes_fe_values.get_mapping(),
2040 * scratch.stokes_fe_values.get_fe(),
2041 * scratch.stokes_fe_values.get_quadrature(),
2042 * scratch.stokes_fe_values.get_update_flags())
2043 * , phi_T(scratch.phi_T)
2044 * , grad_phi_T(scratch.grad_phi_T)
2047 * old_velocity_values(scratch.old_velocity_values)
2048 * , old_old_velocity_values(scratch.old_old_velocity_values)
2049 * , old_strain_rates(scratch.old_strain_rates)
2050 * , old_old_strain_rates(scratch.old_old_strain_rates)
2053 * old_temperature_values(scratch.old_temperature_values)
2054 * , old_old_temperature_values(scratch.old_old_temperature_values)
2055 * , old_temperature_grads(scratch.old_temperature_grads)
2056 * , old_old_temperature_grads(scratch.old_old_temperature_grads)
2057 * , old_temperature_laplacians(scratch.old_temperature_laplacians)
2058 * , old_old_temperature_laplacians(scratch.old_old_temperature_laplacians)
2060 * } // namespace Scratch
2065 * The CopyData objects are even simpler than the Scratch objects as all
2066 * they have to do is to store the results of local computations until
2067 * they can be copied into the global matrix or vector objects. These
2068 * structures therefore only need to provide a constructor, a copy
2069 * operation, and some arrays for local matrix, local vectors and the
2070 * relation between local and global degrees of freedom (a.k.a.
2071 * <code>local_dof_indices</code>). Again, we have one such structure for
2072 * each of the four operations we will parallelize using the WorkStream
2076 * namespace CopyData
2078 * template <int dim>
2079 * struct StokesPreconditioner
2081 * StokesPreconditioner(const FiniteElement<dim> &stokes_fe);
2082 * StokesPreconditioner(const StokesPreconditioner &data);
2083 * StokesPreconditioner &operator=(const StokesPreconditioner &) = default;
2085 * FullMatrix<double> local_matrix;
2086 * std::vector<types::global_dof_index> local_dof_indices;
2089 * template <int dim>
2090 * StokesPreconditioner<dim>::StokesPreconditioner(
2091 * const FiniteElement<dim> &stokes_fe)
2092 * : local_matrix(stokes_fe.n_dofs_per_cell(), stokes_fe.n_dofs_per_cell())
2093 * , local_dof_indices(stokes_fe.n_dofs_per_cell())
2096 * template <int dim>
2097 * StokesPreconditioner<dim>::StokesPreconditioner(
2098 * const StokesPreconditioner &data)
2099 * : local_matrix(data.local_matrix)
2100 * , local_dof_indices(data.local_dof_indices)
2105 * template <int dim>
2106 * struct StokesSystem : public StokesPreconditioner<dim>
2108 * StokesSystem(const FiniteElement<dim> &stokes_fe);
2110 * Vector<double> local_rhs;
2113 * template <int dim>
2114 * StokesSystem<dim>::StokesSystem(const FiniteElement<dim> &stokes_fe)
2115 * : StokesPreconditioner<dim>(stokes_fe)
2116 * , local_rhs(stokes_fe.n_dofs_per_cell())
2121 * template <int dim>
2122 * struct TemperatureMatrix
2124 * TemperatureMatrix(const FiniteElement<dim> &temperature_fe);
2126 * FullMatrix<double> local_mass_matrix;
2127 * FullMatrix<double> local_stiffness_matrix;
2128 * std::vector<types::global_dof_index> local_dof_indices;
2131 * template <int dim>
2132 * TemperatureMatrix<dim>::TemperatureMatrix(
2133 * const FiniteElement<dim> &temperature_fe)
2134 * : local_mass_matrix(temperature_fe.n_dofs_per_cell(),
2135 * temperature_fe.n_dofs_per_cell())
2136 * , local_stiffness_matrix(temperature_fe.n_dofs_per_cell(),
2137 * temperature_fe.n_dofs_per_cell())
2138 * , local_dof_indices(temperature_fe.n_dofs_per_cell())
2143 * template <int dim>
2144 * struct TemperatureRHS
2146 * TemperatureRHS(const FiniteElement<dim> &temperature_fe);
2148 * Vector<double> local_rhs;
2149 * std::vector<types::global_dof_index> local_dof_indices;
2150 * FullMatrix<double> matrix_for_bc;
2153 * template <int dim>
2154 * TemperatureRHS<dim>::TemperatureRHS(
2155 * const FiniteElement<dim> &temperature_fe)
2156 * : local_rhs(temperature_fe.n_dofs_per_cell())
2157 * , local_dof_indices(temperature_fe.n_dofs_per_cell())
2158 * , matrix_for_bc(temperature_fe.n_dofs_per_cell(),
2159 * temperature_fe.n_dofs_per_cell())
2161 * } // namespace CopyData
2162 * } // namespace Assembly
2169 * <a name="ThecodeBoussinesqFlowProblemcodeclasstemplate"></a>
2170 * <h3>The <code>BoussinesqFlowProblem</code> class template</h3>
2174 * This is the declaration of the main class. It is very similar to @ref step_31 "step-31"
2175 * but there are a number of differences we will comment on below.
2179 * The top of the class is essentially the same as in @ref step_31 "step-31", listing the
2180 * public methods and a set of private functions that do the heavy
2181 * lifting. Compared to @ref step_31 "step-31" there are only two additions to this
2182 * section: the function <code>get_cfl_number()</code> that computes the
2184 * maximum CFL number over all cells, from which we then compute the global
2185 * time step, and the function <code>get_entropy_variation()</code> that is
2185 * used in the computation of the entropy stabilization. It is akin to the
2186 * <code>get_extrapolated_temperature_range()</code> we have used in @ref step_31 "step-31"
2187 * for this purpose, but works on the entropy instead of the temperature
2191 * template <int dim>
2192 * class BoussinesqFlowProblem
2195 * struct Parameters;
2196 * BoussinesqFlowProblem(Parameters &parameters);
2200 * void setup_dofs();
2201 * void assemble_stokes_preconditioner();
2202 * void build_stokes_preconditioner();
2203 * void assemble_stokes_system();
2204 * void assemble_temperature_matrix();
2205 * void assemble_temperature_system(const double maximal_velocity);
2206 * double get_maximal_velocity() const;
2207 * double get_cfl_number() const;
2208 * double get_entropy_variation(const double average_temperature) const;
2209 * std::pair<double, double> get_extrapolated_temperature_range() const;
2211 * void output_results();
2212 * void refine_mesh(const unsigned int max_grid_level);
2214 * double compute_viscosity(
2215 * const std::vector<double> & old_temperature,
2216 * const std::vector<double> & old_old_temperature,
2217 * const std::vector<Tensor<1, dim>> &old_temperature_grads,
2218 * const std::vector<Tensor<1, dim>> &old_old_temperature_grads,
2219 * const std::vector<double> & old_temperature_laplacians,
2220 * const std::vector<double> & old_old_temperature_laplacians,
2221 * const std::vector<Tensor<1, dim>> &old_velocity_values,
2222 * const std::vector<Tensor<1, dim>> &old_old_velocity_values,
2223 * const std::vector<SymmetricTensor<2, dim>> &old_strain_rates,
2224 * const std::vector<SymmetricTensor<2, dim>> &old_old_strain_rates,
2225 * const double global_u_infty,
2226 * const double global_T_variation,
2227 * const double average_temperature,
2228 * const double global_entropy_variation,
2229 * const double cell_diameter) const;
2234 * The first significant new component is the definition of a struct for
2235 * the parameters according to the discussion in the introduction. This
2236 * structure is initialized by reading from a parameter file during
2237 * construction of this object.
2242 * Parameters(const std::string &parameter_filename);
2244 * static void declare_parameters(ParameterHandler &prm);
2245 * void parse_parameters(ParameterHandler &prm);
2249 * unsigned int initial_global_refinement;
2250 * unsigned int initial_adaptive_refinement;
2252 * bool generate_graphical_output;
2253 * unsigned int graphical_output_interval;
2255 * unsigned int adaptive_refinement_interval;
2257 * double stabilization_alpha;
2258 * double stabilization_c_R;
2259 * double stabilization_beta;
2261 * unsigned int stokes_velocity_degree;
2262 * bool use_locally_conservative_discretization;
2264 * unsigned int temperature_degree;
2268 * Parameters &parameters;
2272 * The <code>pcout</code> (for <i>%parallel <code>std::cout</code></i>)
2273 * object is used to simplify writing output: each MPI process can use
2274 * this to generate output as usual, but since each of these processes
2275 * will (hopefully) produce the same output it will just be replicated
2276 * many times over; with the ConditionalOStream class, only the output
2277 * generated by one MPI process will actually be printed to screen,
2278 * whereas the output by all the other threads will simply be forgotten.
2281 * ConditionalOStream pcout;
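The way such an object is typically created mirrors this description; a hedged sketch (the actual initialization happens in the constructor further down) looks like this:

@code
ConditionalOStream pcout(std::cout,
                         Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0);
pcout << "This line is printed only once, by process 0." << std::endl;
@endcode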
2285 * The following member variables will then again be similar to those in
2286 * @ref step_31 "step-31" (and to other tutorial programs). As mentioned in the
2287 * introduction, we fully distribute computations, so we will have to use
2288 * the parallel::distributed::Triangulation class (see @ref step_40 "step-40") but the
2289 * remainder of these variables is rather standard with two exceptions:
2293 * - The <code>mapping</code> variable is used to denote a higher-order
2294 * polynomial mapping. As mentioned in the introduction, we use this
2295 * mapping when forming integrals through quadrature for all cells that
2296 * are adjacent to either the inner or outer boundaries of our domain
2297 * where the boundary is curved.
2301 * - In a bit of naming confusion, you will notice below that some of the
2302 * variables from namespace TrilinosWrappers are taken from namespace
2303 * TrilinosWrappers::MPI (such as the right hand side vectors) whereas
2304 * others are not (such as the various matrices). This is due to legacy
2305 * reasons. We will frequently have to query velocities
2306 * and temperatures at arbitrary quadrature points; consequently, rather
2307 * than importing ghost information of a vector whenever we need access
2308 * to degrees of freedom that are relevant locally but owned by another
2309 * processor, we solve linear systems in %parallel but then immediately
2310 * initialize a vector including ghost entries of the solution for further
2311 * processing. The various <code>*_solution</code> vectors are therefore
2312 * filled immediately after solving their respective linear system in
2313 * %parallel and will always contain values for all
2314 * @ref GlossLocallyRelevantDof "locally relevant degrees of freedom";
2315 * the fully distributed vectors that we obtain from the solution process
2316 * and that only ever contain the
2317 * @ref GlossLocallyOwnedDof "locally owned degrees of freedom" are
2318 * destroyed immediately after the solution process and after we have
2319 * copied the relevant values into the member variable vectors.
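A hedged sketch of this pattern, with assumed index set names <code>locally_owned_dofs</code> and <code>locally_relevant_dofs</code>, would look as follows; the assignment at the end is what imports the ghost values across MPI processes:

@code
// Vector holding only locally owned entries; this is what the solver writes into.
TrilinosWrappers::MPI::Vector distributed_solution(locally_owned_dofs,
                                                   MPI_COMM_WORLD);
// ... solve the linear system into distributed_solution ...

// Ghosted vector that additionally stores all locally relevant entries.
TrilinosWrappers::MPI::Vector ghosted_solution(locally_owned_dofs,
                                               locally_relevant_dofs,
                                               MPI_COMM_WORLD);
ghosted_solution = distributed_solution; // communicates the ghost values
@endcode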
2322 * parallel::distributed::Triangulation<dim> triangulation;
2323 * double global_Omega_diameter;
2325 * const MappingQ<dim> mapping;
2327 * const FESystem<dim> stokes_fe;
2328 * DoFHandler<dim> stokes_dof_handler;
2329 * AffineConstraints<double> stokes_constraints;
2331 * TrilinosWrappers::BlockSparseMatrix stokes_matrix;
2332 * TrilinosWrappers::BlockSparseMatrix stokes_preconditioner_matrix;
2334 * TrilinosWrappers::MPI::BlockVector stokes_solution;
2335 * TrilinosWrappers::MPI::BlockVector old_stokes_solution;
2336 * TrilinosWrappers::MPI::BlockVector stokes_rhs;
2339 * FE_Q<dim> temperature_fe;
2340 * DoFHandler<dim> temperature_dof_handler;
2341 * AffineConstraints<double> temperature_constraints;
2343 * TrilinosWrappers::SparseMatrix temperature_mass_matrix;
2344 * TrilinosWrappers::SparseMatrix temperature_stiffness_matrix;
2345 * TrilinosWrappers::SparseMatrix temperature_matrix;
2347 * TrilinosWrappers::MPI::Vector temperature_solution;
2348 * TrilinosWrappers::MPI::Vector old_temperature_solution;
2349 * TrilinosWrappers::MPI::Vector old_old_temperature_solution;
2350 * TrilinosWrappers::MPI::Vector temperature_rhs;
2354 * double old_time_step;
2355 * unsigned int timestep_number;
2357 * std::shared_ptr<TrilinosWrappers::PreconditionAMG> Amg_preconditioner;
2358 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> Mp_preconditioner;
2359 * std::shared_ptr<TrilinosWrappers::PreconditionJacobi> T_preconditioner;
2361 * bool rebuild_stokes_matrix;
2362 * bool rebuild_stokes_preconditioner;
2363 * bool rebuild_temperature_matrices;
2364 * bool rebuild_temperature_preconditioner;
2368 * The next member variable, <code>computing_timer</code> is used to
2369 * conveniently account for compute time spent in certain "sections" of
2370 * the code that are repeatedly entered. For example, we will enter (and
2371 * leave) sections for Stokes matrix assembly and would like to accumulate
2372 * the run time spent in this section over all time steps. Every so many
2373 * time steps as well as at the end of the program (through the destructor
2374 * of the TimerOutput class) we will then produce a nice summary of the
2375 * times spent in the different sections into which we categorize the
2376 * run-time of this program.
2379 * TimerOutput computing_timer;
2383 * After these member variables we have a number of auxiliary functions
2384 * that have been broken out of the ones listed above. Specifically, there
2385 * are first three functions that we call from <code>setup_dofs</code> and
2386 * then the ones that do the assembling of linear systems:
2389 * void setup_stokes_matrix(
2390 * const std::vector<IndexSet> &stokes_partitioning,
2391 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2392 * void setup_stokes_preconditioner(
2393 * const std::vector<IndexSet> &stokes_partitioning,
2394 * const std::vector<IndexSet> &stokes_relevant_partitioning);
2395 * void setup_temperature_matrices(
2396 * const IndexSet &temperature_partitioning,
2397 * const IndexSet &temperature_relevant_partitioning);
2402 * Following the @ref MTWorkStream "task-based parallelization" paradigm,
2403 * we split all the assembly routines into two parts: a first part that
2404 * can do all the calculations on a certain cell without taking care of
2405 * other threads, and a second part (which is writing the local data into
2406 * the global matrices and vectors) which can be entered by only one
2407 * thread at a time. In order to implement that, we provide functions for
2408 * each of those two steps for all the four assembly routines that we use
2409 * in this program. The following eight functions do exactly this:
2412 * void local_assemble_stokes_preconditioner(
2413 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2414 * Assembly::Scratch::StokesPreconditioner<dim> & scratch,
2415 * Assembly::CopyData::StokesPreconditioner<dim> & data);
2417 * void copy_local_to_global_stokes_preconditioner(
2418 * const Assembly::CopyData::StokesPreconditioner<dim> &data);
2421 * void local_assemble_stokes_system(
2422 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2423 * Assembly::Scratch::StokesSystem<dim> & scratch,
2424 * Assembly::CopyData::StokesSystem<dim> & data);
2426 * void copy_local_to_global_stokes_system(
2427 * const Assembly::CopyData::StokesSystem<dim> &data);
2430 * void local_assemble_temperature_matrix(
2431 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2432 * Assembly::Scratch::TemperatureMatrix<dim> & scratch,
2433 * Assembly::CopyData::TemperatureMatrix<dim> & data);
2435 * void copy_local_to_global_temperature_matrix(
2436 * const Assembly::CopyData::TemperatureMatrix<dim> &data);
2440 * void local_assemble_temperature_rhs(
2441 * const std::pair<double, double> global_T_range,
2442 * const double global_max_velocity,
2443 * const double global_entropy_variation,
2444 * const typename DoFHandler<dim>::active_cell_iterator &cell,
2445 * Assembly::Scratch::TemperatureRHS<dim> & scratch,
2446 * Assembly::CopyData::TemperatureRHS<dim> & data);
2448 * void copy_local_to_global_temperature_rhs(
2449 * const Assembly::CopyData::TemperatureRHS<dim> &data);
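For illustration, here is a hedged sketch of how one of these pairs would be wired into WorkStream::run inside the corresponding <code>assemble_*</code> function; <code>quadrature_formula</code> and the update flags are assumptions made for the sake of the example, and the restriction to locally owned cells via FilteredIterator follows the strategy described later for the setup functions:

@code
using CellFilter =
  FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

WorkStream::run(
  CellFilter(IteratorFilters::LocallyOwnedCell(),
             stokes_dof_handler.begin_active()),
  CellFilter(IteratorFilters::LocallyOwnedCell(), stokes_dof_handler.end()),
  [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
         Assembly::Scratch::StokesPreconditioner<dim> &        scratch,
         Assembly::CopyData::StokesPreconditioner<dim> &       data) {
    this->local_assemble_stokes_preconditioner(cell, scratch, data);
  },
  [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
    this->copy_local_to_global_stokes_preconditioner(data);
  },
  Assembly::Scratch::StokesPreconditioner<dim>(
    stokes_fe,
    quadrature_formula,
    mapping,
    update_JxW_values | update_values | update_gradients),
  Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));
@endcode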
2453 * Finally, we forward declare a member class that we will define later on
2454 * and that will be used to compute a number of quantities from our
2455 * solution vectors that we'd like to put into the output files for
2459 * class Postprocessor;
2466 * <a name="BoussinesqFlowProblemclassimplementation"></a>
2467 * <h3>BoussinesqFlowProblem class implementation</h3>
2472 * <a name="BoussinesqFlowProblemParameters"></a>
2473 * <h4>BoussinesqFlowProblem::Parameters</h4>
2477 * Here comes the definition of the parameters for the Stokes problem. We
2478 * allow the user to set the end time for the simulation, the level of refinements
2479 * (both global and adaptive, which together specify what maximum level
2480 * the cells are allowed to have), and the interval between refinements in
2481 * the time stepping.
2485 * Then, we let the user specify constants for the stabilization parameters
2486 * (as discussed in the introduction), the polynomial degree for the Stokes
2487 * velocity space, whether to use the locally conservative discretization
2488 * based on FE_DGP elements for the pressure or not (FE_Q elements for
2489 * pressure), and the polynomial degree for the temperature interpolation.
2493 * The constructor checks for a valid input file (if not, a file with
2494 * default parameters for the quantities is written), and eventually parses
2498 * template <int dim>
2499 * BoussinesqFlowProblem<dim>::Parameters::Parameters(
2500 * const std::string &parameter_filename)
2502 * , initial_global_refinement(2)
2503 * , initial_adaptive_refinement(2)
2504 * , adaptive_refinement_interval(10)
2505 * , stabilization_alpha(2)
2506 * , stabilization_c_R(0.11)
2507 * , stabilization_beta(0.078)
2508 * , stokes_velocity_degree(2)
2509 * , use_locally_conservative_discretization(true)
2510 * , temperature_degree(2)
2513 * BoussinesqFlowProblem<dim>::Parameters::declare_parameters(prm);
2515 * std::ifstream parameter_file(parameter_filename);
2517 * if (!parameter_file)
2519 * parameter_file.close();
2521 * std::ofstream parameter_out(parameter_filename);
2527 * "Input parameter file <" + parameter_filename +
2528 * "> not found. Creating a template file of the same name."));
2532 * parse_parameters(prm);
2539 * Next we have a function that declares the parameters that we expect in
2540 * the input file, together with their data types, default values and a
2544 * template <int dim>
2545 * void BoussinesqFlowProblem<dim>::Parameters::declare_parameters(
2551 * "The end time of the simulation in years.");
2555 * "The number of global refinement steps performed on "
2556 * "the initial coarse mesh, before the problem is first "
2561 * "The number of adaptive refinement steps performed after "
2562 * "initial global refinement.");
2566 * "The number of time steps after which the mesh is to be "
2567 * "adapted based on computed error indicators.");
2571 * "Whether graphical output is to be generated or not. "
2572 * "You may not want to get graphical output if the number "
2573 * "of processors is large.");
2577 * "The number of time steps between each generation of "
2578 * "graphical output files.");
2585 * "The exponent in the entropy viscosity stabilization.");
2589 * "The c_R factor in the entropy viscosity "
2590 * "stabilization.");
2594 * "The beta factor in the artificial viscosity "
2595 * "stabilization. An appropriate value for 2d is 0.052 "
2596 * "and 0.078 for 3d.");
2603 * "Stokes velocity polynomial degree",
2606 * "The polynomial degree to use for the velocity variables "
2607 * "in the Stokes system.");
2609 * "Temperature polynomial degree",
2612 * "The polynomial degree to use for the temperature variable.");
2614 * "Use locally conservative discretization",
2617 * "Whether to use a Stokes discretization that is locally "
2618 * "conservative at the expense of a larger number of degrees "
2619 * "of freedom, or to go with a cheaper discretization "
2620 * "that does not locally conserve mass (although it is "
2621 * "globally conservative.");
2630 * And then we need a function that reads the contents of the
2632 * results into variables that store the values of the parameters we have
2633 * previously declared:
2636 * template <int dim>
2637 * void BoussinesqFlowProblem<dim>::Parameters::parse_parameters(
2641 * initial_global_refinement = prm.get_integer("Initial global refinement");
2642 * initial_adaptive_refinement =
2645 * adaptive_refinement_interval =
2646 * prm.get_integer("Time steps between mesh refinement");
2648 * generate_graphical_output = prm.get_bool("Generate graphical output");
2649 * graphical_output_interval =
2650 * prm.get_integer("Time steps between graphical output");
2654 * stabilization_alpha = prm.get_double("alpha");
2656 * stabilization_beta = prm.get_double("beta");
2662 * stokes_velocity_degree =
2663 * prm.get_integer("Stokes velocity polynomial degree");
2664 * temperature_degree = prm.get_integer("Temperature polynomial degree");
2665 * use_locally_conservative_discretization =
2666 * prm.get_bool("Use locally conservative discretization");
2676 * <a name="BoussinesqFlowProblemBoussinesqFlowProblem"></a>
2677 * <h4>BoussinesqFlowProblem::BoussinesqFlowProblem</h4>
2681 * The constructor of the problem is very similar to the constructor in
2682 * @ref step_31 "step-31". What is different is the %parallel communication: Trilinos uses
2683 * a message passing interface (MPI) for data distribution. When entering
2684 * the BoussinesqFlowProblem class, we have to decide how the parallelization
2685 * is to be done. We choose a rather simple strategy and let all processors
2686 * that are running the program work together, specified by the communicator
2687 * <code>MPI_COMM_WORLD</code>. Next, we create the output stream (as we
2688 * already did in @ref step_18 "step-18") that only generates output on the first MPI
2689 * process and is completely forgetful on all others. The implementation of
2690 * this idea is to check the process number when <code>pcout</code> gets a
2691 * true argument, and it uses the <code>std::cout</code> stream for
2692 * output. If we are processor five, for instance, then we will give a
2693 * <code>false</code> argument to <code>pcout</code>, which means that the
2694 * output of that processor will not be printed. With the exception of the
2695 * mapping object (for which we use polynomials of degree 4) all but the
2696 * final member variable are exactly the same as in @ref step_31 "step-31".
2700 * This final object, the TimerOutput object, is then told to restrict
2701 * output to the <code>pcout</code> stream (processor 0), and then we
2702 * specify that we want to get a summary table at the end of the program
2703 * which shows us wallclock times (as opposed to CPU times). We will
2704 * manually also request intermediate summaries every so many time steps in
2705 * the <code>run()</code> function below.
2708 * template <int dim>
2709 * BoussinesqFlowProblem<dim>::BoussinesqFlowProblem(Parameters &parameters_)
2710 * : parameters(parameters_)
2720 * global_Omega_diameter(0.)
2726 * stokes_fe(FE_Q<dim>(parameters.stokes_velocity_degree),
2728 * (parameters.use_locally_conservative_discretization ?
2730 * FE_DGP<dim>(parameters.stokes_velocity_degree - 1)) :
2732 * FE_Q<dim>(parameters.stokes_velocity_degree - 1))),
2739 * temperature_fe(parameters.temperature_degree)
2744 * , old_time_step(0)
2745 * , timestep_number(0)
2746 * , rebuild_stokes_matrix(true)
2747 * , rebuild_stokes_preconditioner(true)
2748 * , rebuild_temperature_matrices(true)
2749 * , rebuild_temperature_preconditioner(true)
2752 * computing_timer(MPI_COMM_WORLD,
2763 * <a name="TheBoussinesqFlowProblemhelperfunctions"></a>
2764 * <h4>The BoussinesqFlowProblem helper functions</h4>
2766 * <a name="BoussinesqFlowProblemget_maximal_velocity"></a>
2767 * <h5>BoussinesqFlowProblem::get_maximal_velocity</h5>
2771 * Except for two small details, the function to compute the global maximum
2772 * of the velocity is the same as in @ref step_31 "step-31". The first detail is actually
2773 * common to all functions that implement loops over all cells in the
2775 * on a chunk of cells since each processor only has a certain part of the
2776 * entire triangulation. This chunk of cells that we want to work on is
2777 * identified via a so-called <code>subdomain_id</code>, as we also did in
2778 * @ref step_18 "step-18". All we need to change is hence to perform the cell-related
2779 * operations only on cells that are owned by the current process (as
2780 * opposed to ghost or artificial cells), i.e. for which the subdomain id
2781 * equals the number of the process ID. Since this is a commonly used
2782 * operation, there is a shortcut for it: we can ask whether the
2783 * cell is owned by the current processor using
2784 * <code>cell-@>is_locally_owned()</code>.
2788 * The second difference is the way we calculate the maximum value. Before,
2789 * we could simply have a <code>double</code> variable that we checked
2790 * against on each quadrature point for each cell. Now, we have to be a bit
2791 * more careful since each processor only operates on a subset of
2792 * cells. What we do is to first let each processor calculate the maximum
2793 * among its cells, and then do a global communication operation
2795 * all the maximum values of the individual processors. MPI provides such a
2796 * call, but it's even simpler to use the respective function in namespace
2797 * Utilities::MPI using the MPI communicator object since that will do the
2798 * right thing even if we work without MPI and on a single machine only. The
2799 * call to <code>Utilities::MPI::max</code> needs two arguments, namely the
2800 * local maximum (input) and the MPI communicator, which is MPI_COMM_WORLD
2804 * template <int dim>
2805 * double BoussinesqFlowProblem<dim>::get_maximal_velocity() const
2807 * const QIterated<dim> quadrature_formula(QTrapezoid<1>(),
2808 * parameters.stokes_velocity_degree);
2809 * const unsigned int n_q_points = quadrature_formula.size();
2811 * FEValues<dim> fe_values(mapping,
2813 * quadrature_formula,
2815 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2817 * const FEValuesExtractors::Vector velocities(0);
2819 * double max_local_velocity = 0;
2821 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2822 * if (cell->is_locally_owned())
2824 * fe_values.reinit(cell);
2825 * fe_values[velocities].get_function_values(stokes_solution,
2828 * for (unsigned int q = 0; q < n_q_points; ++q)
2829 * max_local_velocity =
2830 * std::max(max_local_velocity, velocity_values[q].norm());
2833 * return Utilities::MPI::max(max_local_velocity, MPI_COMM_WORLD);
2840 * <a name="BoussinesqFlowProblemget_cfl_number"></a>
2841 * <h5>BoussinesqFlowProblem::get_cfl_number</h5>
2845 * The next function does something similar, but we now compute the CFL
2846 * number, i.e., maximal velocity on a cell divided by the cell
2847 * diameter. This number is necessary to determine the time step size, as we
2848 * use a semi-explicit time stepping scheme for the temperature equation
2849 * (see @ref step_31 "step-31" for a discussion). We compute it in the same way as above:
2850 * Compute the local maximum over all locally owned cells, then exchange it
2851 * via MPI to find the global maximum.
2854 * template <int dim>
2855 * double BoussinesqFlowProblem<dim>::get_cfl_number() const
2857 * const QIterated<dim> quadrature_formula(QTrapezoid<1>(),
2858 * parameters.stokes_velocity_degree);
2859 * const unsigned int n_q_points = quadrature_formula.size();
2861 * FEValues<dim> fe_values(mapping,
2863 * quadrature_formula,
2865 * std::vector<Tensor<1, dim>> velocity_values(n_q_points);
2867 * const FEValuesExtractors::Vector velocities(0);
2869 * double max_local_cfl = 0;
2871 * for (const auto &cell : stokes_dof_handler.active_cell_iterators())
2872 * if (cell->is_locally_owned())
2874 * fe_values.reinit(cell);
2875 * fe_values[velocities].get_function_values(stokes_solution,
2878 * double max_local_velocity = 1e-10;
2879 * for (unsigned int q = 0; q < n_q_points; ++q)
2880 * max_local_velocity =
2881 * std::max(max_local_velocity, velocity_values[q].norm());
2883 * std::max(max_local_cfl, max_local_velocity / cell->diameter());
2886 * return Utilities::MPI::max(max_local_cfl, MPI_COMM_WORLD);
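As a hedged reminder of how this quantity is used (the actual choice is made in the <code>run()</code> function, which is not part of this excerpt): the time step is taken inversely proportional to the CFL number, schematically @f$\Delta t = \frac{c_k}{\textrm{CFL}}@f$ for some constant @f$c_k<1@f$ that depends on the space dimension and the temperature polynomial degree.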
2893 * <a name="BoussinesqFlowProblemget_entropy_variation"></a>
2894 * <h5>BoussinesqFlowProblem::get_entropy_variation</h5>
2898 * Next comes the computation of the global entropy variation
2899 * @f$\|E(T)-\bar{E}(T)\|_\infty@f$ where the entropy @f$E@f$ is defined as
2900 * discussed in the introduction. This is needed for the evaluation of the
2901 * stabilization in the temperature equation as explained in the
2902 * introduction. The entropy variation is actually only needed if we use
2903 * @f$\alpha=2@f$ as a power in the residual computation. The infinity norm is
2904 * computed by the maxima over quadrature points, as usual in discrete
2909 * In order to compute this quantity, we first have to find the
2910 * space-average @f$\bar{E}(T)@f$ and then evaluate the maximum. However, that
2911 * means that we would need to perform two loops. We can avoid the overhead
2912 * by noting that @f$\|E(T)-\bar{E}(T)\|_\infty =
2913 * \max\big(E_{\textrm{max}}(T)-\bar{E}(T),
2914 * \bar{E}(T)-E_{\textrm{min}}(T)\big)@f$, i.e., the maximum out of the
2915 * deviation from the average entropy in positive and negative
2916 * directions. The four quantities we need for the latter formula (maximum
2917 * entropy, minimum entropy, average entropy, area) can all be evaluated in
2918 * the same loop over all cells, so we choose this simpler variant.
2921 * template <int dim>
2922 * double BoussinesqFlowProblem<dim>::get_entropy_variation(
2923 * const double average_temperature) const
2925 * if (parameters.stabilization_alpha != 2)
2928 * const QGauss<dim> quadrature_formula(parameters.temperature_degree + 1);
2929 * const unsigned int n_q_points = quadrature_formula.size();
2931 * FEValues<dim> fe_values(temperature_fe,
2932 * quadrature_formula,
2933 * update_values | update_JxW_values);
2934 * std::vector<double> old_temperature_values(n_q_points);
2935 * std::vector<double> old_old_temperature_values(n_q_points);
2939 * In the two functions above we computed the maximum of numbers that were
2940 * all non-negative, so we knew that zero was certainly a lower bound. On
2941 * the other hand, here we need to find the maximum deviation from the
2942 * average value, i.e., we will need to know the maximal and minimal
2943 * values of the entropy for which we don't a priori know the sign.
2947 * To compute it, we can therefore start with the largest and smallest
2948 * possible values we can store in a double precision number: The minimum
2949 * is initialized with a bigger and the maximum with a smaller number than
2950 * any one that is going to appear. We are then guaranteed that these
2952 * processor does not own any cells, in the communication step at the
2953 * latest. The following loop then computes the minimum and maximum local
2954 * entropy as well as keeps track of the area/volume of the part of the
2955 * domain we locally own and the integral over the entropy on it:
2960 * entropy_integrated = 0;
2962 * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
2963 * if (cell->is_locally_owned())
2965 * fe_values.reinit(cell);
2966 * fe_values.get_function_values(old_temperature_solution,
2967 * old_temperature_values);
2968 * fe_values.get_function_values(old_old_temperature_solution,
2969 * old_old_temperature_values);
2970 * for (unsigned int q = 0; q < n_q_points; ++q)
2973 * (old_temperature_values[q] + old_old_temperature_values[q]) / 2;
2974 * const double entropy =
2975 * ((T - average_temperature) * (T - average_temperature));
2977 * min_entropy = std::min(min_entropy, entropy);
2978 * max_entropy = std::max(max_entropy, entropy);
2979 * area += fe_values.JxW(q);
2980 * entropy_integrated += fe_values.JxW(q) * entropy;
2986 * Now we only need to exchange data between processors: we need to sum
2987 * the two integrals (<code>area</code>, <code>entropy_integrated</code>),
2988 * and get the extrema for maximum and minimum. We could do this through
2989 * four different data exchanges, but we can do it with two:
2991 * values that are all to be summed up. And we can also utilize the
2993 * the minimal entropies equals forming the negative of the maximum over
2994 * the negative of the minimal entropies; this maximum can then be
2995 * combined with forming the maximum over the maximal entropies.
2998 * const double local_sums[2] = {entropy_integrated, area},
2999 * local_maxima[2] = {-min_entropy, max_entropy};
3000 * double global_sums[2], global_maxima[2];
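The two collective operations themselves sit on lines elided from this excerpt; given the arrays defined above, they would be expected to look like the following sketch, with one sum reduction for the integrals and one max reduction for the extrema:

@code
Utilities::MPI::sum(local_sums, MPI_COMM_WORLD, global_sums);
Utilities::MPI::max(local_maxima, MPI_COMM_WORLD, global_maxima);
@endcode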
3007 * Having computed everything this way, we can then compute the average
3008 * entropy and find the @f$L^\infty@f$ norm by taking the larger of the
3009 * deviation of the maximum or minimum from the average:
3012 * const double average_entropy = global_sums[0] / global_sums[1];
3013 * const double entropy_diff = std::max(global_maxima[1] - average_entropy,
3014 * average_entropy - (-global_maxima[0]));
3015 * return entropy_diff;
3023 * <a name="BoussinesqFlowProblemget_extrapolated_temperature_range"></a>
3024 * <h5>BoussinesqFlowProblem::get_extrapolated_temperature_range</h5>
3028 * The next function computes the minimal and maximal value of the
3029 * extrapolated temperature over the entire domain. Again, this is only a
3030 * slightly modified version of the respective function in @ref step_31 "step-31". As in
3031 * the function above, we collect local minima and maxima and then compute
3032 * the global extrema using the same trick as above.
3036 * As already discussed in @ref step_31 "step-31", the function needs to distinguish
3037 * between the first and all following time steps because it uses a higher
3038 * order temperature extrapolation scheme when at least two previous time
3039 * steps are available.
3042 * template <int dim>
3043 * std::pair<double, double>
3044 * BoussinesqFlowProblem<dim>::get_extrapolated_temperature_range() const
3047 * parameters.temperature_degree);
3048 * const unsigned int n_q_points = quadrature_formula.size();
3052 * quadrature_formula,
3054 * std::vector<double> old_temperature_values(n_q_points);
3055 * std::vector<double> old_old_temperature_values(n_q_points);
3060 * if (timestep_number != 0)
3062 * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3063 * if (cell->is_locally_owned())
3065 * fe_values.reinit(cell);
3066 * fe_values.get_function_values(old_temperature_solution,
3067 * old_temperature_values);
3068 * fe_values.get_function_values(old_old_temperature_solution,
3069 * old_old_temperature_values);
3071 * for (unsigned int q = 0; q < n_q_points; ++q)
3073 * const double temperature =
3074 * (1. + time_step / old_time_step) *
3075 * old_temperature_values[q] -
3076 * time_step / old_time_step * old_old_temperature_values[q];
3078 * min_local_temperature =
3079 * std::min(min_local_temperature, temperature);
3080 * max_local_temperature =
3081 * std::max(max_local_temperature, temperature);
3087 * for (const auto &cell : temperature_dof_handler.active_cell_iterators())
3088 * if (cell->is_locally_owned())
3090 * fe_values.reinit(cell);
3091 * fe_values.get_function_values(old_temperature_solution,
3092 * old_temperature_values);
3094 * for (unsigned int q = 0; q < n_q_points; ++q)
3096 * const double temperature = old_temperature_values[q];
3098 * min_local_temperature =
3099 * std::min(min_local_temperature, temperature);
3100 * max_local_temperature =
3101 * std::max(max_local_temperature, temperature);
3106 * double local_extrema[2] = {-min_local_temperature, max_local_temperature};
3107 * double global_extrema[2];
3110 * return std::make_pair(-global_extrema[0], global_extrema[1]);
3117 * <a name="BoussinesqFlowProblemcompute_viscosity"></a>
3118 * <h5>BoussinesqFlowProblem::compute_viscosity</h5>
3122 * The function that calculates the viscosity is purely local and so needs
3123 * no communication at all. It is mostly the same as in @ref step_31 "step-31" but with an
3124 * updated formulation of the viscosity if @f$\alpha=2@f$ is chosen:
3127 * template <int dim>
3128 * double BoussinesqFlowProblem<dim>::compute_viscosity(
3129 * const std::vector<double> & old_temperature,
3130 * const std::vector<double> & old_old_temperature,
3133 * const std::vector<double> & old_temperature_laplacians,
3134 * const std::vector<double> & old_old_temperature_laplacians,
3139 * const double global_u_infty,
3140 * const double global_T_variation,
3141 * const double average_temperature,
3142 * const double global_entropy_variation,
3143 * const double cell_diameter) const
3145 * if (global_u_infty == 0)
3146 * return 5e-3 * cell_diameter;
3148 * const unsigned int n_q_points = old_temperature.size();
3150 * double max_residual = 0;
3151 * double max_velocity = 0;
3153 * for (unsigned int q = 0; q < n_q_points; ++q)
3156 * (old_velocity_values[q] + old_old_velocity_values[q]) / 2;
3159 * (old_strain_rates[q] + old_old_strain_rates[q]) / 2;
3161 * const double T = (old_temperature[q] + old_old_temperature[q]) / 2;
3162 * const double dT_dt =
3163 * (old_temperature[q] - old_old_temperature[q]) / old_time_step;
3164 * const double u_grad_T =
3165 * u * (old_temperature_grads[q] + old_old_temperature_grads[q]) / 2;
3167 * const double kappa_Delta_T =
3168 * EquationData::kappa *
3169 * (old_temperature_laplacians[q] + old_old_temperature_laplacians[q]) /
3171 * const double gamma =
3172 * ((EquationData::radiogenic_heating * EquationData::density(T) +
3173 * 2 * EquationData::eta * strain_rate * strain_rate) /
3174 * (EquationData::density(T) * EquationData::specific_heat));
3176 * double residual = std::abs(dT_dt + u_grad_T - kappa_Delta_T - gamma);
3177 * if (parameters.stabilization_alpha == 2)
3178 * residual *= std::abs(T - average_temperature);
3180 * max_residual = std::max(residual, max_residual);
3184 * const double max_viscosity =
3185 * (parameters.stabilization_beta * max_velocity * cell_diameter);
3186 * if (timestep_number == 0)
3187 * return max_viscosity;
3192 * double entropy_viscosity;
3193 * if (parameters.stabilization_alpha == 2)
3194 * entropy_viscosity =
3195 * (parameters.stabilization_c_R * cell_diameter * cell_diameter *
3196 * max_residual / global_entropy_variation);
3198 * entropy_viscosity =
3199 * (parameters.stabilization_c_R * cell_diameter *
3200 * global_Omega_diameter * max_velocity * max_residual /
3201 * (global_u_infty * global_T_variation));
3203 * return std::min(max_viscosity, entropy_viscosity);
3212 * <a name="TheBoussinesqFlowProblemsetupfunctions"></a>
3213 * <h4>The BoussinesqFlowProblem setup functions</h4>
3218 * for the Stokes preconditioner, and the temperature matrix. The code is
3219 * mostly the same as in @ref step_31 "step-31", but it has been broken out into three
3220 * functions of their own for simplicity.
3224 * The main functional difference between the code here and that in @ref step_31 "step-31"
3225 * is that the matrices we want to set up are distributed across multiple
3226 * processors. Since we still want to build up the sparsity pattern first
3227 * for efficiency reasons, we could continue to build the <i>entire</i>
3229 * @ref step_31 "step-31". However, that would be inefficient: every processor would build
3230 * the same sparsity pattern, but only initialize a small part of the matrix
3231 * using it. It also violates the principle that every processor should only
3232 * work on those cells it owns (and, if necessary, the layer of ghost cells
3238 * which is (obviously) a wrapper around a sparsity pattern object provided
3239 * by Trilinos. The advantage is that the Trilinos sparsity pattern class
3240 * can communicate across multiple processors: if this processor fills in
3241 * all the nonzero entries that result from the cells it owns, and every
3242 * other processor does so as well, then at the end after some MPI
3243 * communication initiated by the <code>compress()</code> call, we will have
3244 * the globally assembled sparsity pattern available with which the global
3245 * matrix can be initialized.
3249 * There is one important aspect when initializing Trilinos sparsity
3250 * patterns in parallel: In addition to specifying the locally owned rows
3251 * and columns of the matrices via the @p stokes_partitioning index set, we
3252 * also supply information about all the rows we are possibly going to write
3253 * into when assembling on a certain processor. The set of locally relevant
3254 * rows contains all such rows (possibly also a few unnecessary ones, but it
3255 * is difficult to find the exact row indices before actually getting
3256 * indices on all cells and resolving constraints). This additional
3257 * information allows us to exactly determine the structure for the
3258 * off-processor data found during assembly. While Trilinos matrices are
3259 * able to collect this information on the fly as well (when initializing
3260 * them from some other reinit method), it is less efficient and leads to
3261 * problems when assembling matrices with multiple threads. In this program,
3262 * we pessimistically assume that only one processor at a time can write
3263 * into the matrix during assembly (whereas the computation is parallel),
3264 * which is fine for Trilinos matrices. In practice, one can do better by
3266 * parallelism among those cells (see the graph coloring algorithms and
3267 * WorkStream with colored iterators argument). However, that only works
3268 * when only one MPI processor is present because Trilinos' internal data
3269 * structures for accumulating off-processor data on the fly are not thread
3270 * safe. With the initialization presented here, there is no such problem
3271 * and one could safely introduce graph coloring for this algorithm.
3275 * The only other change we need to make is to tell the
3276 * DoFTools::make_sparsity_pattern() function that it is only supposed to
3277 * work on a subset of cells, namely the ones whose
3278 * <code>subdomain_id</code> equals the number of the current processor, and
3279 * to ignore all other cells.
3283 * This strategy is replicated across all three of the following functions.
3287 * Note that Trilinos matrices store the information contained in the
3288 * sparsity patterns, so we can safely release the <code>sp</code> variable
3289 * once the matrix has been given the sparsity structure.
  template <int dim>
  void BoussinesqFlowProblem<dim>::setup_stokes_matrix(
    const std::vector<IndexSet> &stokes_partitioning,
    const std::vector<IndexSet> &stokes_relevant_partitioning)
  {
    stokes_matrix.clear();

    TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
                                              stokes_partitioning,
                                              stokes_relevant_partitioning,
                                              MPI_COMM_WORLD);

    Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
    for (unsigned int c = 0; c < dim + 1; ++c)
      for (unsigned int d = 0; d < dim + 1; ++d)
        if (!((c == dim) && (d == dim)))
          coupling[c][d] = DoFTools::always;
        else
          coupling[c][d] = DoFTools::none;

    DoFTools::make_sparsity_pattern(stokes_dof_handler,
                                    coupling,
                                    sp,
                                    stokes_constraints,
                                    false,
                                    Utilities::MPI::this_mpi_process(
                                      MPI_COMM_WORLD));
    sp.compress();

    stokes_matrix.reinit(sp);
  }



  template <int dim>
  void BoussinesqFlowProblem<dim>::setup_stokes_preconditioner(
    const std::vector<IndexSet> &stokes_partitioning,
    const std::vector<IndexSet> &stokes_relevant_partitioning)
  {
    Amg_preconditioner.reset();
    Mp_preconditioner.reset();

    stokes_preconditioner_matrix.clear();

    TrilinosWrappers::BlockSparsityPattern sp(stokes_partitioning,
                                              stokes_partitioning,
                                              stokes_relevant_partitioning,
                                              MPI_COMM_WORLD);

    Table<2, DoFTools::Coupling> coupling(dim + 1, dim + 1);
    for (unsigned int c = 0; c < dim + 1; ++c)
      for (unsigned int d = 0; d < dim + 1; ++d)
        if (c == d)
          coupling[c][d] = DoFTools::always;
        else
          coupling[c][d] = DoFTools::none;

    DoFTools::make_sparsity_pattern(stokes_dof_handler,
                                    coupling,
                                    sp,
                                    stokes_constraints,
                                    false,
                                    Utilities::MPI::this_mpi_process(
                                      MPI_COMM_WORLD));
    sp.compress();

    stokes_preconditioner_matrix.reinit(sp);
  }



  template <int dim>
  void BoussinesqFlowProblem<dim>::setup_temperature_matrices(
    const IndexSet &temperature_partitioner,
    const IndexSet &temperature_relevant_partitioner)
  {
    T_preconditioner.reset();
    temperature_mass_matrix.clear();
    temperature_stiffness_matrix.clear();
    temperature_matrix.clear();

    TrilinosWrappers::SparsityPattern sp(temperature_partitioner,
                                         temperature_partitioner,
                                         temperature_relevant_partitioner,
                                         MPI_COMM_WORLD);
    DoFTools::make_sparsity_pattern(temperature_dof_handler,
                                    sp,
                                    temperature_constraints,
                                    false,
                                    Utilities::MPI::this_mpi_process(
                                      MPI_COMM_WORLD));
    sp.compress();

    temperature_matrix.reinit(sp);
    temperature_mass_matrix.reinit(sp);
    temperature_stiffness_matrix.reinit(sp);
  }
The remainder of the setup function (after splitting out the three
functions above) mostly has to deal with the things we need to do for
parallelization across processors. Because setting all of this up is a
significant compute time expense of the program, we put everything we do
here into a timer group so that we can get summary information about the
fraction of time spent in this part of the program at its end.

At the top as usual we enumerate degrees of freedom and sort them by
component/block, followed by writing their numbers to the screen from
processor zero. The DoFHandler::distribute_dofs() function, when applied
to a parallel::distributed::Triangulation object, sorts degrees of
freedom in such a way that all degrees of freedom associated with
subdomain zero come before all those associated with subdomain one,
etc. For the Stokes part, this entails, however, that velocities and
pressures become intermixed, but this is trivially solved by sorting
again by blocks; it is worth noting that this latter operation leaves
the relative ordering of all velocities and pressures alone, i.e. within
the velocity block we will still have all those associated with
subdomain zero before all velocities associated with subdomain one,
etc. This is important since we store each of the blocks of this matrix
distributed across all processors and want this to be done in such a way
that each processor stores that part of the matrix that is roughly equal
to the degrees of freedom located on those cells that it will actually
work on.

When printing the numbers of degrees of freedom, note that these numbers
are going to be large if we use many processors. Consequently, we let
the stream put a comma separator in between every three digits. The
state of the stream, using the locale, is saved from before to after
this operation. While slightly opaque, the code works because the
default locale (which we get using the constructor call
<code>std::locale("")</code>) implies printing numbers with a comma
separator for every third digit (i.e., thousands, millions, billions).
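The mechanism itself is plain C++ iostreams and can be illustrated in
isolation (a generic sketch, not code taken from this program):

@code
#include <iostream>
#include <locale>

int main()
{
  const std::locale previous = std::cout.getloc(); // remember current locale
  std::cout.imbue(std::locale(""));  // user's default locale: digit grouping
  std::cout << 1234567 << '\n';      // typically prints 1,234,567
  std::cout.imbue(previous);         // restore the original stream state
}
@endcode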
In this function as well as many below, we measure how much time we
spend here and collect that in a section called "Setup dof systems"
across function invocations. This is done using a TimerOutput::Scope
object that gets a timer going in the section with above name of the
`computing_timer` object upon construction of the local variable; the
timer is stopped again when the destructor of the `timing_section`
variable is called. This, of course, happens either at the end of the
function, or if we leave the function through a `return` statement or
when an exception is thrown somewhere -- in other words, whenever we
leave this function in any way. The use of such "scope" objects
therefore makes sure that we do not have to manually add code that tells
the timer to stop at every location where this function may be left.
  template <int dim>
  void BoussinesqFlowProblem<dim>::setup_dofs()
  {
    TimerOutput::Scope timing_section(computing_timer, "Setup dof systems");

    stokes_dof_handler.distribute_dofs(stokes_fe);

    std::vector<unsigned int> stokes_sub_blocks(dim + 1, 0);
    stokes_sub_blocks[dim] = 1;
    DoFRenumbering::component_wise(stokes_dof_handler, stokes_sub_blocks);

    temperature_dof_handler.distribute_dofs(temperature_fe);

    const std::vector<types::global_dof_index> stokes_dofs_per_block =
      DoFTools::count_dofs_per_fe_block(stokes_dof_handler, stokes_sub_blocks);

    const unsigned int n_u = stokes_dofs_per_block[0],
                       n_p = stokes_dofs_per_block[1],
                       n_T = temperature_dof_handler.n_dofs();

    std::locale s = pcout.get_stream().getloc();
    pcout.get_stream().imbue(std::locale(""));
    pcout << "Number of active cells: " << triangulation.n_global_active_cells()
          << " (on " << triangulation.n_levels() << " levels)" << std::endl
          << "Number of degrees of freedom: " << n_u + n_p + n_T << " (" << n_u
          << '+' << n_p << '+' << n_T << ')' << std::endl
          << std::endl;
    pcout.get_stream().imbue(s);
After this, we have to set up the various partitioners (of type
<code>IndexSet</code>, see the introduction) that describe which parts
of each matrix or vector will be stored where, then call the functions
that actually set up the matrices, and at the end also resize the
various vectors we keep around in this program.

    std::vector<IndexSet> stokes_partitioning, stokes_relevant_partitioning;
    IndexSet              temperature_partitioning(n_T),
      temperature_relevant_partitioning(n_T);
    IndexSet stokes_relevant_set;
    {
      IndexSet stokes_index_set = stokes_dof_handler.locally_owned_dofs();
      stokes_partitioning.push_back(stokes_index_set.get_view(0, n_u));
      stokes_partitioning.push_back(stokes_index_set.get_view(n_u, n_u + n_p));

      DoFTools::extract_locally_relevant_dofs(stokes_dof_handler,
                                              stokes_relevant_set);
      stokes_relevant_partitioning.push_back(
        stokes_relevant_set.get_view(0, n_u));
      stokes_relevant_partitioning.push_back(
        stokes_relevant_set.get_view(n_u, n_u + n_p));

      temperature_partitioning = temperature_dof_handler.locally_owned_dofs();
      DoFTools::extract_locally_relevant_dofs(
        temperature_dof_handler, temperature_relevant_partitioning);
    }

Following this, we can compute constraints for the solution vectors,
including hanging node constraints and homogeneous and inhomogeneous
boundary values for the Stokes and temperature fields. Note that as for
everything else, the constraint objects can not hold <i>all</i>
constraints on every processor. Rather, each processor needs to store
only those that are actually necessary for correctness given that it
only assembles linear systems on cells it owns. As discussed in
@ref distributed_paper "this paper", the set of constraints we need to
know about is exactly the set of constraints on all locally relevant
degrees of freedom, so this is what we use to initialize the constraint
objects.
    {
      stokes_constraints.clear();
      stokes_constraints.reinit(stokes_relevant_set);

      DoFTools::make_hanging_node_constraints(stokes_dof_handler,
                                              stokes_constraints);

      FEValuesExtractors::Vector velocity_components(0);
      VectorTools::interpolate_boundary_values(
        stokes_dof_handler,
        0,
        Functions::ZeroFunction<dim>(dim + 1),
        stokes_constraints,
        stokes_fe.component_mask(velocity_components));

      std::set<types::boundary_id> no_normal_flux_boundaries;
      no_normal_flux_boundaries.insert(1);
      VectorTools::compute_no_normal_flux_constraints(stokes_dof_handler,
                                                      0,
                                                      no_normal_flux_boundaries,
                                                      stokes_constraints,
                                                      mapping);
      stokes_constraints.close();
    }
    {
      temperature_constraints.clear();
      temperature_constraints.reinit(temperature_relevant_partitioning);

      DoFTools::make_hanging_node_constraints(temperature_dof_handler,
                                              temperature_constraints);
      VectorTools::interpolate_boundary_values(
        temperature_dof_handler,
        0,
        EquationData::TemperatureInitialValues<dim>(),
        temperature_constraints);
      VectorTools::interpolate_boundary_values(
        temperature_dof_handler,
        1,
        EquationData::TemperatureInitialValues<dim>(),
        temperature_constraints);
      temperature_constraints.close();
    }

All this done, we can then initialize the various matrix and vector
objects to their proper sizes. At the end, we also record that all
matrices and preconditioners have to be re-computed at the beginning of
the next time step. Note how we initialize the vectors for the Stokes
and temperature right hand sides: These are writable vectors (last
boolean argument set to @p true) that have the correct one-to-one
partitioning of locally owned elements but are still given the relevant
partitioning for means of figuring out the vector entries that are going
to be set right away. As for matrices, this allows for writing local
contributions into the vector with multiple threads (always assuming
that the same vector entry is not accessed by multiple threads at the
same time). The other vectors only allow for read access of individual
elements, including ghosts, but are not suitable for solvers.

    setup_stokes_matrix(stokes_partitioning, stokes_relevant_partitioning);
    setup_stokes_preconditioner(stokes_partitioning,
                                stokes_relevant_partitioning);
    setup_temperature_matrices(temperature_partitioning,
                               temperature_relevant_partitioning);

    stokes_rhs.reinit(stokes_partitioning,
                      stokes_relevant_partitioning,
                      MPI_COMM_WORLD,
                      true);
    stokes_solution.reinit(stokes_relevant_partitioning, MPI_COMM_WORLD);
    old_stokes_solution.reinit(stokes_solution);

    temperature_rhs.reinit(temperature_partitioning,
                           temperature_relevant_partitioning,
                           MPI_COMM_WORLD,
                           true);
    temperature_solution.reinit(temperature_relevant_partitioning,
                                MPI_COMM_WORLD);
    old_temperature_solution.reinit(temperature_solution);
    old_old_temperature_solution.reinit(temperature_solution);

    rebuild_stokes_matrix              = true;
    rebuild_stokes_preconditioner      = true;
    rebuild_temperature_matrices       = true;
    rebuild_temperature_preconditioner = true;
  }
<a name="TheBoussinesqFlowProblemassemblyfunctions"></a>
<h4>The BoussinesqFlowProblem assembly functions</h4>

Following the discussion in the introduction and in the @ref threads
module, we split the assembly functions into different parts:

<ul>
<li> The local calculations of matrices and right hand sides, given a
certain cell as input (these functions are named
<code>local_assemble_*</code> below). The resulting function is, in
other words, essentially the body of the loop over all cells in
@ref step_31 "step-31". Note, however, that these functions store the
result from the local calculations in variables of classes from the
CopyData namespace.

<li> These objects are then given to the second step which writes the
local data into the global data structures (these functions are named
<code>copy_local_to_global_*</code> below). These functions are pretty
trivial.

<li> These two subfunctions are then used in the respective assembly
routine (called <code>assemble_*</code> below), where a WorkStream
object is set up and runs over all the cells that belong to the
processor's subdomain; a generic skeleton of how these three pieces fit
together is sketched right after this list.
</ul>
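The sketch below uses placeholder names (<code>ScratchData</code>,
<code>CopyData</code>, <code>local_assemble_on_cell()</code>,
<code>copy_cell_to_global()</code>) that do not occur in this program;
it only illustrates the division of labor, not the actual interfaces
used further down:

@code
using CellIterator = typename DoFHandler<dim>::active_cell_iterator;

WorkStream::run(
  dof_handler.begin_active(),
  dof_handler.end(),
  // (1) local work: may run concurrently on several cells
  [this](const CellIterator &cell, ScratchData &scratch, CopyData &data) {
    this->local_assemble_on_cell(cell, scratch, data);
  },
  // (2) copy-local-to-global: serialized, the only step that touches
  //     global matrices and vectors
  [this](const CopyData &data) { this->copy_cell_to_global(data); },
  ScratchData(fe, quadrature), // scratch space, re-used from cell to cell
  CopyData(fe));               // per-cell results handed to the copier
@endcode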
<a name="Stokespreconditionerassembly"></a>
<h5>Stokes preconditioner assembly</h5>

Let us start with the functions that build the Stokes preconditioner.
The first two of these are pretty trivial, given the discussion
above. Note in particular that the main point in using the scratch data
object is that we want to avoid allocating any objects on the free space
each time we visit a new cell. As a consequence, the assembly function
below only has automatic local variables, and everything else is
accessed through the scratch data object, which is allocated only once
before we start the loop over all cells:
  template <int dim>
  void BoussinesqFlowProblem<dim>::local_assemble_stokes_preconditioner(
    const typename DoFHandler<dim>::active_cell_iterator &cell,
    Assembly::Scratch::StokesPreconditioner<dim> &        scratch,
    Assembly::CopyData::StokesPreconditioner<dim> &       data)
  {
    const unsigned int dofs_per_cell = stokes_fe.n_dofs_per_cell();
    const unsigned int n_q_points =
      scratch.stokes_fe_values.n_quadrature_points;

    const FEValuesExtractors::Vector velocities(0);
    const FEValuesExtractors::Scalar pressure(dim);

    scratch.stokes_fe_values.reinit(cell);
    cell->get_dof_indices(data.local_dof_indices);
    data.local_matrix = 0;

    for (unsigned int q = 0; q < n_q_points; ++q)
      {
        for (unsigned int k = 0; k < dofs_per_cell; ++k)
          {
            scratch.grad_phi_u[k] =
              scratch.stokes_fe_values[velocities].gradient(k, q);
            scratch.phi_p[k] = scratch.stokes_fe_values[pressure].value(k, q);
          }

        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            data.local_matrix(i, j) +=
              (EquationData::eta *
                 scalar_product(scratch.grad_phi_u[i], scratch.grad_phi_u[j]) +
               (1. / EquationData::eta) * EquationData::pressure_scaling *
                 EquationData::pressure_scaling *
                 (scratch.phi_p[i] * scratch.phi_p[j])) *
              scratch.stokes_fe_values.JxW(q);
      }
  }



  template <int dim>
  void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_preconditioner(
    const Assembly::CopyData::StokesPreconditioner<dim> &data)
  {
    stokes_constraints.distribute_local_to_global(data.local_matrix,
                                                  data.local_dof_indices,
                                                  stokes_preconditioner_matrix);
  }
Now for the function that actually puts things together, using the
WorkStream functions. WorkStream::run needs a begin and an end iterator
to enumerate the cells it is supposed to work on. Typically, one would
use DoFHandler::begin_active() and DoFHandler::end() for that, but here
we actually only want the subset of cells that in fact are owned by the
current processor. This is where the FilteredIterator class comes into
play: you give it a range of cells and it provides an iterator that only
iterates over that subset of cells that satisfy a certain predicate (a
predicate is a function of one argument that either returns true or
false). The predicate we use here is IteratorFilters::LocallyOwnedCell,
i.e., it returns true exactly if the cell is owned by the current
processor. The resulting iterator range is then exactly what we need.
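In code, such a filtered range can be built as in the following sketch
(it uses the same classes as the <code>CellFilter</code> alias further
down; <code>dof_handler</code> stands for whichever DoFHandler the loop
runs over):

@code
using CellFilter =
  FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

// Iterators that silently skip every cell not owned by this MPI process:
const CellFilter begin(IteratorFilters::LocallyOwnedCell(),
                       dof_handler.begin_active());
const CellFilter end(IteratorFilters::LocallyOwnedCell(),
                     dof_handler.end());
@endcode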
We then call the WorkStream::run function with this set of cells,
scratch and copy objects, and with pointers to two functions: the local
assembly and copy-local-to-global function. These functions need to have
very specific signatures: three arguments in the first and one argument
in the latter case (see the documentation of the WorkStream::run
function for the meaning of these arguments). Note how we use lambda
functions to create a function object that satisfies this
requirement. It uses function arguments for the local assembly function
that specify cell, scratch data, and copy data, as well as a function
argument for the copy function that expects the data to be written into
the global matrix (also see the discussion in @ref step_13 "step-13"'s
<code>assemble_linear_system()</code> function). On the other hand, the
implicit zeroth argument of member functions (namely the
<code>this</code> pointer of the object on which that member function is
to operate on) is <i>bound</i> to the <code>this</code> pointer of the
current function and is captured. The WorkStream::run function, as a
consequence, does not need to know anything about the object these
functions work on.

When the WorkStream is executed, it will create several local assembly
routines of the first kind for several cells and let some available
processors work on them. The function that needs to be synchronized,
i.e., the write operation into the global matrix, however, is executed
by only one thread at a time in the prescribed order. Of course, this
only holds for the parallelization on a single MPI process. Different
MPI processes will have their own WorkStream objects and do that work
completely independently (and in different memory spaces). In a
distributed calculation, some data will accumulate at degrees of freedom
that are not owned by the respective processor. It would be inefficient
to send data around every time we encounter such a dof. What happens
instead is that the Trilinos sparse matrix will keep that data and send
it to the owner at the end of assembly, by calling the
<code>compress()</code> command.
  template <int dim>
  void BoussinesqFlowProblem<dim>::assemble_stokes_preconditioner()
  {
    stokes_preconditioner_matrix = 0;

    const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);

    using CellFilter =
      FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

    auto worker =
      [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
             Assembly::Scratch::StokesPreconditioner<dim> &        scratch,
             Assembly::CopyData::StokesPreconditioner<dim> &       data) {
        this->local_assemble_stokes_preconditioner(cell, scratch, data);
      };

    auto copier =
      [this](const Assembly::CopyData::StokesPreconditioner<dim> &data) {
        this->copy_local_to_global_stokes_preconditioner(data);
      };

    WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
                               stokes_dof_handler.begin_active()),
                    CellFilter(IteratorFilters::LocallyOwnedCell(),
                               stokes_dof_handler.end()),
                    worker,
                    copier,
                    Assembly::Scratch::StokesPreconditioner<dim>(
                      stokes_fe,
                      quadrature_formula,
                      mapping,
                      update_JxW_values | update_values | update_gradients),
                    Assembly::CopyData::StokesPreconditioner<dim>(stokes_fe));

    stokes_preconditioner_matrix.compress(VectorOperation::add);
  }
The final function in this block initiates assembly of the Stokes
preconditioner matrix and then in fact builds the Stokes
preconditioner. It is mostly the same as in the serial case. The only
difference to @ref step_31 "step-31" is that we use a Jacobi
preconditioner for the pressure mass matrix instead of IC, as discussed
in the introduction.

  template <int dim>
  void BoussinesqFlowProblem<dim>::build_stokes_preconditioner()
  {
    if (rebuild_stokes_preconditioner == false)
      return;

    TimerOutput::Scope timer_section(computing_timer,
                                     "   Build Stokes preconditioner");
    pcout << "   Rebuilding Stokes preconditioner..." << std::flush;

    assemble_stokes_preconditioner();

    std::vector<std::vector<bool>> constant_modes;
    FEValuesExtractors::Vector     velocity_components(0);
    DoFTools::extract_constant_modes(stokes_dof_handler,
                                     stokes_fe.component_mask(
                                       velocity_components),
                                     constant_modes);

    Mp_preconditioner =
      std::make_shared<TrilinosWrappers::PreconditionJacobi>();
    Amg_preconditioner = std::make_shared<TrilinosWrappers::PreconditionAMG>();

    TrilinosWrappers::PreconditionAMG::AdditionalData Amg_data;
    Amg_data.constant_modes        = constant_modes;
    Amg_data.elliptic              = true;
    Amg_data.higher_order_elements = true;
    Amg_data.smoother_sweeps       = 2;
    Amg_data.aggregation_threshold = 0.02;

    Mp_preconditioner->initialize(stokes_preconditioner_matrix.block(1, 1));
    Amg_preconditioner->initialize(stokes_preconditioner_matrix.block(0, 0),
                                   Amg_data);

    rebuild_stokes_preconditioner = false;

    pcout << std::endl;
  }
<a name="Stokessystemassembly"></a>
<h5>Stokes system assembly</h5>

The next three functions implement the assembly of the Stokes system,
again split up into a part performing local calculations, one for
writing the local data into the global matrix and vector, and one for
actually running the loop over all cells with the help of the WorkStream
class. Note that the assembly of the Stokes matrix needs only to be done
in case we have changed the mesh. Otherwise, just the
(temperature-dependent) right hand side needs to be calculated
here. Since we are working with distributed matrices and vectors, we
have to call the respective <code>compress()</code> functions at the end
of the assembly in order to send non-local data to the owner process.
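In practice, that final exchange boils down to calls of the following
kind once the cell loop has finished (shown here only as a sketch; the
corresponding calls appear at the end of
<code>assemble_stokes_system()</code> below):

@code
// Ship entries accumulated for rows owned by other MPI processes to
// their owners, adding them to whatever those processes assembled:
stokes_matrix.compress(VectorOperation::add);
stokes_rhs.compress(VectorOperation::add);
@endcode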
  template <int dim>
  void BoussinesqFlowProblem<dim>::local_assemble_stokes_system(
    const typename DoFHandler<dim>::active_cell_iterator &cell,
    Assembly::Scratch::StokesSystem<dim> &                scratch,
    Assembly::CopyData::StokesSystem<dim> &               data)
  {
    const unsigned int dofs_per_cell =
      scratch.stokes_fe_values.get_fe().n_dofs_per_cell();
    const unsigned int n_q_points =
      scratch.stokes_fe_values.n_quadrature_points;

    const FEValuesExtractors::Vector velocities(0);
    const FEValuesExtractors::Scalar pressure(dim);

    scratch.stokes_fe_values.reinit(cell);

    const typename DoFHandler<dim>::active_cell_iterator temperature_cell(
      &triangulation, cell->level(), cell->index(), &temperature_dof_handler);
    scratch.temperature_fe_values.reinit(temperature_cell);

    if (rebuild_stokes_matrix)
      data.local_matrix = 0;
    data.local_rhs = 0;

    scratch.temperature_fe_values.get_function_values(
      old_temperature_solution, scratch.old_temperature_values);

    for (unsigned int q = 0; q < n_q_points; ++q)
      {
        const double old_temperature = scratch.old_temperature_values[q];

        for (unsigned int k = 0; k < dofs_per_cell; ++k)
          {
            scratch.phi_u[k] = scratch.stokes_fe_values[velocities].value(k, q);
            if (rebuild_stokes_matrix)
              {
                scratch.grads_phi_u[k] =
                  scratch.stokes_fe_values[velocities].symmetric_gradient(k, q);
                scratch.div_phi_u[k] =
                  scratch.stokes_fe_values[velocities].divergence(k, q);
                scratch.phi_p[k] =
                  scratch.stokes_fe_values[pressure].value(k, q);
              }
          }

        if (rebuild_stokes_matrix == true)
          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              data.local_matrix(i, j) +=
                (EquationData::eta * 2 *
                   (scratch.grads_phi_u[i] * scratch.grads_phi_u[j]) -
                 (EquationData::pressure_scaling * scratch.div_phi_u[i] *
                  scratch.phi_p[j]) -
                 (EquationData::pressure_scaling * scratch.phi_p[i] *
                  scratch.div_phi_u[j])) *
                scratch.stokes_fe_values.JxW(q);

        const Tensor<1, dim> gravity = EquationData::gravity_vector(
          scratch.stokes_fe_values.quadrature_point(q));

        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          data.local_rhs(i) += (EquationData::density(old_temperature) *
                                gravity * scratch.phi_u[i]) *
                               scratch.stokes_fe_values.JxW(q);
      }

    cell->get_dof_indices(data.local_dof_indices);
  }
  template <int dim>
  void BoussinesqFlowProblem<dim>::copy_local_to_global_stokes_system(
    const Assembly::CopyData::StokesSystem<dim> &data)
  {
    if (rebuild_stokes_matrix == true)
      stokes_constraints.distribute_local_to_global(data.local_matrix,
                                                    data.local_rhs,
                                                    data.local_dof_indices,
                                                    stokes_matrix,
                                                    stokes_rhs);
    else
      stokes_constraints.distribute_local_to_global(data.local_rhs,
                                                    data.local_dof_indices,
                                                    stokes_rhs);
  }
  template <int dim>
  void BoussinesqFlowProblem<dim>::assemble_stokes_system()
  {
    TimerOutput::Scope timer_section(computing_timer,
                                     "   Assemble Stokes system");

    if (rebuild_stokes_matrix == true)
      stokes_matrix = 0;
    stokes_rhs = 0;

    const QGauss<dim> quadrature_formula(parameters.stokes_velocity_degree + 1);

    using CellFilter =
      FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

    WorkStream::run(
      CellFilter(IteratorFilters::LocallyOwnedCell(),
                 stokes_dof_handler.begin_active()),
      CellFilter(IteratorFilters::LocallyOwnedCell(),
                 stokes_dof_handler.end()),
      [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
             Assembly::Scratch::StokesSystem<dim> &                scratch,
             Assembly::CopyData::StokesSystem<dim> &               data) {
        this->local_assemble_stokes_system(cell, scratch, data);
      },
      [this](const Assembly::CopyData::StokesSystem<dim> &data) {
        this->copy_local_to_global_stokes_system(data);
      },
      Assembly::Scratch::StokesSystem<dim>(
        stokes_fe,
        mapping,
        quadrature_formula,
        (update_values | update_quadrature_points | update_JxW_values |
         (rebuild_stokes_matrix == true ? update_gradients : UpdateFlags(0))),
        temperature_fe,
        update_values),
      Assembly::CopyData::StokesSystem<dim>(stokes_fe));

    if (rebuild_stokes_matrix == true)
      stokes_matrix.compress(VectorOperation::add);
    stokes_rhs.compress(VectorOperation::add);

    rebuild_stokes_matrix = false;

    pcout << std::endl;
  }
<a name="Temperaturematrixassembly"></a>
<h5>Temperature matrix assembly</h5>

The task to be performed by the next three functions is to calculate a
mass matrix and a Laplace matrix on the temperature system. These will
be combined in order to yield the semi-implicit time stepping matrix
that consists of the mass matrix plus a time step-dependent weight
factor times the Laplace matrix. This function is again essentially the
body of the loop over all cells from @ref step_31 "step-31".

The two following functions perform similar services as the ones above.
  template <int dim>
  void BoussinesqFlowProblem<dim>::local_assemble_temperature_matrix(
    const typename DoFHandler<dim>::active_cell_iterator &cell,
    Assembly::Scratch::TemperatureMatrix<dim> &           scratch,
    Assembly::CopyData::TemperatureMatrix<dim> &          data)
  {
    const unsigned int dofs_per_cell =
      scratch.temperature_fe_values.get_fe().n_dofs_per_cell();
    const unsigned int n_q_points =
      scratch.temperature_fe_values.n_quadrature_points;

    scratch.temperature_fe_values.reinit(cell);
    cell->get_dof_indices(data.local_dof_indices);

    data.local_mass_matrix      = 0;
    data.local_stiffness_matrix = 0;

    for (unsigned int q = 0; q < n_q_points; ++q)
      {
        for (unsigned int k = 0; k < dofs_per_cell; ++k)
          {
            scratch.grad_phi_T[k] =
              scratch.temperature_fe_values.shape_grad(k, q);
            scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
          }

        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          for (unsigned int j = 0; j < dofs_per_cell; ++j)
            {
              data.local_mass_matrix(i, j) +=
                (scratch.phi_T[i] * scratch.phi_T[j] *
                 scratch.temperature_fe_values.JxW(q));
              data.local_stiffness_matrix(i, j) +=
                (EquationData::kappa * scratch.grad_phi_T[i] *
                 scratch.grad_phi_T[j] * scratch.temperature_fe_values.JxW(q));
            }
      }
  }



  template <int dim>
  void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_matrix(
    const Assembly::CopyData::TemperatureMatrix<dim> &data)
  {
    temperature_constraints.distribute_local_to_global(data.local_mass_matrix,
                                                       data.local_dof_indices,
                                                       temperature_mass_matrix);
    temperature_constraints.distribute_local_to_global(
      data.local_stiffness_matrix,
      data.local_dof_indices,
      temperature_stiffness_matrix);
  }
  template <int dim>
  void BoussinesqFlowProblem<dim>::assemble_temperature_matrix()
  {
    if (rebuild_temperature_matrices == false)
      return;

    TimerOutput::Scope timer_section(computing_timer,
                                     "   Assemble temperature matrices");
    temperature_mass_matrix      = 0;
    temperature_stiffness_matrix = 0;

    const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);

    using CellFilter =
      FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

    WorkStream::run(
      CellFilter(IteratorFilters::LocallyOwnedCell(),
                 temperature_dof_handler.begin_active()),
      CellFilter(IteratorFilters::LocallyOwnedCell(),
                 temperature_dof_handler.end()),
      [this](const typename DoFHandler<dim>::active_cell_iterator &cell,
             Assembly::Scratch::TemperatureMatrix<dim> &           scratch,
             Assembly::CopyData::TemperatureMatrix<dim> &          data) {
        this->local_assemble_temperature_matrix(cell, scratch, data);
      },
      [this](const Assembly::CopyData::TemperatureMatrix<dim> &data) {
        this->copy_local_to_global_temperature_matrix(data);
      },
      Assembly::Scratch::TemperatureMatrix<dim>(temperature_fe,
                                                mapping,
                                                quadrature_formula),
      Assembly::CopyData::TemperatureMatrix<dim>(temperature_fe));

    temperature_mass_matrix.compress(VectorOperation::add);
    temperature_stiffness_matrix.compress(VectorOperation::add);

    rebuild_temperature_matrices       = false;
    rebuild_temperature_preconditioner = true;
  }
<a name="Temperaturerighthandsideassembly"></a>
<h5>Temperature right hand side assembly</h5>

This is the last assembly function. It calculates the right hand side of
the temperature system, which includes the convection and the
stabilization terms. It includes a lot of evaluations of old solutions
at the quadrature points (which are necessary for calculating the
artificial viscosity of stabilization), but is otherwise similar to the
other assembly functions. Notice, once again, how we resolve the dilemma
of having inhomogeneous boundary conditions, by just making a right hand
side at this point (compare the comments for the <code>project()</code>
function above): We create some matrix columns with exactly the values
that would be entered for the temperature stiffness matrix, in case we
have inhomogeneously constrained dofs. That will account for the correct
balance of the right hand side vector with the matrix system of the
temperature equation.
  template <int dim>
  void BoussinesqFlowProblem<dim>::local_assemble_temperature_rhs(
    const std::pair<double, double> global_T_range,
    const double                    global_max_velocity,
    const double                    global_entropy_variation,
    const typename DoFHandler<dim>::active_cell_iterator &cell,
    Assembly::Scratch::TemperatureRHS<dim> &              scratch,
    Assembly::CopyData::TemperatureRHS<dim> &             data)
  {
    const bool use_bdf2_scheme = (timestep_number != 0);

    const unsigned int dofs_per_cell =
      scratch.temperature_fe_values.get_fe().n_dofs_per_cell();
    const unsigned int n_q_points =
      scratch.temperature_fe_values.n_quadrature_points;

    const FEValuesExtractors::Vector velocities(0);

    data.local_rhs     = 0;
    data.matrix_for_bc = 0;
    cell->get_dof_indices(data.local_dof_indices);

    scratch.temperature_fe_values.reinit(cell);

    const typename DoFHandler<dim>::active_cell_iterator stokes_cell(
      &triangulation, cell->level(), cell->index(), &stokes_dof_handler);
    scratch.stokes_fe_values.reinit(stokes_cell);

    scratch.temperature_fe_values.get_function_values(
      old_temperature_solution, scratch.old_temperature_values);
    scratch.temperature_fe_values.get_function_values(
      old_old_temperature_solution, scratch.old_old_temperature_values);

    scratch.temperature_fe_values.get_function_gradients(
      old_temperature_solution, scratch.old_temperature_grads);
    scratch.temperature_fe_values.get_function_gradients(
      old_old_temperature_solution, scratch.old_old_temperature_grads);

    scratch.temperature_fe_values.get_function_laplacians(
      old_temperature_solution, scratch.old_temperature_laplacians);
    scratch.temperature_fe_values.get_function_laplacians(
      old_old_temperature_solution, scratch.old_old_temperature_laplacians);

    scratch.stokes_fe_values[velocities].get_function_values(
      stokes_solution, scratch.old_velocity_values);
    scratch.stokes_fe_values[velocities].get_function_values(
      old_stokes_solution, scratch.old_old_velocity_values);
    scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
      stokes_solution, scratch.old_strain_rates);
    scratch.stokes_fe_values[velocities].get_function_symmetric_gradients(
      old_stokes_solution, scratch.old_old_strain_rates);

    const double nu =
      compute_viscosity(scratch.old_temperature_values,
                        scratch.old_old_temperature_values,
                        scratch.old_temperature_grads,
                        scratch.old_old_temperature_grads,
                        scratch.old_temperature_laplacians,
                        scratch.old_old_temperature_laplacians,
                        scratch.old_velocity_values,
                        scratch.old_old_velocity_values,
                        scratch.old_strain_rates,
                        scratch.old_old_strain_rates,
                        global_max_velocity,
                        global_T_range.second - global_T_range.first,
                        0.5 * (global_T_range.second + global_T_range.first),
                        global_entropy_variation,
                        cell->diameter());

    for (unsigned int q = 0; q < n_q_points; ++q)
      {
        for (unsigned int k = 0; k < dofs_per_cell; ++k)
          {
            scratch.phi_T[k] = scratch.temperature_fe_values.shape_value(k, q);
            scratch.grad_phi_T[k] =
              scratch.temperature_fe_values.shape_grad(k, q);
          }

        const double T_term_for_rhs =
          (use_bdf2_scheme ?
             (scratch.old_temperature_values[q] *
                (1 + time_step / old_time_step) -
              scratch.old_old_temperature_values[q] * (time_step * time_step) /
                (old_time_step * (time_step + old_time_step))) :
             scratch.old_temperature_values[q]);

        const double ext_T =
          (use_bdf2_scheme ? (scratch.old_temperature_values[q] *
                                (1 + time_step / old_time_step) -
                              scratch.old_old_temperature_values[q] *
                                time_step / old_time_step) :
                             scratch.old_temperature_values[q]);

        const Tensor<1, dim> ext_grad_T =
          (use_bdf2_scheme ? (scratch.old_temperature_grads[q] *
                                (1 + time_step / old_time_step) -
                              scratch.old_old_temperature_grads[q] * time_step /
                                old_time_step) :
                             scratch.old_temperature_grads[q]);

        const Tensor<1, dim> extrapolated_u =
          (use_bdf2_scheme ?
             (scratch.old_velocity_values[q] * (1 + time_step / old_time_step) -
              scratch.old_old_velocity_values[q] * time_step / old_time_step) :
             scratch.old_velocity_values[q]);

        const SymmetricTensor<2, dim> extrapolated_strain_rate =
          (use_bdf2_scheme ?
             (scratch.old_strain_rates[q] * (1 + time_step / old_time_step) -
              scratch.old_old_strain_rates[q] * time_step / old_time_step) :
             scratch.old_strain_rates[q]);

        const double gamma =
          ((EquationData::radiogenic_heating * EquationData::density(ext_T) +
            2 * EquationData::eta * extrapolated_strain_rate *
              extrapolated_strain_rate) /
           (EquationData::density(ext_T) * EquationData::specific_heat));

        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            data.local_rhs(i) +=
              (T_term_for_rhs * scratch.phi_T[i] -
               time_step * extrapolated_u * ext_grad_T * scratch.phi_T[i] -
               time_step * nu * ext_grad_T * scratch.grad_phi_T[i] +
               time_step * gamma * scratch.phi_T[i]) *
              scratch.temperature_fe_values.JxW(q);

            if (temperature_constraints.is_inhomogeneously_constrained(
                  data.local_dof_indices[i]))
              {
                for (unsigned int j = 0; j < dofs_per_cell; ++j)
                  data.matrix_for_bc(j, i) +=
                    (scratch.phi_T[i] * scratch.phi_T[j] *
                       (use_bdf2_scheme ? ((2 * time_step + old_time_step) /
                                           (time_step + old_time_step)) :
                                          1.) +
                     scratch.grad_phi_T[i] * scratch.grad_phi_T[j] *
                       EquationData::kappa * time_step) *
                    scratch.temperature_fe_values.JxW(q);
              }
          }
      }
  }
  template <int dim>
  void BoussinesqFlowProblem<dim>::copy_local_to_global_temperature_rhs(
    const Assembly::CopyData::TemperatureRHS<dim> &data)
  {
    temperature_constraints.distribute_local_to_global(data.local_rhs,
                                                       data.local_dof_indices,
                                                       temperature_rhs,
                                                       data.matrix_for_bc);
  }
In the function that runs the WorkStream for actually calculating the
right hand side, we also generate the final matrix. As mentioned above,
it is a sum of the mass matrix and the Laplace matrix, times some time
step-dependent weight. This weight is specified by the BDF-2 time
integration scheme, see the introduction in @ref step_31 "step-31". What
is new in this tutorial program (in addition to the use of MPI
parallelization and the WorkStream class), is that we now precompute the
temperature preconditioner as well. The reason is that the setup of the
Jacobi preconditioner takes a noticeable time compared to the solver
because we usually only need between 10 and 20 iterations for solving
the temperature system (this might sound strange, as Jacobi really only
consists of a diagonal, but in Trilinos it is derived from a more
general framework for point relaxation preconditioners which is a bit
inefficient). Hence, it is more efficient to precompute the
preconditioner, even though the matrix entries may slightly change
because the time step might change. This is not too big a problem
because we remesh every few time steps (and regenerate the
preconditioner then).
  template <int dim>
  void BoussinesqFlowProblem<dim>::assemble_temperature_system(
    const double maximal_velocity)
  {
    const bool use_bdf2_scheme = (timestep_number != 0);

    if (use_bdf2_scheme == true)
      {
        temperature_matrix.copy_from(temperature_mass_matrix);
        temperature_matrix *=
          (2 * time_step + old_time_step) / (time_step + old_time_step);
        temperature_matrix.add(time_step, temperature_stiffness_matrix);
      }
    else
      {
        temperature_matrix.copy_from(temperature_mass_matrix);
        temperature_matrix.add(time_step, temperature_stiffness_matrix);
      }

    if (rebuild_temperature_preconditioner == true)
      {
        T_preconditioner =
          std::make_shared<TrilinosWrappers::PreconditionJacobi>();
        T_preconditioner->initialize(temperature_matrix);
        rebuild_temperature_preconditioner = false;
      }
The next part is computing the right hand side vectors. To do so, we
first compute the average temperature @f$T_m@f$ that we use for
evaluating the artificial viscosity stabilization through the residual
@f$E(T) = (T-T_m)^2@f$. We do this by defining the midpoint between
maximum and minimum temperature as average temperature in the definition
of the entropy viscosity. An alternative would be to use the integral
average, but the results are not very sensitive to this choice. The rest
then only requires calling WorkStream::run again, binding the arguments
to the <code>local_assemble_temperature_rhs</code> function that are the
same in every call to the correct values.
    temperature_rhs = 0;

    const QGauss<dim> quadrature_formula(parameters.temperature_degree + 2);
    const std::pair<double, double> global_T_range =
      get_extrapolated_temperature_range();

    const double average_temperature =
      0.5 * (global_T_range.first + global_T_range.second);
    const double global_entropy_variation =
      get_entropy_variation(average_temperature);

    using CellFilter =
      FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>;

    auto worker =
      [this, global_T_range, maximal_velocity, global_entropy_variation](
        const typename DoFHandler<dim>::active_cell_iterator &cell,
        Assembly::Scratch::TemperatureRHS<dim> &              scratch,
        Assembly::CopyData::TemperatureRHS<dim> &             data) {
        this->local_assemble_temperature_rhs(global_T_range,
                                             maximal_velocity,
                                             global_entropy_variation,
                                             cell,
                                             scratch,
                                             data);
      };

    auto copier = [this](const Assembly::CopyData::TemperatureRHS<dim> &data) {
      this->copy_local_to_global_temperature_rhs(data);
    };

    WorkStream::run(CellFilter(IteratorFilters::LocallyOwnedCell(),
                               temperature_dof_handler.begin_active()),
                    CellFilter(IteratorFilters::LocallyOwnedCell(),
                               temperature_dof_handler.end()),
                    worker,
                    copier,
                    Assembly::Scratch::TemperatureRHS<dim>(
                      temperature_fe, stokes_fe, mapping, quadrature_formula),
                    Assembly::CopyData::TemperatureRHS<dim>(temperature_fe));

    temperature_rhs.compress(VectorOperation::add);
  }
<a name="BoussinesqFlowProblemsolve"></a>
<h4>BoussinesqFlowProblem::solve</h4>

This function solves the linear systems in each time step of the
Boussinesq problem. First, we work on the Stokes system and then on the
temperature system. In essence, it does the same things as the
respective function in @ref step_31 "step-31". However, there are a few
changes here.

The first change is related to the way we store our solution: we keep
the vectors with locally owned degrees of freedom plus ghost nodes on
each MPI node. When we enter a solver which is supposed to perform
matrix-vector products with a distributed matrix, this is not the
appropriate form, though. There, we will want to have the solution
vector to be distributed in the same way as the matrix, i.e. without any
ghosts. So what we do first is to generate a distributed vector called
<code>distributed_stokes_solution</code> and put only the locally owned
dofs into that, which is neatly done by the <code>operator=</code> of
the TrilinosWrappers::MPI::BlockVector class.

Next, we scale the pressure solution (or rather, the initial guess) for
the solver so that it matches with the length scales in the matrices, as
discussed in the introduction. We also immediately scale the pressure
solution back to the correct units after the solution is completed. We
also need to set the pressure values at hanging nodes to zero. This we
also did in @ref step_31 "step-31" in order not to disturb the Schur
complement by some vector entries that actually are irrelevant during
the solve stage. As a difference to @ref step_31 "step-31", here we do
it only for the locally owned pressure dofs. After solving for the
Stokes solution, each processor copies the distributed solution back
into the solution vector that also includes ghost elements.
The third and most obvious change is that we have two variants for the
Stokes solver: A fast solver that sometimes breaks down, and a robust
solver that is slower. This is what we already discussed in the
introduction. Here is how we realize it: First, we perform 30 iterations
with the fast solver based on the simple preconditioner based on the AMG
V-cycle instead of an approximate solve (this is indicated by the
<code>false</code> argument to the
<code>LinearSolvers::BlockSchurPreconditioner</code> object). If we
converge, everything is fine. If we do not converge, the solver control
object will throw an exception of type SolverControl::NoConvergence.
Usually, this would abort the program because we don't catch them in our
usual <code>solve()</code> functions. This is certainly not what we want
to happen here. Rather, we want to switch to the strong solver and
continue the solution process with whatever vector we got so far. Hence,
we catch the exception with the C++ try/catch mechanism. We then simply
go through the same solver sequence again in the <code>catch</code>
clause, this time passing the @p true flag to the preconditioner for the
strong solver, signaling an approximate CG solve.
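Reduced to its control flow, the fallback strategy looks like the
following sketch (identifiers such as <code>cheap_preconditioner</code>,
<code>expensive_preconditioner</code> and <code>solution</code> are
placeholders for the objects set up in the real function below):

@code
SolverControl solver_control(30, solver_tolerance); // cheap attempt: <= 30 iterations
try
  {
    SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(solver_control, mem);
    solver.solve(stokes_matrix, solution, stokes_rhs, cheap_preconditioner);
  }
catch (SolverControl::NoConvergence &)
  {
    // Robust but slower variant; it continues from whatever iterate the
    // failed cheap solve left behind in 'solution'.
    SolverControl solver_control_refined(stokes_matrix.m(), solver_tolerance);
    SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
      solver_control_refined, mem);
    solver.solve(stokes_matrix, solution, stokes_rhs, expensive_preconditioner);
  }
@endcode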
  template <int dim>
  void BoussinesqFlowProblem<dim>::solve()
  {
    {
      TimerOutput::Scope timer_section(computing_timer,
                                       "   Solve Stokes system");

      pcout << "   Solving Stokes system... " << std::flush;

      TrilinosWrappers::MPI::BlockVector distributed_stokes_solution(
        stokes_rhs);
      distributed_stokes_solution = stokes_solution;

      distributed_stokes_solution.block(1) /= EquationData::pressure_scaling;

      const unsigned int
        start = (distributed_stokes_solution.block(0).size() +
                 distributed_stokes_solution.block(1).local_range().first),
        end   = (distributed_stokes_solution.block(0).size() +
               distributed_stokes_solution.block(1).local_range().second);
      for (unsigned int i = start; i < end; ++i)
        if (stokes_constraints.is_constrained(i))
          distributed_stokes_solution(i) = 0;

      PrimitiveVectorMemory<TrilinosWrappers::MPI::BlockVector> mem;

      unsigned int  n_iterations     = 0;
      const double  solver_tolerance = 1e-8 * stokes_rhs.l2_norm();
      SolverControl solver_control(30, solver_tolerance);

      try
        {
          const LinearSolvers::BlockSchurPreconditioner<
            TrilinosWrappers::PreconditionAMG,
            TrilinosWrappers::PreconditionJacobi>
            preconditioner(stokes_matrix,
                           stokes_preconditioner_matrix,
                           *Mp_preconditioner,
                           *Amg_preconditioner,
                           false);

          SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
            solver_control,
            mem,
            SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
              30));
          solver.solve(stokes_matrix,
                       distributed_stokes_solution,
                       stokes_rhs,
                       preconditioner);

          n_iterations = solver_control.last_step();
        }
      catch (SolverControl::NoConvergence &)
        {
          const LinearSolvers::BlockSchurPreconditioner<
            TrilinosWrappers::PreconditionAMG,
            TrilinosWrappers::PreconditionJacobi>
            preconditioner(stokes_matrix,
                           stokes_preconditioner_matrix,
                           *Mp_preconditioner,
                           *Amg_preconditioner,
                           true);

          SolverControl solver_control_refined(stokes_matrix.m(),
                                               solver_tolerance);
          SolverFGMRES<TrilinosWrappers::MPI::BlockVector> solver(
            solver_control_refined,
            mem,
            SolverFGMRES<TrilinosWrappers::MPI::BlockVector>::AdditionalData(
              50));
          solver.solve(stokes_matrix,
                       distributed_stokes_solution,
                       stokes_rhs,
                       preconditioner);

          n_iterations =
            (solver_control.last_step() + solver_control_refined.last_step());
        }

      stokes_constraints.distribute(distributed_stokes_solution);

      distributed_stokes_solution.block(1) *= EquationData::pressure_scaling;

      stokes_solution = distributed_stokes_solution;
      pcout << n_iterations << " iterations." << std::endl;
    }
Now let's turn to the temperature part: First, we compute the time step
size. We found that we need smaller time steps for 3D than for 2D for
the shell geometry. This is because the cells are more distorted in that
case (it is the smallest edge length that determines the CFL
number). Instead of computing the time step from maximum velocity and
minimal mesh size as in @ref step_31 "step-31", we compute local CFL
numbers, i.e., on each cell we compute the maximum velocity divided by
the mesh size, and compute the maximum of them. Hence, we need to choose
the factor in front of the time step slightly smaller.
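Written out, the step size chosen in the code below is
@f[
  k_n \;=\; \frac{c}{2.1\, d\,\sqrt{d}}\;
            \frac{1}{q_T\,\max_K \dfrac{\|\mathbf u\|_{L^\infty(K)}}{h_K}},
  \qquad
  c = \begin{cases} 1 & \text{in 2d}, \\ 0.25 & \text{in 3d}, \end{cases}
@f]
where @f$d@f$ is the space dimension, @f$q_T@f$ the polynomial degree of
the temperature element, and the maximum in the denominator is the local
CFL quantity described above, returned by <code>get_cfl_number()</code>.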
After temperature right hand side assembly, we solve the linear system
for temperature (with fully distributed vectors without any ghosts),
apply constraints and copy the vector back to one with ghosts.

In the end, we extract the temperature range similarly to
@ref step_31 "step-31" to produce some output (for example in order to
help us choose the stabilization constants, as discussed in the
introduction). The only difference is that we need to exchange maxima
over all processors.
    {
      TimerOutput::Scope timer_section(computing_timer,
                                       "   Assemble temperature rhs");

      old_time_step = time_step;

      const double scaling = (dim == 3 ? 0.25 : 1.0);
      time_step = (scaling / (2.1 * dim * std::sqrt(1. * dim)) /
                   (parameters.temperature_degree * get_cfl_number()));

      const double maximal_velocity = get_maximal_velocity();
      pcout << "   Maximal velocity: "
            << maximal_velocity * EquationData::year_in_seconds * 100
            << " cm/year" << std::endl;
      pcout << "   "
            << "Time step: " << time_step / EquationData::year_in_seconds
            << " years" << std::endl;

      temperature_solution = old_temperature_solution;
      assemble_temperature_system(maximal_velocity);
    }

    {
      TimerOutput::Scope timer_section(computing_timer,
                                       "   Solve temperature system");

      SolverControl solver_control(temperature_matrix.m(),
                                   1e-12 * temperature_rhs.l2_norm());
      SolverCG<TrilinosWrappers::MPI::Vector> cg(solver_control);

      TrilinosWrappers::MPI::Vector distributed_temperature_solution(
        temperature_rhs);
      distributed_temperature_solution = temperature_solution;

      cg.solve(temperature_matrix,
               distributed_temperature_solution,
               temperature_rhs,
               *T_preconditioner);

      temperature_constraints.distribute(distributed_temperature_solution);
      temperature_solution = distributed_temperature_solution;

      pcout << "   " << solver_control.last_step()
            << " CG iterations for temperature" << std::endl;

      double temperature[2] = {std::numeric_limits<double>::max(),
                               -std::numeric_limits<double>::max()};
      double global_temperature[2];

      for (unsigned int i =
             distributed_temperature_solution.local_range().first;
           i < distributed_temperature_solution.local_range().second;
           ++i)
        {
          temperature[0] =
            std::min<double>(temperature[0],
                             distributed_temperature_solution(i));
          temperature[1] =
            std::max<double>(temperature[1],
                             distributed_temperature_solution(i));
        }

      temperature[0] *= -1.0;
      Utilities::MPI::max(temperature, MPI_COMM_WORLD, global_temperature);
      global_temperature[0] *= -1.0;

      pcout << "   Temperature range: " << global_temperature[0] << ' '
            << global_temperature[1] << std::endl;
    }
  }
<a name="BoussinesqFlowProblemoutput_results"></a>
<h4>BoussinesqFlowProblem::output_results</h4>

Next comes the function that generates the output. The quantities to
output could be introduced manually like we did in @ref step_31
"step-31". An alternative is to hand this task over to a class
PostProcessor that inherits from the class DataPostprocessor and can be
attached to DataOut. This allows us to output derived quantities from
the solution, like the friction heating included in this example. It
overloads the virtual function
DataPostprocessor::evaluate_vector_field(), which is then internally
called from DataOut::build_patches(). We have to give it values of the
numerical solution, its derivatives, normals to the cell, the actual
evaluation points and any additional quantities. This follows the same
procedure as discussed in @ref step_29 "step-29" and other programs.
  template <int dim>
  class BoussinesqFlowProblem<dim>::Postprocessor
    : public DataPostprocessor<dim>
  {
  public:
    Postprocessor(const unsigned int partition, const double minimal_pressure);

    virtual void evaluate_vector_field(
      const DataPostprocessorInputs::Vector<dim> &inputs,
      std::vector<Vector<double>> &computed_quantities) const override;

    virtual std::vector<std::string> get_names() const override;

    virtual std::vector<
      DataComponentInterpretation::DataComponentInterpretation>
    get_data_component_interpretation() const override;

    virtual UpdateFlags get_needed_update_flags() const override;

  private:
    const unsigned int partition;
    const double       minimal_pressure;
  };


  template <int dim>
  BoussinesqFlowProblem<dim>::Postprocessor::Postprocessor(
    const unsigned int partition,
    const double       minimal_pressure)
    : partition(partition)
    , minimal_pressure(minimal_pressure)
  {}
Here we define the names for the variables we want to output. These are
the actual solution values for velocity, pressure, and temperature, as
well as the friction heating and to each cell the number of the
processor that owns it. This allows us to visualize the partitioning of
the domain among the processors. Except for the velocity, which is
vector-valued, all other quantities are scalar.

  template <int dim>
  std::vector<std::string>
  BoussinesqFlowProblem<dim>::Postprocessor::get_names() const
  {
    std::vector<std::string> solution_names(dim, "velocity");
    solution_names.emplace_back("p");
    solution_names.emplace_back("T");
    solution_names.emplace_back("friction_heating");
    solution_names.emplace_back("partition");

    return solution_names;
  }


  template <int dim>
  std::vector<DataComponentInterpretation::DataComponentInterpretation>
  BoussinesqFlowProblem<dim>::Postprocessor::get_data_component_interpretation()
    const
  {
    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      interpretation(dim,
                     DataComponentInterpretation::component_is_part_of_vector);

    interpretation.push_back(DataComponentInterpretation::component_is_scalar);
    interpretation.push_back(DataComponentInterpretation::component_is_scalar);
    interpretation.push_back(DataComponentInterpretation::component_is_scalar);
    interpretation.push_back(DataComponentInterpretation::component_is_scalar);

    return interpretation;
  }


  template <int dim>
  UpdateFlags
  BoussinesqFlowProblem<dim>::Postprocessor::get_needed_update_flags() const
  {
    return update_values | update_gradients | update_quadrature_points;
  }
Now we implement the function that computes the derived quantities. As
we also did for the output, we rescale the velocity from its SI units to
something more readable, namely cm/year. Next, the pressure is scaled to
be between 0 and the maximum pressure. This makes it more easily
comparable -- in essence making all pressure variables positive or
zero. Temperature is taken as is, and the friction heating is computed
as @f$2 \eta \varepsilon(\mathbf{u}) \cdot \varepsilon(\mathbf{u})@f$.

The quantities we output here are more for illustration, rather than for
actual scientific value. We come back to this briefly in the results
section of this program and explain what one may in fact be interested
in.
  template <int dim>
  void BoussinesqFlowProblem<dim>::Postprocessor::evaluate_vector_field(
    const DataPostprocessorInputs::Vector<dim> &inputs,
    std::vector<Vector<double>> &               computed_quantities) const
  {
    const unsigned int n_quadrature_points = inputs.solution_values.size();
    Assert(inputs.solution_gradients.size() == n_quadrature_points,
           ExcInternalError());
    Assert(computed_quantities.size() == n_quadrature_points,
           ExcInternalError());
    Assert(inputs.solution_values[0].size() == dim + 2, ExcInternalError());

    for (unsigned int q = 0; q < n_quadrature_points; ++q)
      {
        for (unsigned int d = 0; d < dim; ++d)
          computed_quantities[q](d) = (inputs.solution_values[q](d) *
                                       EquationData::year_in_seconds * 100);

        const double pressure =
          (inputs.solution_values[q](dim) - minimal_pressure);
        computed_quantities[q](dim) = pressure;

        const double temperature        = inputs.solution_values[q](dim + 1);
        computed_quantities[q](dim + 1) = temperature;

        Tensor<2, dim> grad_u;
        for (unsigned int d = 0; d < dim; ++d)
          grad_u[d] = inputs.solution_gradients[q][d];
        const SymmetricTensor<2, dim> strain_rate = symmetrize(grad_u);
        computed_quantities[q](dim + 2) =
          2 * EquationData::eta * strain_rate * strain_rate;

        computed_quantities[q](dim + 3) = partition;
      }
  }
The <code>output_results()</code> function has a similar task to the one
in @ref step_31 "step-31". However, here we are going to demonstrate a
different technique on how to merge output from different DoFHandler
objects. The way we're going to achieve this recombination is to create
a joint DoFHandler that collects both components, the Stokes solution
and the temperature solution. This can be nicely done by combining the
finite elements from the two systems to form one FESystem, and let this
collective system define a new DoFHandler object. To be sure that
everything was done correctly, we perform a sanity check that ensures
that we got all the dofs from both Stokes and temperature even in the
combined system. We then combine the data vectors. Unfortunately, there
is no straight-forward relation that tells us how to sort Stokes and
temperature vector into the joint vector. The way we can get around this
trouble is to rely on the information collected in the FESystem. For
each dof on a cell, the joint finite element knows to which equation
component (velocity component, pressure, or temperature) it belongs --
that's the information we need! So we step through all cells (with
iterators into all three DoFHandlers moving in sync), and for each joint
cell dof, we read out that component using the
FiniteElement::system_to_base_index function (see there for a
description of what the various parts of its return value contain). We
also need to keep track whether we're on a Stokes dof or a temperature
dof, which is contained in
joint_fe.system_to_base_index(i).first.first. Eventually, the
dof_indices data structures on either of the three systems tell us how
the relation between global vector and local dofs looks like on the
present cell, which concludes this tedious work. We make sure that each
processor only works on the subdomain it owns locally (and not on ghost
or artificial cells) when building the joint solution vector. The same
will then have to be done in DataOut::build_patches(), but that function
does so automatically.

What we end up with is a set of patches that we can write using the
functions in DataOutBase in a variety of output formats. Here, we then
have to pay attention that what each processor writes is really only its
own part of the domain, i.e. we will want to write each processor's
contribution into a separate file. This we do by adding an additional
number to the filename when we write the solution. This is not really
new, we did it similarly in @ref step_40 "step-40". Note that we write
in the compressed format @p .vtu instead of plain vtk files, which saves
quite some storage.

All the rest of the work is done in the PostProcessor class.
  template <int dim>
  void BoussinesqFlowProblem<dim>::output_results()
  {
    TimerOutput::Scope timer_section(computing_timer, "Postprocessing");

    const FESystem<dim> joint_fe(stokes_fe, 1, temperature_fe, 1);

    DoFHandler<dim> joint_dof_handler(triangulation);
    joint_dof_handler.distribute_dofs(joint_fe);
    Assert(joint_dof_handler.n_dofs() ==
             stokes_dof_handler.n_dofs() + temperature_dof_handler.n_dofs(),
           ExcInternalError());

    TrilinosWrappers::MPI::Vector joint_solution;
    joint_solution.reinit(joint_dof_handler.locally_owned_dofs(),
                          MPI_COMM_WORLD);

    {
      std::vector<types::global_dof_index> local_joint_dof_indices(
        joint_fe.n_dofs_per_cell());
      std::vector<types::global_dof_index> local_stokes_dof_indices(
        stokes_fe.n_dofs_per_cell());
      std::vector<types::global_dof_index> local_temperature_dof_indices(
        temperature_fe.n_dofs_per_cell());

      typename DoFHandler<dim>::active_cell_iterator
        joint_cell       = joint_dof_handler.begin_active(),
        joint_endc       = joint_dof_handler.end(),
        stokes_cell      = stokes_dof_handler.begin_active(),
        temperature_cell = temperature_dof_handler.begin_active();
      for (; joint_cell != joint_endc;
           ++joint_cell, ++stokes_cell, ++temperature_cell)
        if (joint_cell->is_locally_owned())
          {
            joint_cell->get_dof_indices(local_joint_dof_indices);
            stokes_cell->get_dof_indices(local_stokes_dof_indices);
            temperature_cell->get_dof_indices(local_temperature_dof_indices);

            for (unsigned int i = 0; i < joint_fe.n_dofs_per_cell(); ++i)
              if (joint_fe.system_to_base_index(i).first.first == 0)
                {
                  Assert(joint_fe.system_to_base_index(i).second <
                           local_stokes_dof_indices.size(),
                         ExcInternalError());

                  joint_solution(local_joint_dof_indices[i]) = stokes_solution(
                    local_stokes_dof_indices[joint_fe.system_to_base_index(i)
                                               .second]);
                }
              else
                {
                  Assert(joint_fe.system_to_base_index(i).first.first == 1,
                         ExcInternalError());
                  Assert(joint_fe.system_to_base_index(i).second <
                           local_temperature_dof_indices.size(),
                         ExcInternalError());

                  joint_solution(local_joint_dof_indices[i]) =
                    temperature_solution(
                      local_temperature_dof_indices
                        [joint_fe.system_to_base_index(i).second]);
                }
          }
    }

    joint_solution.compress(VectorOperation::insert);

    IndexSet locally_relevant_joint_dofs(joint_dof_handler.n_dofs());
    DoFTools::extract_locally_relevant_dofs(joint_dof_handler,
                                            locally_relevant_joint_dofs);
    TrilinosWrappers::MPI::Vector locally_relevant_joint_solution;
    locally_relevant_joint_solution.reinit(locally_relevant_joint_dofs,
                                           MPI_COMM_WORLD);
    locally_relevant_joint_solution = joint_solution;

    Postprocessor postprocessor(Utilities::MPI::this_mpi_process(
                                  MPI_COMM_WORLD),
                                stokes_solution.block(1).min());

    DataOut<dim> data_out;
    data_out.attach_dof_handler(joint_dof_handler);
    data_out.add_data_vector(locally_relevant_joint_solution, postprocessor);
    data_out.build_patches();

    static int out_index = 0;
    data_out.write_vtu_with_pvtu_record(
      "./", "solution", out_index, MPI_COMM_WORLD, 5);

    ++out_index;
  }
<a name="BoussinesqFlowProblemrefine_mesh"></a>
<h4>BoussinesqFlowProblem::refine_mesh</h4>
This function isn't really new either. Since the <code>setup_dofs</code>
function that we call in the middle has its own timer section, we split timing
this function into two sections. It will also allow us to easily identify
which of the two is more expensive.

One thing of note, however, is that we only want to compute error indicators
on the locally owned subdomain. In order to achieve this, we pass one
additional argument to the KellyErrorEstimator::estimate function. Note that
the vector for error estimates is resized to the number of active cells
present on the current process, which is less than the total number of active
cells on all processors (but more than the number of locally owned active
cells); each processor only has a few coarse cells around the locally owned
ones, as also explained in @ref step_40 "step-40".

The local error estimates are then handed to a %parallel version of
GridRefinement (in namespace parallel::distributed::GridRefinement, see also
@ref step_40 "step-40") which looks at the errors and finds the cells that
need refinement by comparing the error values across processors. As in
@ref step_31 "step-31", we want to limit the maximum grid level. So in case
some cells have been marked that are already at the finest level, we simply
clear the refine flags.
  template <int dim>
  void
  BoussinesqFlowProblem<dim>::refine_mesh(const unsigned int max_grid_level)
  {
    parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
      temperature_trans(temperature_dof_handler);
    parallel::distributed::SolutionTransfer<dim,
                                            TrilinosWrappers::MPI::BlockVector>
      stokes_trans(stokes_dof_handler);

    {
      TimerOutput::Scope timer_section(computing_timer,
                                       "Refine mesh structure, part 1");

      Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

      KellyErrorEstimator<dim>::estimate(
        temperature_dof_handler,
        QGauss<dim - 1>(parameters.temperature_degree + 1),
        std::map<types::boundary_id, const Function<dim> *>(),
        temperature_solution,
        estimated_error_per_cell,
        ComponentMask(),
        nullptr,
        0,
        triangulation.locally_owned_subdomain());

      parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
        triangulation, estimated_error_per_cell, 0.3, 0.1);

      if (triangulation.n_levels() > max_grid_level)
        for (typename Triangulation<dim>::active_cell_iterator cell =
               triangulation.begin_active(max_grid_level);
             cell != triangulation.end();
             ++cell)
          cell->clear_refine_flag();
With all flags marked as necessary, we can then tell the
parallel::distributed::SolutionTransfer objects to get ready to transfer data
from one mesh to the next, which they will do when notified by the
Triangulation as part of the @p execute_coarsening_and_refinement() call. The
syntax is similar to the non-%parallel solution transfer (with the exception
that here a pointer to the vector entries is enough). The remainder of the
function further down below is then concerned with setting up the data
structures again after mesh refinement and restoring the solution vectors on
the new mesh.
      std::vector<const TrilinosWrappers::MPI::Vector *> x_temperature(2);
      x_temperature[0] = &temperature_solution;
      x_temperature[1] = &old_temperature_solution;
      std::vector<const TrilinosWrappers::MPI::BlockVector *> x_stokes(2);
      x_stokes[0] = &stokes_solution;
      x_stokes[1] = &old_stokes_solution;

      triangulation.prepare_coarsening_and_refinement();

      temperature_trans.prepare_for_coarsening_and_refinement(x_temperature);
      stokes_trans.prepare_for_coarsening_and_refinement(x_stokes);

      triangulation.execute_coarsening_and_refinement();
    }

    setup_dofs();

    {
      TimerOutput::Scope timer_section(computing_timer,
                                       "Refine mesh structure, part 2");

      {
        TrilinosWrappers::MPI::Vector distributed_temp1(temperature_rhs);
        TrilinosWrappers::MPI::Vector distributed_temp2(temperature_rhs);

        std::vector<TrilinosWrappers::MPI::Vector *> tmp(2);
        tmp[0] = &(distributed_temp1);
        tmp[1] = &(distributed_temp2);
        temperature_trans.interpolate(tmp);

        // enforce constraints to make the interpolated solution conforming on
        // the new mesh:
        temperature_constraints.distribute(distributed_temp1);
        temperature_constraints.distribute(distributed_temp2);

        temperature_solution     = distributed_temp1;
        old_temperature_solution = distributed_temp2;
      }

      {
        TrilinosWrappers::MPI::BlockVector distributed_stokes(stokes_rhs);
        TrilinosWrappers::MPI::BlockVector old_distributed_stokes(stokes_rhs);

        std::vector<TrilinosWrappers::MPI::BlockVector *> stokes_tmp(2);
        stokes_tmp[0] = &(distributed_stokes);
        stokes_tmp[1] = &(old_distributed_stokes);

        stokes_trans.interpolate(stokes_tmp);

        // enforce constraints to make the interpolated solution conforming on
        // the new mesh:
        stokes_constraints.distribute(distributed_stokes);
        stokes_constraints.distribute(old_distributed_stokes);

        stokes_solution     = distributed_stokes;
        old_stokes_solution = old_distributed_stokes;
      }
    }
  }
<a name="BoussinesqFlowProblemrun"></a>
<h4>BoussinesqFlowProblem::run</h4>
This is the final and controlling function in this class. It, in fact, runs
the entire rest of the program and is, once more, very similar to
@ref step_31 "step-31". The only substantial difference is that we use a
different mesh now (a GridGenerator::hyper_shell instead of a simple cube
geometry).
  template <int dim>
  void BoussinesqFlowProblem<dim>::run()
  {
    GridGenerator::hyper_shell(triangulation,
                               Point<dim>(),
                               EquationData::R0,
                               EquationData::R1,
                               (dim == 3) ? 96 : 12,
                               true);

    global_Omega_diameter = GridTools::diameter(triangulation);

    triangulation.refine_global(parameters.initial_global_refinement);

    setup_dofs();

    unsigned int pre_refinement_step = 0;
  start_time_iteration:

    TrilinosWrappers::MPI::Vector solution(
      temperature_dof_handler.locally_owned_dofs());

VectorTools::project supports parallel vector classes with most standard
finite elements via deal.II's own native MatrixFree framework: since we use
standard Lagrange elements of moderate order, this function works well here.

    VectorTools::project(temperature_dof_handler,
                         temperature_constraints,
                         QGauss<dim>(parameters.temperature_degree + 2),
                         EquationData::TemperatureInitialValues<dim>(),
                         solution);
Having so computed the current temperature field, let us set the member
variable that holds the temperature nodes. Strictly speaking, we really only
need to set <code>old_temperature_solution</code> since the first thing we
will do is to compute the Stokes solution that only requires the previous time
step's temperature field. That said, nothing good can come from not
initializing the other vectors as well (especially since it's a relatively
cheap operation and we only have to do it once at the beginning of the
program) if we ever want to extend our numerical method or physical model, and
so we initialize <code>old_temperature_solution</code> and
<code>old_old_temperature_solution</code> as well. The assignment makes sure
that the vectors on the left hand side (which were initialized to contain
ghost elements as well) also get the correct ghost elements. In other words,
the assignment here requires communication between processors:

    temperature_solution         = solution;
    old_temperature_solution     = solution;
    old_old_temperature_solution = solution;
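To make the ghost-element remark above more tangible, here is a minimal sketch
(illustration only; it reuses this program's <code>temperature_dof_handler</code>
and the variable names are made up) of the difference between a purely
distributed vector and a ghosted one, and of the communication implied by the
assignment:

@code
#include <deal.II/base/index_set.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/lac/trilinos_vector.h>

using namespace dealii;

// 'ghosted' can be read everywhere on locally relevant rows but should not be
// written into; 'distributed' holds only locally owned entries and is what
// solvers and VectorTools::project produce. The assignment at the end fetches
// the ghost values from their owning processors.
IndexSet owned = temperature_dof_handler.locally_owned_dofs();
IndexSet relevant;
DoFTools::extract_locally_relevant_dofs(temperature_dof_handler, relevant);

TrilinosWrappers::MPI::Vector ghosted(owned, relevant, MPI_COMM_WORLD);
TrilinosWrappers::MPI::Vector distributed(owned, MPI_COMM_WORLD);

// ... fill 'distributed', e.g. by a solver or VectorTools::project ...

ghosted = distributed; // requires communication between processors
@endcode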
    timestep_number = 0;
    time_step = old_time_step = 0;

    double time = 0;

    do
      {
        pcout << "Timestep " << timestep_number
              << ": t=" << time / EquationData::year_in_seconds << " years"
              << std::endl;

        assemble_stokes_system();
        build_stokes_preconditioner();
        assemble_temperature_matrix();

        solve();

        pcout << std::endl;
        if ((timestep_number == 0) &&
            (pre_refinement_step < parameters.initial_adaptive_refinement))
          {
            refine_mesh(parameters.initial_global_refinement +
                        parameters.initial_adaptive_refinement);
            ++pre_refinement_step;
            goto start_time_iteration;
          }
        else if ((timestep_number > 0) &&
                 (timestep_number % parameters.adaptive_refinement_interval ==
                  0))
          refine_mesh(parameters.initial_global_refinement +
                      parameters.initial_adaptive_refinement);

        if ((parameters.generate_graphical_output == true) &&
            (timestep_number % parameters.graphical_output_interval == 0))
          output_results();
In order to speed up linear solvers, we extrapolate the solutions from the old
time levels to the new one. This gives a very good initial guess, cutting the
number of iterations needed in solvers by more than one half. We do not need
to extrapolate in the last iteration, so if we reached the final time, we stop
here.
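Concretely, if @f$k_n@f$ and @f$k_{n-1}@f$ denote the current and previous
time step sizes, the linear extrapolation implemented with <code>sadd()</code>
below builds the start guess from the two most recent solutions
@f$\mathbf x^{n}@f$ and @f$\mathbf x^{n-1}@f$ as
@f{eqnarray*}{
  \mathbf x^{\text{guess}}
  = \left(1+\frac{k_n}{k_{n-1}}\right) \mathbf x^{n}
    - \frac{k_n}{k_{n-1}}\, \mathbf x^{n-1},
@f}
i.e., the value obtained by linearly extending the last two solutions to the
new time level; it is exact whenever the solution varies linearly in time.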
As the last thing during a time step (before actually bumping up the number of
the time step), we check whether the current time step number is divisible by
100, and if so we let the computing timer print a summary of CPU times spent
so far.
        if (time > parameters.end_time * EquationData::year_in_seconds)
          break;

        old_old_stokes_solution      = old_stokes_solution;
        old_stokes_solution          = stokes_solution;
        old_old_temperature_solution = old_temperature_solution;
        old_temperature_solution     = temperature_solution;
        if (old_time_step > 0)
          {
            // Trilinos sadd does not like ghost vectors even as input. Copy
            // into distributed vectors for now:
            {
              TrilinosWrappers::MPI::BlockVector distr_solution(stokes_rhs);
              distr_solution = stokes_solution;
              TrilinosWrappers::MPI::BlockVector distr_old_solution(stokes_rhs);
              distr_old_solution = old_old_stokes_solution;
              distr_solution.sadd(1. + time_step / old_time_step,
                                  -time_step / old_time_step,
                                  distr_old_solution);
              stokes_solution = distr_solution;
            }
            {
              TrilinosWrappers::MPI::Vector distr_solution(temperature_rhs);
              distr_solution = temperature_solution;
              TrilinosWrappers::MPI::Vector distr_old_solution(temperature_rhs);
              distr_old_solution = old_old_temperature_solution;
              distr_solution.sadd(1. + time_step / old_time_step,
                                  -time_step / old_time_step,
                                  distr_old_solution);
              temperature_solution = distr_solution;
            }
          }

        if ((timestep_number > 0) && (timestep_number % 100 == 0))
          computing_timer.print_summary();

        time += time_step;
        ++timestep_number;
      }
    while (true);
If we are generating graphical output, do so also for the last time step
unless we had just done so before we left the do-while loop above:

    if ((parameters.generate_graphical_output == true) &&
        !((timestep_number - 1) % parameters.graphical_output_interval == 0))
      output_results();
  }
<a name="Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>
The main function is short as usual and very similar to the one in
@ref step_31 "step-31". Since we use a parameter file which is specified as an
argument in the command line, we have to read it in here and pass it on to the
Parameters class for parsing. If no filename is given in the command line, we
simply use the <code>step-32.prm</code> file which is distributed together
with the program.

Because 3d computations are simply very slow unless you throw a lot of
processors at them, the program defaults to 2d. You can get the 3d version by
changing the constant dimension below to 3.
int main(int argc, char *argv[])
{
  try
    {
      using namespace Step32;
      using namespace dealii;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(
        argc, argv, numbers::invalid_unsigned_int);

      std::string parameter_filename;
      if (argc >= 2)
        parameter_filename = argv[1];
      else
        parameter_filename = "step-32.prm";

      const int dim = 2;
      BoussinesqFlowProblem<dim>::Parameters parameters(parameter_filename);
      BoussinesqFlowProblem<dim>             flow_problem(parameters);
      flow_problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
<a name="Results"></a><h1>Results</h1>
When run, the program simulates convection in 3d in much the same way as
@ref step_31 "step-31" did, though with an entirely different testcase.
<a name="Comparisonofresultswithstep31"></a><h3>Comparison of results with step-31</h3>
Before we go to this testcase, however, let us show a few results from a
slightly earlier version of this program that was solving exactly the testcase
we used in @ref step_31 "step-31", just that we now solve it in parallel and
with much higher resolution. We show these results mainly for comparison.

Here are two images that show this higher resolution if we choose a 3d
computation in <code>main()</code> and if we set
<code>initial_refinement=3</code> and <code>n_pre_refinement_steps=4</code>.
At the time steps shown, the meshes had around 72,000 and 236,000 cells, for a
total of 2,680,000 and 8,250,000 degrees of freedom, respectively, more than
an order of magnitude more than we had available in @ref step_31 "step-31":
<table align="center" class="doxtable">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.0.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32.3d.cube.1.png" alt=""></td>
  </tr>
</table>
The computation was done on a subset of 50 processors of the Brazos cluster at
Texas A&M University.
<a name="Resultsfora2dcircularshelltestcase"></a><h3>Results for a 2d circular shell testcase</h3>
Next, we will run @ref step_32 "step-32" with the parameter file in the
directory with one change: we increase the final time to 1e9. Here we are
using 16 processors. The command to launch is (note that step-32.prm is the
default):

$ mpirun -np 16 ./step-32

Note that running a job on a cluster typically requires going through a job
scheduler, which we won't discuss here. The output will look roughly like the
following:

$ mpirun -np 16 ./step-32
Number of active cells: 12,288 (on 6 levels)
Number of degrees of freedom: 186,624 (99,840+36,864+49,920)

Timestep 0: t=0 years

   Rebuilding Stokes preconditioner...
   Solving Stokes system... 41 iterations.
   Maximal velocity: 60.4935 cm/year
   Time step: 18166.9 years
   17 CG iterations for temperature
   Temperature range: 973 4273.16

Number of active cells: 15,921 (on 7 levels)
Number of degrees of freedom: 252,723 (136,640+47,763+68,320)

Timestep 0: t=0 years

   Rebuilding Stokes preconditioner...
   Solving Stokes system... 50 iterations.
   Maximal velocity: 60.3223 cm/year
   Time step: 10557.6 years
   19 CG iterations for temperature
   Temperature range: 973 4273.16

Number of active cells: 19,926 (on 8 levels)
Number of degrees of freedom: 321,246 (174,312+59,778+87,156)

Timestep 0: t=0 years

   Rebuilding Stokes preconditioner...
   Solving Stokes system... 50 iterations.
   Maximal velocity: 57.8396 cm/year
   Time step: 5453.78 years
   18 CG iterations for temperature
   Temperature range: 973 4273.16

Timestep 1: t=5453.78 years

   Solving Stokes system... 49 iterations.
   Maximal velocity: 59.0231 cm/year
   Time step: 5345.86 years
   18 CG iterations for temperature
   Temperature range: 973 4273.16

Timestep 2: t=10799.6 years

   Solving Stokes system... 24 iterations.
   Maximal velocity: 60.2139 cm/year
   Time step: 5241.51 years
   17 CG iterations for temperature
   Temperature range: 973 4273.16

[...]

Timestep 100: t=272151 years

   Solving Stokes system... 21 iterations.
   Maximal velocity: 161.546 cm/year
   Time step: 1672.96 years
   17 CG iterations for temperature
   Temperature range: 973 4282.57

Number of active cells: 56,085 (on 8 levels)
Number of degrees of freedom: 903,408 (490,102+168,255+245,051)
+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |       115s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble Stokes system          |       103 |      2.82s |       2.5% |
| Assemble temperature matrices   |        12 |     0.452s |      0.39% |
| Assemble temperature rhs        |       103 |      11.5s |        10% |
| Build Stokes preconditioner     |        12 |      2.09s |       1.8% |
| Solve Stokes system             |       103 |      90.4s |        79% |
| Solve temperature system        |       103 |      1.53s |       1.3% |
| Postprocessing                  |         3 |     0.532s |      0.46% |
| Refine mesh structure, part 1   |        12 |      0.93s |      0.81% |
| Refine mesh structure, part 2   |        12 |     0.384s |      0.33% |
| Setup dof systems               |        13 |      2.96s |       2.6% |
+---------------------------------+-----------+------------+------------+
The timing output at the end of the whole simulation looks like this:

+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |  9.14e+04s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble Stokes system          |     47045 |  2.05e+03s |       2.2% |
| Assemble temperature matrices   |      4707 |       310s |      0.34% |
| Assemble temperature rhs        |     47045 |   8.7e+03s |       9.5% |
| Build Stokes preconditioner     |      4707 |  1.48e+03s |       1.6% |
| Solve Stokes system             |     47045 |  7.34e+04s |        80% |
| Solve temperature system        |     47045 |  1.46e+03s |       1.6% |
| Postprocessing                  |      1883 |       222s |      0.24% |
| Refine mesh structure, part 1   |      4706 |       641s |       0.7% |
| Refine mesh structure, part 2   |      4706 |       259s |      0.28% |
| Setup dof systems               |      4707 |  1.86e+03s |         2% |
+---------------------------------+-----------+------------+------------+
The simulation terminates when the time reaches the 1 billion years selected
in the input file. You can extrapolate from this how long a simulation would
take for a different final time (the time step size ultimately settles on
somewhere around 20,000 years, so computing for two billion years will take
100,000 time steps, give or take 20%). As can be seen here, we spend most of
the compute time in assembling linear systems and, above all, in solving the
Stokes system.

To demonstrate the output, we show it from every 1250th time step here:
<table align="center" class="doxtable">
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-000.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-050.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-100.png" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-150.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-200.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-250.png" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-300.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-350.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-400.png" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-450.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-500.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-550.png" alt=""></td>
  </tr>
  <tr>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-time-600.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-cells.png" alt=""></td>
    <td><img src="https://www.dealii.org/images/steps/developer/step-32-2d-partition.png" alt=""></td>
  </tr>
</table>
The last two images show the grid as well as the partitioning of the mesh for
the same computation with 16 subdomains and 16 processors. The full dynamics
of this simulation are really only visible by looking at an animation, for
example the one
<a href="https://www.dealii.org/images/steps/developer/step-32-2d-temperature.webm">shown
on this site</a>. This animation is well worth watching due to its artistic
quality and entrancing depiction of the evolution of the magma plumes.

If you watch the movie, you'll see that the convection pattern goes through
several stages: First, it gets rid of the unstable temperature layering with
the hot material overlain by the dense cold material. After this great driver
is removed and we have a sort of stable situation, a few blobs start to
separate from the hot boundary layer at the inner ring and rise up, with a few
cold fingers also dropping down from the outer boundary layer. During this
phase, the solution remains mostly symmetric, reflecting the 12-fold symmetry
of the original mesh. In a final phase, the fluid enters vigorous chaotic
stirring in which all symmetries are lost. This is a pattern that then
continues to dominate the flow.
These different phases can also be identified if we look at the maximal
velocity as a function of time in the simulation:

<img src="https://www.dealii.org/images/steps/developer/step-32.2d.t_vs_vmax.png" alt="">

Here, the velocity (shown in centimeters per year) becomes very large, on the
order of several meters per year, at the beginning when the temperature
layering is unstable. It then calms down to relatively small values before
picking up again in the chaotic stirring regime. There, it remains in the
range of 10-40 centimeters per year, quite within the physically expected
region.
<a name="Resultsfora3dsphericalshelltestcase"></a><h3>Results for a 3d spherical shell testcase</h3>
3d computations are very expensive computationally. Furthermore, as seen
above, interesting behavior only starts after quite a long time, requiring
more CPU hours than are available on a typical cluster. Consequently, rather
than showing a complete simulation here, let us simply show a couple of
pictures we have obtained using the successor to this program, called
<i>ASPECT</i> (short for <i>Advanced %Solver for Problems in Earth's
ConvecTion</i>), that is being developed independently of deal.II and that
already incorporates some of the extensions discussed below. The following two
pictures show isocontours of the temperature and the partition of the domain
(along with the mesh) onto 512 processors:

<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.solution.png" alt="">

<img src="https://www.dealii.org/images/steps/developer/step-32.3d-sphere.partition.png" alt="">
<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
There are many directions in which this program could be extended. As
mentioned at the end of the introduction, most of these are under active
development in the <i>ASPECT</i> (short for <i>Advanced %Solver for Problems
in Earth's ConvecTion</i>) code at the time this tutorial program is being
finished. Specifically, the following are certainly topics that one should
address to make the program more useful:

<ul>
<li> <b>Adiabatic heating/cooling:</b>
  The temperature field we get in our simulations after a while
  is mostly constant with boundary layers at the inner and outer
  boundary, and streamers of cold and hot material mixing
  everything. Yet, this doesn't match our expectation that things
  closer to the earth core should be hotter than closer to the
  surface. The reason is that the energy equation we have used does
  not include a term that describes adiabatic cooling and heating:
  rock, like gas, heats up as you compress it. Consequently, material
  that rises up cools adiabatically, and cold material that sinks down
  heats adiabatically. The correct temperature equation would
  therefore look somewhat like this:
  @f{eqnarray*}{
    \frac{D T}{Dt}
    -
    \nabla \cdot \kappa \nabla T &=& \gamma + \tau\frac{Dp}{Dt},
  @f}
  or, expanding the advected derivative @f$\frac{D}{Dt} =
  \frac{\partial}{\partial t} + \mathbf u \cdot \nabla@f$:
  @f{eqnarray*}{
    \frac{\partial T}{\partial t}
    +
    {\mathbf u} \cdot \nabla T
    -
    \nabla \cdot \kappa \nabla T &=& \gamma +
    \tau\left\{\frac{\partial
    p}{\partial t} + \mathbf u \cdot \nabla p \right\}.
  @f}
  In other words, as pressure increases in a rock volume
  (@f$\frac{Dp}{Dt}>0@f$) we get an additional heat source, and vice
  versa.

  The time derivative of the pressure is a bit awkward to
  implement. If necessary, one could approximate it using the fact
  outlined in the introduction that the pressure can be decomposed
  into a dynamic component due to temperature differences and the
  resulting flow, and a static component that results solely from the
  static pressure of the overlying rock. Since the latter is much
  bigger, one may approximate @f$p\approx p_{\text{static}}=-\rho_{\text{ref}}
  [1+\beta T_{\text{ref}}] \varphi@f$, and consequently
  @f$\frac{Dp}{Dt} \approx \left\{- \mathbf u \cdot \nabla \rho_{\text{ref}}
  [1+\beta T_{\text{ref}}]\varphi\right\} = \rho_{\text{ref}}
  [1+\beta T_{\text{ref}}] \mathbf u \cdot \mathbf g@f$.
  In other words, if the fluid is moving in the direction of gravity
  (downward) it will be compressed and because in that case @f$\mathbf u
  \cdot \mathbf g > 0@f$ we get a positive heat source. Conversely, the
  fluid will cool down if it moves against the direction of gravity.
<li> <b>Compressibility:</b>
  As already hinted at in the temperature model above,
  mantle rocks are not incompressible. Rather, given the enormous pressures in
  the earth mantle (at the core-mantle boundary, the pressure is approximately
  140 GPa, equivalent to 1,400,000 times atmospheric pressure), rock actually
  does compress to something around 1.5 times the density it would have
  at surface pressure. Modeling this presents any number of
  difficulties. Primarily, the mass conservation equation is no longer
  @f$\textrm{div}\;\mathbf u=0@f$ but should read
  @f$\textrm{div}(\rho\mathbf u)=0@f$ where the density @f$\rho@f$ is now no longer
  spatially constant but depends on temperature and pressure. A consequence is
  that the model is now no longer linear; a linearized version of the Stokes
  equation is also no longer symmetric, requiring us to rethink preconditioners
  and, possibly, even the discretization. We won't go into detail here as to
  how this can be resolved.
<li> <b>Nonlinear material models:</b> As already hinted at in various places,
  material parameters such as the density, the viscosity, and the various
  thermal parameters are not constant throughout the earth mantle. Rather,
  they nonlinearly depend on the pressure and temperature, and in the case of
  the viscosity on the strain rate @f$\varepsilon(\mathbf u)@f$. For complicated
  models, the only way to solve such models accurately may be to actually
  iterate this dependence out in each time step, rather than simply freezing
  coefficients at values extrapolated from the previous time step(s).
<li> <b>Checkpoint/restart:</b> Running this program in 2d on a number of
  processors allows solving realistic models in a day or two. However, in 3d,
  compute times are so large that one runs into two typical problems: (i) On
  most compute clusters, the queuing system limits run times for individual
  jobs to 2 or 3 days; (ii) losing the results of a computation due to
  hardware failures, misconfigurations, or power outages is a shame when
  running on hundreds of processors for a couple of days. Both of these
  problems can be addressed by periodically saving the state of the program
  and, if necessary, restarting the program at this point. This technique is
  commonly called <i>checkpoint/restart</i> and it requires that the entire
  state of the program is written to a permanent storage location (e.g. a hard
  drive). Given the complexity of the data structures of this program, this is
  not entirely trivial (it may also involve writing gigabytes or more of
  data), but it can be made easier by realizing that one can save the state
  between two time steps where it essentially only consists of the mesh and
  solution vectors; during restart one would then first re-enumerate degrees
  of freedom in the same way as done before and then re-assemble
  matrices. Nevertheless, given the distributed nature of the data structures
  involved here, saving and restoring the state of a program is not
  trivial. An additional complexity is introduced by the fact that one may
  want to change the number of processors between runs, for example because
  one may wish to continue computing on a mesh that is finer than the one used
  to precompute a starting temperature field at an intermediate time. (A
  minimal sketch of what saving and restoring the mesh and solution vectors
  might look like is shown right after this list.)
<li> <b>Predictive postprocessing:</b> The point of computations like this is
  not simply to solve the equations. Rather, it is typically the exploration
  of different physical models and their comparison with things that we can
  measure at the earth surface, in order to find which models are realistic
  and which are contradicted by reality. To this end, we need to compute
  quantities from our solution vectors that are related to what we can
  observe. Among these are, for example, heat fluxes at the surface of the
  earth, as well as seismic velocities throughout the mantle as these affect
  earthquake waves that are recorded by seismographs.
<li> <b>Better refinement criteria:</b> As can be seen above for the
  3d case, the mesh in 3d is primarily refined along the inner
  boundary. This is because the boundary layer there is stronger than
  any other transition in the domain, leading us to refine there almost
  exclusively and basically not at all following the plumes. One
  certainly needs better refinement criteria to track the parts of the
  solution we are really interested in better than the criterion used
  here.
</ul>
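To illustrate the checkpoint/restart item above, here is a minimal sketch of
the basic serialization mechanism offered by
parallel::distributed::Triangulation and
parallel::distributed::SolutionTransfer. It reuses this program's member
variables, handles only the temperature vectors (the Stokes vectors and scalar
state such as the current time would have to be stored in the same way or
separately), and the file name is made up; it is an illustration under these
assumptions, not part of the tutorial program:

@code
// Writing a checkpoint: attach the solution vectors to a SolutionTransfer
// object and let the triangulation serialize itself together with them.
{
  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    temperature_trans(temperature_dof_handler);

  std::vector<const TrilinosWrappers::MPI::Vector *> x_temperature(2);
  x_temperature[0] = &temperature_solution;
  x_temperature[1] = &old_temperature_solution;

  temperature_trans.prepare_for_serialization(x_temperature);
  triangulation.save("checkpoint.mesh");
}

// Restarting: re-create the same coarse hyper_shell mesh first, then load the
// saved refinement information, redistribute degrees of freedom exactly as
// before, and deserialize into fully distributed vectors that can afterwards
// be copied into the ghosted solution vectors.
{
  triangulation.load("checkpoint.mesh");
  setup_dofs();

  parallel::distributed::SolutionTransfer<dim, TrilinosWrappers::MPI::Vector>
    temperature_trans(temperature_dof_handler);

  TrilinosWrappers::MPI::Vector distributed_temp1(temperature_rhs);
  TrilinosWrappers::MPI::Vector distributed_temp2(temperature_rhs);

  std::vector<TrilinosWrappers::MPI::Vector *> tmp(2);
  tmp[0] = &distributed_temp1;
  tmp[1] = &distributed_temp2;
  temperature_trans.deserialize(tmp);

  temperature_constraints.distribute(distributed_temp1);
  temperature_constraints.distribute(distributed_temp2);
  temperature_solution     = distributed_temp1;
  old_temperature_solution = distributed_temp2;
}
@endcode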
There are many other ways to extend the current program. However, rather than
discussing them here, let us point to the much larger open source code ASPECT
(see https://aspect.geodynamics.org/ ) that constitutes the further
development of @ref step_32 "step-32" and that already includes many such
possible extensions.
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-32.cc"