<a name="step_15-ThecodeMinimalSurfaceProblemcodeclassimplementation"></a>
<h3>The <code>MinimalSurfaceProblem</code> class implementation</h3>
<a name="step_15-MinimalSurfaceProblemMinimalSurfaceProblem"></a>
<h4>MinimalSurfaceProblem::MinimalSurfaceProblem</h4>
<a name="step_15-MinimalSurfaceProblemsetup_system"></a>
<h4>MinimalSurfaceProblem::setup_system</h4>
    dof_handler.distribute_dofs(fe);
    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp, zero_constraints);

    sparsity_pattern.copy_from(dsp);
    system_matrix.reinit(sparsity_pattern);
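The same function also rebuilds the two AffineConstraints objects used
throughout the program: `zero_constraints` (zero boundary values plus hanging
node constraints, used for the Newton updates) and `nonzero_constraints` (the
actual boundary values, used for the solution itself). The following is a
minimal sketch of how these are typically built, assuming the `BoundaryValues`
function class of this program:

@code
    zero_constraints.clear();
    VectorTools::interpolate_boundary_values(dof_handler,
                                             0, // boundary id
                                             Functions::ZeroFunction<dim>(),
                                             zero_constraints);
    DoFTools::make_hanging_node_constraints(dof_handler, zero_constraints);
    zero_constraints.close();

    nonzero_constraints.clear();
    VectorTools::interpolate_boundary_values(dof_handler,
                                             0, // boundary id
                                             BoundaryValues<dim>(),
                                             nonzero_constraints);
    DoFTools::make_hanging_node_constraints(dof_handler, nonzero_constraints);
    nonzero_constraints.close();
@endcode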
<a name="step_15-MinimalSurfaceProblemassemble_system"></a>
<h4>MinimalSurfaceProblem::assemble_system</h4>
As discussed in the introduction, we have to use zero boundary values for the
Newton updates; this is done by using the `zero_constraints` object when
assembling into the global matrix and right hand side vector.
The top of the function contains the usual boilerplate code, setting up
the objects that allow us to evaluate shape functions at quadrature
points and temporary storage locations for the local matrices and
vectors, as well as for the gradients of the previous solution at the
quadrature points. We then start the loop over all cells:
  template <int dim>
  void MinimalSurfaceProblem<dim>::assemble_system()
  {
    const QGauss<dim> quadrature_formula(fe.degree + 1);

    FEValues<dim> fe_values(fe,
                            quadrature_formula,
                            update_gradients | update_quadrature_points |
                              update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);

    std::vector<Tensor<1, dim>> old_solution_gradients(n_q_points);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        cell_matrix = 0;
        cell_rhs    = 0;

        fe_values.reinit(cell);
For the assembly of the linear system, we have to obtain the values of the
previous solution's gradients at the quadrature points. The
FEValues::get_function_gradients function takes the vector that represents
the current solution and evaluates the gradients of the corresponding finite
element field at the quadrature points of the present cell:
        fe_values.get_function_gradients(current_solution,
                                         old_solution_gradients);

        for (unsigned int q = 0; q < n_q_points; ++q)
          {
            const double coeff =
              1. / std::sqrt(1. + old_solution_gradients[q] *
                                    old_solution_gradients[q]);

            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              {
                for (unsigned int j = 0; j < dofs_per_cell; ++j)
                  cell_matrix(i, j) +=
                    (((fe_values.shape_grad(i, q)      // ((\nabla \phi_i
                       * coeff                         //   * a_n
                       * fe_values.shape_grad(j, q))   //   * \nabla \phi_j)
                      -                                //  -
                      (fe_values.shape_grad(i, q)      //  (\nabla \phi_i
                       * coeff * coeff * coeff         //   * a_n^3
                       * (fe_values.shape_grad(j, q)   //   * (\nabla \phi_j
                          * old_solution_gradients[q]) //      * \nabla u_n)
                       * old_solution_gradients[q]))   //   * \nabla u_n))
                     * fe_values.JxW(q));              // * dx

                cell_rhs(i) -= (fe_values.shape_grad(i, q)  // \nabla \phi_i
                                * coeff                     // * a_n
                                * old_solution_gradients[q] // * \nabla u_n
                                * fe_values.JxW(q));        // * dx
              }
          }

        cell->get_dof_indices(local_dof_indices);
        zero_constraints.distribute_local_to_global(
          cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
      }
  }
<a name="step_15-MinimalSurfaceProblemsolve"></a>
<h4>MinimalSurfaceProblem::solve</h4>
    preconditioner.initialize(system_matrix, 1.2);
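For context, the call above sits inside an otherwise conventional
preconditioned CG solve for the Newton update. The following sketch shows the
surrounding calls; the concrete stopping tolerance is an assumption of this
sketch, not taken from the excerpt above:

@code
    SolverControl            solver_control(system_rhs.size(),
                                            system_rhs.l2_norm() * 1e-6);
    SolverCG<Vector<double>> solver(solver_control);

    PreconditionSSOR<SparseMatrix<double>> preconditioner;
    preconditioner.initialize(system_matrix, 1.2);

    solver.solve(system_matrix, newton_update, system_rhs, preconditioner);

    // Make the update consistent with the (zero) boundary values and
    // hanging node constraints, then take a damped Newton step:
    zero_constraints.distribute(newton_update);

    current_solution.add(determine_step_length(), newton_update);
@endcode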
<a name="step_15-MinimalSurfaceProblemrefine_mesh"></a>
<h4>MinimalSurfaceProblem::refine_mesh</h4>
In principle, you don't have to do this by hand
(Triangulation::execute_coarsening_and_refinement does this for
you). However, we need to initialize the SolutionTransfer class, and it
needs to know the final set of cells that will be coarsened or refined
in order to store the data from the old mesh and transfer it to the new
one. Thus, we call the function by hand:
      triangulation.prepare_coarsening_and_refinement();
With this out of the way, we initialize a SolutionTransfer object with
the present DoFHandler. We make a copy of the solution vector and attach
it to the SolutionTransfer. Now we can actually execute the refinement
and create the new matrices and vectors, including the vector
`current_solution` that will hold the current solution on the new mesh
after calling SolutionTransfer::interpolate():
      SolutionTransfer<dim> solution_transfer(dof_handler);
      const Vector<double>  coarse_solution = current_solution;
      solution_transfer.prepare_for_coarsening_and_refinement(coarse_solution);

      triangulation.execute_coarsening_and_refinement();

      setup_system();

      solution_transfer.interpolate(current_solution);
On the new mesh, there are different hanging nodes, for which constraints
have been recomputed in `setup_system()` above. To be on the safe side, we
should make sure that the interpolated solution's vector entries satisfy
these constraints.
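In code, this fix-up amounts to re-applying the boundary-and-hanging-node
constraints stored in the `nonzero_constraints` object introduced earlier,
presumably along these lines:

@code
      nonzero_constraints.distribute(current_solution);
@endcode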
<a name="step_15-MinimalSurfaceProblemcompute_residual"></a>
<h4>MinimalSurfaceProblem::compute_residual</h4>
When determining optimal step lengths (which the current version of the
program does not do), one needs to compute the residual
@f$\left<F(u^n+\alpha^n\;\delta u^n),\varphi_i\right>@f$, and so this is what
we implement here: the function takes the step length @f$\alpha^n@f$ as an
argument. The original functionality is of course obtained by passing a zero
as argument.
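As a usage sketch, where `alpha` stands for whatever trial step length a line
search would currently be testing (a hypothetical variable, not part of the
function above):

@code
    const double residual_now   = compute_residual(0);     // residual of u^n
    const double residual_trial = compute_residual(alpha); // of u^n + alpha*du^n
@endcode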
In the function below, we first set up a vector for the residual, and
then a vector for the evaluation point @f$u^n+\alpha^n\;\delta u^n@f$. This
is followed by the same boilerplate code we use for all integration
operations:
  template <int dim>
  double MinimalSurfaceProblem<dim>::compute_residual(const double alpha) const
  {
    Vector<double> residual(dof_handler.n_dofs());

    Vector<double> evaluation_point(dof_handler.n_dofs());
    evaluation_point = current_solution;
    evaluation_point.add(alpha, newton_update);

    const QGauss<dim> quadrature_formula(fe.degree + 1);
    FEValues<dim>     fe_values(fe,
                                quadrature_formula,
                                update_gradients | update_quadrature_points |
                                  update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    Vector<double>              cell_residual(dofs_per_cell);
    std::vector<Tensor<1, dim>> gradients(n_q_points);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        cell_residual = 0;
        fe_values.reinit(cell);
The actual computation is much as in
<code>assemble_system()</code>. We first evaluate the gradients of
@f$u^n+\alpha^n\,\delta u^n@f$ at the quadrature points, then compute
the coefficient @f$a_n@f$, and then plug it all into the formula for
the residual:
        fe_values.get_function_gradients(evaluation_point, gradients);

        for (unsigned int q = 0; q < n_q_points; ++q)
          {
            const double coeff =
              1. / std::sqrt(1 + gradients[q] * gradients[q]);

            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              cell_residual(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
                                   * coeff                    // * a_n
                                   * gradients[q]             // * \nabla u_n
                                   * fe_values.JxW(q));       // * dx
          }

        cell->get_dof_indices(local_dof_indices);
        zero_constraints.distribute_local_to_global(cell_residual,
                                                    local_dof_indices,
                                                    residual);
      }

    return residual.l2_norm();
  }
<a name="step_15-MinimalSurfaceProblemdetermine_step_length"></a>
<h4>MinimalSurfaceProblem::determine_step_length</h4>
This simple choice comes with no guarantee for the convergence of Newton's
method. We will discuss better strategies below in the results section, and
@ref step_77 "step-77" also covers this aspect.
  template <int dim>
  double MinimalSurfaceProblem<dim>::determine_step_length() const
  {
    return 0.1;
  }
<a name="step_15-MinimalSurfaceProblemoutput_results"></a>
<h4>MinimalSurfaceProblem::output_results</h4>
This last function to be called from `run()` outputs the current solution
(and the Newton update) in graphical form as a VTU file. It is entirely the
same as what has been used in previous tutorials.
  template <int dim>
  void MinimalSurfaceProblem<dim>::output_results(
    const unsigned int refinement_cycle) const
  {
    DataOut<dim> data_out;

    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(current_solution, "solution");
    data_out.add_data_vector(newton_update, "update");
    data_out.build_patches();

    const std::string filename =
      "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtu";
    std::ofstream output(filename);
    data_out.write_vtu(output);
  }
<a name="step_15-MinimalSurfaceProblemrun"></a>
<h4>MinimalSurfaceProblem::run</h4>
In the run function, we build the first grid and then have the top-level
logic for the Newton iteration.

As described in the introduction, the domain is the unit disk around
the origin, created in the same way as shown in @ref step_6 "step-6". The mesh is
globally refined twice, followed later on by several adaptive cycles.

Before starting the Newton loop, we also need to ensure that the first
Newton iterate already has the correct boundary values, as discussed in
the introduction.
  template <int dim>
  void MinimalSurfaceProblem<dim>::run()
  {
    GridGenerator::hyper_ball(triangulation);
    triangulation.refine_global(2);

    setup_system();
    nonzero_constraints.distribute(current_solution);
The Newton iteration starts next. We iterate until the (norm of the)
residual computed at the end of the previous iteration is less than
@f$10^{-3}@f$, as checked at the end of the `do { ... } while` loop that
starts below.
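Schematically, the loop has the following shape. The details of mesh
refinement and graphical output between Newton steps are simplified here, and
the number of inner iterations per mesh is an illustrative choice, not
prescribed by the text above:

@code
    double       last_residual_norm = std::numeric_limits<double>::max();
    unsigned int refinement_cycle   = 0;
    do
      {
        if (refinement_cycle != 0)
          refine_mesh();

        // A handful of Newton steps on the current mesh:
        for (unsigned int inner_iteration = 0; inner_iteration < 5;
             ++inner_iteration)
          {
            assemble_system();
            solve();
            last_residual_norm = compute_residual(0);
          }

        output_results(refinement_cycle);
        ++refinement_cycle;
      }
    while (last_residual_norm > 1e-3);
@endcode

Inside this loop, the program reports the residual before the first Newton
step on each mesh and after every subsequent step: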
        std::cout << "  Initial residual: " << compute_residual(0) << std::endl;

        std::cout << "  Residual: " << compute_residual(0) << std::endl;

        std::cout << std::endl;
<a name="step_15-Themainfunction"></a>
<h3>The main function</h3>
int main()
{
  try
    {
      using namespace Step15;

      MinimalSurfaceProblem<2> minimal_surface_problem;
      minimal_surface_problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  return 0;
}
<div class="twocolumn" style="width: 80%">
  <img src="https://www.dealii.org/images/steps/developer/step_15_solution_1.png"
       alt="Solution after zero cycles with contour lines." width="230" height="273">
  <img src="https://www.dealii.org/images/steps/developer/step_15_solution_2.png"
       alt="Solution after one cycle with contour lines." width="230" height="273">
  <img src="https://www.dealii.org/images/steps/developer/step_15_solution_3.png"
       alt="Solution after two cycles with contour lines." width="230" height="273">
  <img src="https://www.dealii.org/images/steps/developer/step_15_solution_4.png"
       alt="Solution after three cycles with contour lines." width="230" height="273">
</div>
the boundary doesn't look like a sine, whereas it increasingly does as the
mesh is refined.
The mesh is mostly refined near the boundary, where the solution
increases or decreases strongly, whereas it is coarsened on
the inside of the domain, where nothing interesting happens,
because not much changes there. The ninth grid and solution are shown here:
<div class="onecolumn" style="width: 60%">
  <img src="https://www.dealii.org/images/steps/developer/step_15_solution_9.png"
       alt="Grid and solution of the ninth cycle with contour lines." width="507" height="507">
</div>
<a name="step-15-extensions"></a>
<a name="step_15-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
- It does not connect the nonlinear iteration with the mesh refinement
  iteration.

Obviously, a better program would have to address these two points.
We will discuss them in the following.
<a name="step_15-Steplengthcontrol"></a><h4> Step length control </h4>
  converges by damping the iteration using a <i>step length</i>
  @f$0<\alpha^n\le 1@f$.
- It exhibits rapid convergence of quadratic order if (i) the step length is
  chosen as @f$\alpha^n=1@f$, and (ii) it does in fact converge with this
  choice of step length.
A consequence of these two observations is that a successful strategy is to
choose @f$\alpha^n<1@f$ for the initial iterations until the iterate has come
close enough to allow for convergence with full step length, at which point we
want to switch to @f$\alpha^n=1@f$. The question is how to choose @f$\alpha^n@f$ in an
automatic fashion that satisfies these criteria.
We do not want to review the literature on this topic here, but only briefly
mention that there are two fundamental approaches to the problem: backtracking
line search and trust region methods. The former is more widely used for
partial differential equations and essentially does the following:
- Compute a search direction @f$\delta u^n@f$.
- See if the resulting residual of @f$u^n + \alpha^n\;\delta u^n@f$ with
  @f$\alpha^n=1@f$ is "substantially smaller" than that of @f$u^n@f$ alone.
- If so, then take @f$\alpha^n=1@f$.
- If not, try whether the residual is "substantially smaller" with
  @f$\alpha^n=2/3@f$.
- If so, then take @f$\alpha^n=2/3@f$.
- If not, try whether the residual is "substantially smaller" with
  @f$\alpha^n=(2/3)^2@f$.
- Etc.
One can of course choose other factors @f$r, r^2, \ldots@f$ than the
@f$2/3, (2/3)^2, \ldots@f$ chosen above, for @f$0<r<1@f$. It is obvious where
the term "backtracking" comes from: we try a long step, but if that doesn't
work we try a shorter one, and ever shorter ones, etc.
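In the context of this program, such a strategy could be realized with a few
lines by reusing the `compute_residual()` function from above. The following
is a minimal sketch in which the acceptance test is plain decrease (rather
than a genuine "substantially smaller" criterion) and the lower cut-off for
@f$\alpha^n@f$ is an arbitrary choice:

@code
  template <int dim>
  double MinimalSurfaceProblem<dim>::determine_step_length() const
  {
    const double old_residual = compute_residual(0);

    // Backtrack: start with a full step and shrink by 2/3 until the
    // residual decreases (or we give up and accept a small step).
    double alpha = 1.;
    while ((compute_residual(alpha) >= old_residual) && (alpha > 0.01))
      alpha *= 2. / 3.;

    return alpha;
  }
@endcode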
@f{align*}{
  F'(u^{n},\delta u^{n}) &=- F(u^{n})
@f}
so that we can compute the update
@f{align*}{
  u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
@f}
with the solution @f$\delta u^{n}@f$ of the Newton step. For the problem
here, we could compute the derivative @f$F'(u,\delta u)@f$ by hand and
would obtain
@f{align*}{
  F'(u,\delta u)
  =
  - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u|^{2}\right)^{\frac{1}{2}}}\nabla
  \delta u \right) +
  \nabla \cdot \left( \frac{\nabla u \cdot
  \nabla \delta u}{\left(1+|\nabla u|^{2}\right)^{\frac{3}{2}}} \nabla u
  \right).
@f}
But this is already a sizable expression that is cumbersome both to
derive and to implement. It is also, in some sense, duplicative: If we
implement what @f$F(u)@f$ is somewhere in the code, then @f$F'(u,\delta u)@f$
is not an independent piece of information but something that, at least in
principle, a computer should be able to infer itself.
Wouldn't it be nice if that could actually happen? That is, if we really
only had to implement @f$F(u)@f$, and the derivative were then obtained
implicitly? That is in fact possible via "automatic differentiation"
techniques.
If you do not
want to go all that way to change the structure of the program, then
here is a different approach: Storing the system matrix (the "Jacobian")
in single-precision instead of double precision floating point numbers
(i.e., using `float` instead of `double` as the data type). This reduces
the amount of memory necessary by a factor of 1.5 (each matrix entry
in a SparseMatrix object requires storing the column index -- 4 bytes --
and the actual value -- either 4 or 8 bytes, so each entry shrinks from 12
to 8 bytes), and consequently
will speed up matrix-vector products by a factor of around 1.5 as well because,
as pointed out above, most of the time is spent loading data from memory,
and loading 2/3 the amount of data should be roughly 3/2 times as fast. All
of this could be done using SparseMatrix<float> as the data type
for the system matrix. (In principle, we would then also like it if
the SparseDirectUMFPACK solver we use in this program computed and
stored its sparse decomposition in `float` arithmetic. This is not
currently implemented, though it could be done.)
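In terms of code, this is mostly a matter of the declared types. A sketch of
the member declarations the change would touch, keeping the vectors in double
precision for the reason discussed below:

@code
  SparsityPattern     sparsity_pattern;
  SparseMatrix<float> system_matrix; // was: SparseMatrix<double>

  // Solution, update, and right hand side stay in double precision:
  Vector<double> current_solution;
  Vector<double> newton_update;
  Vector<double> system_rhs;
@endcode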
Of course, there is a downside to this: Lower precision data storage
also implies that we will not solve the linear system of the Newton
step as accurately as we might with `double` precision. At least
while we are far away from the solution of the nonlinear problem,
this may not be terrible: If we can do a Newton iteration in half
the time, we can afford to do a couple more Newton steps if the
search directions aren't quite as good.
But it turns out that even that is often not necessary: Both
theory and computational experience show that it is entirely
sufficient to store the Jacobian matrix in single precision
*as long as one stores the right hand side in double precision*.
A great overview of why this is so, along with numerical
experiments that also consider "half precision" floating point
numbers, can be found in @cite Kelley2022 .
<a name="step_15-PlainProg"></a>
<h1> The plain program</h1>
@include "step-15.cc"