<a name="step_77-ThecodeMinimalSurfaceProblemcodeclassimplementation"></a>
<h3>The <code>MinimalSurfaceProblem</code> class implementation</h3>

<a name="step_77-Constructorandsetupfunctions"></a>
<h4>Constructor and setup functions</h4>
The following few functions are also essentially copies of what
@ref step_15 "step-15" already does, and so there is little to discuss.
@code
template <int dim>
MinimalSurfaceProblem<dim>::MinimalSurfaceProblem()
  : dof_handler(triangulation)
  , fe(1)
{}



template <int dim>
void MinimalSurfaceProblem<dim>::setup_system()
{
  dof_handler.distribute_dofs(fe);
  current_solution.reinit(dof_handler.n_dofs());

  zero_constraints.clear();
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0,
                                           Functions::ZeroFunction<dim>(),
                                           zero_constraints);
  DoFTools::make_hanging_node_constraints(dof_handler, zero_constraints);
  zero_constraints.close();

  nonzero_constraints.clear();
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0,
                                           BoundaryValues<dim>(),
                                           nonzero_constraints);
  DoFTools::make_hanging_node_constraints(dof_handler, nonzero_constraints);
  nonzero_constraints.close();

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler, dsp, zero_constraints);

  sparsity_pattern.copy_from(dsp);
  jacobian_matrix.reinit(sparsity_pattern);
  jacobian_matrix_factorization.reset();
}
@endcode
<a name="step_77-AssemblingandfactorizingtheJacobianmatrix"></a>
<h4>Assembling and factorizing the Jacobian matrix</h4>
The following function is then responsible for assembling and factorizing
the Jacobian matrix. The first half of the function is in essence the
`assemble_system()` function of @ref step_15 "step-15", except that it does not deal with
also forming a right hand side vector (i.e., the residual) since we do not
always have to do these operations at the same time.

We put the whole assembly functionality into a code block enclosed by curly
braces so that we can use a TimerOutput::Scope variable to measure how much
time is spent in this code block, excluding everything that happens in this
function after the matching closing brace `}`.
@code
template <int dim>
void MinimalSurfaceProblem<dim>::compute_and_factorize_jacobian(
  const Vector<double> &evaluation_point)
{
  {
    TimerOutput::Scope t(computing_timer, "assembling the Jacobian");

    std::cout << "  Computing Jacobian matrix" << std::endl;

    const QGauss<dim> quadrature_formula(fe.degree + 1);

    jacobian_matrix = 0;

    FEValues<dim> fe_values(fe,
                            quadrature_formula,
                            update_gradients | update_quadrature_points |
                              update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    FullMatrix<double>          cell_matrix(dofs_per_cell, dofs_per_cell);
    std::vector<Tensor<1, dim>> evaluation_point_gradients(n_q_points);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        cell_matrix = 0;

        fe_values.reinit(cell);

        fe_values.get_function_gradients(evaluation_point,
                                         evaluation_point_gradients);

        for (unsigned int q = 0; q < n_q_points; ++q)
          {
            const double coeff =
              1.0 / std::sqrt(1 + evaluation_point_gradients[q] *
                                    evaluation_point_gradients[q]);

            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              for (unsigned int j = 0; j < dofs_per_cell; ++j)
                cell_matrix(i, j) +=
                  (((fe_values.shape_grad(i, q)              // ((\nabla \phi_i
                     * coeff                                 //   * a_n
                     * fe_values.shape_grad(j, q))           //   * \nabla \phi_j)
                    -                                        //  -
                    (fe_values.shape_grad(i, q)              //  (\nabla \phi_i
                     * coeff * coeff * coeff                 //   * a_n^3
                     * (fe_values.shape_grad(j, q)           //   * (\nabla \phi_j
                        * evaluation_point_gradients[q])     //      * \nabla u_n)
                     * evaluation_point_gradients[q]))       //   * \nabla u_n))
                   * fe_values.JxW(q));                      // * dx
          }

        cell->get_dof_indices(local_dof_indices);
        zero_constraints.distribute_local_to_global(cell_matrix,
                                                    local_dof_indices,
                                                    jacobian_matrix);
      }
  }
@endcode
The second half of the function then deals with factorizing the
so-computed matrix. To do this, we first create a new SparseDirectUMFPACK
object and by assigning it to the member variable
`jacobian_matrix_factorization`, we also destroy whatever object that
pointer previously pointed to (if any). Then we tell the object to
factorize the Jacobian.

As above, we enclose this block of code into curly braces and use a timer
to assess how long this part of the program takes.

(Strictly speaking, we don't actually need the matrix any more after we
are done here, and could throw the matrix object away. A code intended to
be memory efficient would do this, and only create the matrix object in
this function, rather than as a member variable of the surrounding class.
We omit this step here because using the same coding style as in previous
tutorial programs breeds familiarity with the common style and helps make
these tutorial programs easier to read.)
@code
  {
    TimerOutput::Scope t(computing_timer, "factorizing the Jacobian");

    std::cout << "  Factorizing Jacobian matrix" << std::endl;

    jacobian_matrix_factorization = std::make_unique<SparseDirectUMFPACK>();
    jacobian_matrix_factorization->factorize(jacobian_matrix);
  }
}
@endcode
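
To make the parenthetical remark above concrete, here is a minimal sketch
(hypothetical, not part of the actual program) of what the memory-leaner
variant could look like: the matrix becomes a local variable whose storage
is released as soon as the factorization exists.
@code
template <int dim>
void MinimalSurfaceProblem<dim>::compute_and_factorize_jacobian(
  const Vector<double> &evaluation_point)
{
  // Hypothetical variant: a local matrix object instead of the member
  // variable; it only lives until the end of this function.
  SparseMatrix<double> jacobian_matrix(sparsity_pattern);

  // ...assemble into jacobian_matrix exactly as shown above...

  jacobian_matrix_factorization = std::make_unique<SparseDirectUMFPACK>();
  jacobian_matrix_factorization->factorize(jacobian_matrix);
} // jacobian_matrix is destroyed here; only the factorization survives
@endcode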
<a name="step_77-Computingtheresidualvector"></a>
<h4>Computing the residual vector</h4>
The second part of what `assemble_system()` used to do in @ref step_15 "step-15" is
computing the residual vector, i.e., the right hand side vector of the
Newton linear systems. We have broken this out of the previous function,
but the following function will be easy to understand if you understood
what `assemble_system()` in @ref step_15 "step-15" did. Importantly, however, we need to
compute the residual not linearized around the current solution vector, but
whatever we get from KINSOL. This is necessary for operations such as line
search where we want to know what the residual @f$F(U^k + \alpha_k \delta
U^k)@f$ is for different values of @f$\alpha_k@f$; KINSOL in those cases simply
gives us the argument to the function @f$F@f$ and we then compute the residual
@f$F(\cdot)@f$ at this point.

The function prints the norm of the so-computed residual at the end as a
way for us to follow along the progress of the program.
@code
template <int dim>
void MinimalSurfaceProblem<dim>::compute_residual(
  const Vector<double> &evaluation_point,
  Vector<double>       &residual)
{
  TimerOutput::Scope t(computing_timer, "assembling the residual");

  std::cout << "  Computing residual vector..." << std::flush;

  residual = 0.0;

  const QGauss<dim> quadrature_formula(fe.degree + 1);
  FEValues<dim>     fe_values(fe,
                          quadrature_formula,
                          update_gradients | update_quadrature_points |
                            update_JxW_values);

  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
  const unsigned int n_q_points    = quadrature_formula.size();

  Vector<double>              cell_residual(dofs_per_cell);
  std::vector<Tensor<1, dim>> evaluation_point_gradients(n_q_points);

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      cell_residual = 0;

      fe_values.reinit(cell);

      fe_values.get_function_gradients(evaluation_point,
                                       evaluation_point_gradients);

      for (unsigned int q = 0; q < n_q_points; ++q)
        {
          const double coeff =
            1.0 / std::sqrt(1 + evaluation_point_gradients[q] *
                                  evaluation_point_gradients[q]);

          for (unsigned int i = 0; i < dofs_per_cell; ++i)
            cell_residual(i) +=
              (fe_values.shape_grad(i, q)      // \nabla \phi_i
               * coeff                         // * a_n
               * evaluation_point_gradients[q] // * \nabla u_n
               * fe_values.JxW(q));            // * dx
        }

      cell->get_dof_indices(local_dof_indices);
      zero_constraints.distribute_local_to_global(cell_residual,
                                                  local_dof_indices,
                                                  residual);
    }

  std::cout << " norm=" << residual.l2_norm() << std::endl;
}
@endcode
<a name="step_77-SolvinglinearsystemswiththeJacobianmatrix"></a>
<h4>Solving linear systems with the Jacobian matrix</h4>
Next up is the function that implements the solution of a linear system
with the Jacobian matrix. Since we have already factored the matrix when we
built the matrix, solving a linear system comes down to applying the
inverse matrix to the given right hand side vector: This is what the
SparseDirectUMFPACK::vmult() function does that we use here. Following
this, we have to make sure that we also address the values of hanging nodes
in the solution vector, and this is done using
AffineConstraints::distribute().

The function takes an additional, but unused, argument `tolerance` that
indicates how accurately we have to solve the linear system. The meaning of
this argument is discussed in the introduction in the context of the
"Eisenstat Walker trick", but since we are using a direct rather than an
iterative solver, we are not using this opportunity to solve linear systems
only inexactly.
@code
template <int dim>
void MinimalSurfaceProblem<dim>::solve(const Vector<double> &rhs,
                                       Vector<double>       &solution,
                                       const double /*tolerance*/)
{
  TimerOutput::Scope t(computing_timer, "linear system solve");

  std::cout << "  Solving linear system" << std::endl;

  jacobian_matrix_factorization->vmult(solution, rhs);
  zero_constraints.distribute(solution);
}
@endcode
<a name="step_77-Refiningthemeshsettingboundaryvaluesandgeneratinggraphicaloutput"></a>
<h4>Refining the mesh, setting boundary values, and generating graphical output</h4>

The following functions are again simply copies of the ones in
@ref step_15 "step-15":
@code
template <int dim>
void MinimalSurfaceProblem<dim>::refine_mesh()
{
  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

  KellyErrorEstimator<dim>::estimate(
    dof_handler,
    QGauss<dim - 1>(fe.degree + 1),
    std::map<types::boundary_id, const Function<dim> *>(),
    current_solution,
    estimated_error_per_cell);

  GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                  estimated_error_per_cell,
                                                  0.3,
                                                  0.03);

  triangulation.prepare_coarsening_and_refinement();

  SolutionTransfer<dim> solution_transfer(dof_handler);
  const Vector<double>  coarse_solution = current_solution;
  solution_transfer.prepare_for_coarsening_and_refinement(coarse_solution);

  triangulation.execute_coarsening_and_refinement();

  setup_system();

  solution_transfer.interpolate(coarse_solution, current_solution);
  nonzero_constraints.distribute(current_solution);
}



template <int dim>
void MinimalSurfaceProblem<dim>::output_results(
  const unsigned int refinement_cycle)
{
  TimerOutput::Scope t(computing_timer, "graphical output");

  DataOut<dim> data_out;

  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(current_solution, "solution");
  data_out.build_patches();

  const std::string filename =
    "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtu";
  std::ofstream output(filename);
  data_out.write_vtu(output);
}
@endcode
<a name="step_77-Therunfunctionandtheoveralllogicoftheprogram"></a>
<h4>The run() function and the overall logic of the program</h4>
The only function that *really* is interesting in this program is the one
that drives the overall algorithm of starting on a coarse mesh, doing some
mesh refinement cycles, and on each mesh using KINSOL to find the solution
of the nonlinear algebraic equation we obtain from discretization on this
mesh. The `refine_mesh()` function above makes sure that the solution on
one mesh is used as the starting guess on the next mesh. We also use a
TimerOutput object to measure how much time every operation on each mesh
costs, and reset the timer at the beginning of each cycle.

As discussed in the introduction, it is not necessary to solve problems on
coarse meshes particularly accurately since these will only serve as
starting guesses for the next mesh. As a consequence, we will use a target
tolerance of @f$\tau=10^{-3} \frac{1}{10^k}@f$ for the @f$k@f$th mesh refinement cycle.

All of this is encoded in the first part of this function:
@code
template <int dim>
void MinimalSurfaceProblem<dim>::run()
{
  GridGenerator::hyper_ball(triangulation);
  triangulation.refine_global(2);

  setup_system();
  nonzero_constraints.distribute(current_solution);

  for (unsigned int refinement_cycle = 0; refinement_cycle < 6;
       ++refinement_cycle)
    {
      computing_timer.reset();
      std::cout << "Mesh refinement step " << refinement_cycle << std::endl;

      if (refinement_cycle != 0)
        refine_mesh();

      const double target_tolerance = 1e-3 * std::pow(0.1, refinement_cycle);
      std::cout << "  Target_tolerance: " << target_tolerance << std::endl
                << std::endl;
@endcode
This is where the fun starts. At the top we create the KINSOL solver
object and feed it with an object that encodes a number of additional
specifics (of which we only change the nonlinear tolerance we want to
reach; but you might want to look into what other members the
SUNDIALS::KINSOL::AdditionalData class has and play with them).

@code
      {
        typename SUNDIALS::KINSOL<Vector<double>>::AdditionalData
          additional_data;
        additional_data.function_tolerance = target_tolerance;

        SUNDIALS::KINSOL<Vector<double>> nonlinear_solver(additional_data);
@endcode
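
As an aside, here is a hedged illustration of a few of the other knobs one
could turn before constructing the solver. The member names follow the
SUNDIALS::KINSOL::AdditionalData documentation, but check your deal.II
version for the authoritative list; the actual program only sets
`function_tolerance`:
@code
        // Not in the actual program: use a Newton method with line search
        // (which is also the default strategy), and allow more nonlinear
        // iterations before giving up.
        additional_data.strategy =
          SUNDIALS::KINSOL<Vector<double>>::AdditionalData::linesearch;
        additional_data.maximum_non_linear_iterations = 200;
@endcode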
Then we have to describe the operations that were already mentioned
in the introduction. In essence, we have to teach KINSOL how to (i)
resize a vector to the correct size, (ii) compute the residual
vector, (iii) compute the Jacobian matrix (during which we also
compute its factorization), and (iv) solve a linear system with the
Jacobian matrix, using the factorization computed in step (iii).

All four of these operations are represented by member variables of
the SUNDIALS::KINSOL class that are of type `std::function`, i.e.,
they are objects to which we can assign a pointer to a function or,
as we do here, a "lambda function" that takes the appropriate
arguments and returns the appropriate information. It turns out
that we can do all of this in just over 20 lines of code.
(If you're not familiar with what "lambda functions" are, take
a look at @ref step_12 "step-12" or at the
[wikipedia page](https://en.wikipedia.org/wiki/Anonymous_function)
on the subject. The idea of a lambda function is that one
wants to define a function with a certain set of
arguments, but (i) not make it a named function because,
typically, the function is used in only one place and it
seems unnecessary to give it a global name; and (ii) that
the function has access to some of the variables that
exist at the place where it is defined, including member
variables of the surrounding class. The syntax of lambda
functions is a bit awkward, but ultimately quite useful.)
At the very end of the code block we then tell KINSOL to go to work
and solve our problem. The member functions called from the
'residual', 'setup_jacobian', and 'solve_with_jacobian' functions
will then print output to screen that allows us to follow along
with the progress of the program.
@code
        nonlinear_solver.reinit_vector = [&](Vector<double> &x) {
          x.reinit(dof_handler.n_dofs());
        };

        nonlinear_solver.residual =
          [&](const Vector<double> &evaluation_point,
              Vector<double>       &residual) {
            compute_residual(evaluation_point, residual);
          };

        nonlinear_solver.setup_jacobian =
          [&](const Vector<double> &current_u,
              const Vector<double> & /*current_f*/) {
            compute_and_factorize_jacobian(current_u);
          };

        nonlinear_solver.solve_with_jacobian = [&](const Vector<double> &rhs,
                                                   Vector<double>       &dst,
                                                   const double tolerance) {
          solve(rhs, dst, tolerance);
        };

        nonlinear_solver.solve(current_solution);
      }
@endcode
The rest is then just house-keeping: Writing data to a file for
visualizing, and showing a summary of the timing collected so that we
can interpret how long each operation has taken, how often it was
executed, etc.:

@code
      output_results(refinement_cycle);

      computing_timer.print_summary();

      std::cout << std::endl;
    }
}
@endcode
@code
int main()
{
  try
    {
      using namespace Step77;

      MinimalSurfaceProblem<2> problem;
      problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
@endcode
<a name="step_77-Results"></a><h1>Results</h1>

When running the program, you get output that looks like this:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector... norm=0.867975
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.867975
  Computing residual vector... norm=0.212073
  Solving linear system
  Computing residual vector... norm=0.212073
  Computing residual vector... norm=0.202631
  Solving linear system
  Computing residual vector... norm=0.202631
  Computing residual vector... norm=0.165773
  Solving linear system
  Computing residual vector... norm=0.165774
  Computing residual vector... norm=0.162594
  Solving linear system
  Computing residual vector... norm=0.162594
  Computing residual vector... norm=0.148175
  Solving linear system
  Computing residual vector... norm=0.148175
  Computing residual vector... norm=0.145391
  Solving linear system
  Computing residual vector... norm=0.145391
  Computing residual vector... norm=0.137551
  Solving linear system
  Computing residual vector... norm=0.137551
  Computing residual vector... norm=0.135366
  Solving linear system
  Computing residual vector... norm=0.135365
  Computing residual vector... norm=0.130367
  Solving linear system
  Computing residual vector... norm=0.130367
  Computing residual vector... norm=0.128704
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.128704
  Computing residual vector... norm=0.0302623
  Solving linear system
  Computing residual vector... norm=0.0302624
  Computing residual vector... norm=0.0126764
  Solving linear system
  Computing residual vector... norm=0.0126763
  Computing residual vector... norm=0.00488315
  Solving linear system
  Computing residual vector... norm=0.00488322
  Computing residual vector... norm=0.00195788
  Solving linear system
  Computing residual vector... norm=0.00195781
  Computing residual vector... norm=0.000773169


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |     0.121s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assembling the Jacobian         |         2 |    0.0151s |        12% |
| assembling the residual         |        31 |    0.0945s |        78% |
| factorizing the Jacobian        |         2 |   0.00176s |       1.5% |
| graphical output                |         1 |   0.00504s |       4.2% |
| linear system solve             |        15 |  0.000893s |      0.74% |
+---------------------------------+-----------+------------+------------+


Mesh refinement step 1
  Target_tolerance: 0.0001

  Computing residual vector... norm=0.2467
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.246699
  Computing residual vector... norm=0.0357783
  Solving linear system
  Computing residual vector... norm=0.0357784
  Computing residual vector... norm=0.0222161
  Solving linear system
  Computing residual vector... norm=0.022216
  Computing residual vector... norm=0.0122148
  Solving linear system
  Computing residual vector... norm=0.0122149
  Computing residual vector... norm=0.00750795
  Solving linear system
  Computing residual vector... norm=0.00750787
  Computing residual vector... norm=0.00439629
  Solving linear system
  Computing residual vector... norm=0.00439638
  Computing residual vector... norm=0.00265093
  Solving linear system
@endcode
The way this should be interpreted is most easily explained by looking at
the first few lines of the output on the first mesh:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector... norm=0.867975
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.867975
  Computing residual vector... norm=0.212073
  Solving linear system
  Computing residual vector... norm=0.212073
  Computing residual vector... norm=0.202631
  Solving linear system
  Computing residual vector... norm=0.202631
  Computing residual vector... norm=0.165773
  Solving linear system
  Computing residual vector... norm=0.165774
  Computing residual vector... norm=0.162594
  Solving linear system
  Computing residual vector... norm=0.162594
  Computing residual vector... norm=0.148175
  Solving linear system
@endcode
What is happening is this:
- In the first residual computation, KINSOL computes the residual to see whether
  the desired tolerance has been reached. The answer is no, so it requests the
  user program to compute the Jacobian matrix (and the function then also
  factorizes the matrix).
- KINSOL then instructs us to solve a linear system of the form
  @f$J_k \, \delta U_k = -F_k@f$ with this matrix and the previously computed
  residual vector.
- It is then time to determine how far we want to go in this direction,
  i.e., do line search. To this end, KINSOL requires us to compute the
  residual vector @f$F(U_k + \alpha_k \delta U_k)@f$ for different step lengths
  @f$\alpha_k@f$. For the first step above, it finds an acceptable @f$\alpha_k@f$
  after two tries, and that's generally what will happen in later line
  searches as well.
- Having found a suitable updated solution @f$U_{k+1}@f$, the process is
  repeated except now KINSOL is happy with the current Jacobian matrix
  and does not instruct us to re-build the matrix and its factorization,
  instead asking us to solve a linear system with that same matrix. That
  will happen several times over, and only after ten solves with the same
  matrix are we instructed to build a matrix again, using what is by then an
  already substantially improved solution as linearization point.
The program also writes the solution to a VTU file at the end
of each mesh refinement cycle, and it looks as follows:
<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-77.solution.png" alt="">
    </td>
  </tr>
</table>
The key takeaway messages of this program are the following:

- The solution is the same as the one we computed in @ref step_15 "step-15", i.e., the
  interfaces to %SUNDIALS' KINSOL package really did what they were supposed
  to do. This should not come as a surprise, but the important point is that
  we don't have to spend the time implementing the complex algorithms that
  underlie advanced nonlinear solvers ourselves.

- KINSOL is able to avoid all sorts of operations such as rebuilding the
  Jacobian matrix when that is not actually necessary. Comparing the
  number of linear solves in the output above with the number of times
  we rebuild the Jacobian and compute its factorization should make it
  clear that this leads to very substantial savings in terms of compute
  times, without us having to implement the intricacies of algorithms
  that determine when we need to rebuild this information.
<a name="step-77-extensions"></a>
<a name="step_77-Possibilitiesforextensions"></a><h3> Possibilities for extensions </h3>

<a name="step_77-Betterlinearsolvers"></a><h4> Better linear solvers </h4>
For all but the small problems we consider here, a sparse direct solver
requires too much time and memory -- we need an iterative solver like
we use in many other programs. The trade-off of constructing an
expensive preconditioner (say, a geometric or algebraic multigrid method)
is different in the current case, however: Since we can re-use the same
matrix for numerous linear solves, we can do the same for the preconditioner,
and putting more work into building a good preconditioner can more easily
be justified than if we used it only for a single linear solve as one
does in many other situations.
But iterative solvers also afford other opportunities. For example (and as
discussed briefly in the introduction), we may not need to solve to
very high accuracy (small tolerances) in early nonlinear iterations as long
as we are still far away from the actual solution. This was the basis of the
Eisenstat-Walker trick mentioned there. (This is also the underlying reason
why one can store the matrix in single precision rather than double precision,
see the discussion in the "Possibilities for extensions" section of @ref step_15 "step-15".)
KINSOL provides the function that does the linear solution with a target
tolerance that needs to be reached. We ignore it in the program above
because the direct solver we use does not need a tolerance and instead
solves the linear system exactly (up to round-off, of course), but iterative
solvers could make use of this kind of information -- and, in fact, should.
Indeed, the infrastructure is already there: The `solve()` function of this
program is declared as
@code
  void MinimalSurfaceProblem<dim>::solve(const Vector<double> &rhs,
                                         Vector<double>       &solution,
                                         const double          tolerance)
@endcode
i.e., the `tolerance` parameter already exists, but is unused.
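
For illustration, a minimal sketch of a `solve()` function that honors the
tolerance might look as follows. This is not part of the actual program; it
assumes the (symmetric) Jacobian is amenable to CG and re-uses the existing
LU factorization as a preconditioner rather than as an exact solver:
@code
template <int dim>
void MinimalSurfaceProblem<dim>::solve(const Vector<double> &rhs,
                                       Vector<double>       &solution,
                                       const double          tolerance)
{
  TimerOutput::Scope t(computing_timer, "linear system solve");

  // Stop iterating once the residual has been reduced to the tolerance
  // KINSOL asked for, interpreted here relative to the right hand side:
  SolverControl            solver_control(1000, tolerance * rhs.l2_norm());
  SolverCG<Vector<double>> solver(solver_control);

  // The (possibly out-of-date) factorization now only serves as a
  // preconditioner for the iteration:
  solver.solve(jacobian_matrix, solution, rhs, *jacobian_matrix_factorization);

  zero_constraints.distribute(solution);
}
@endcode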
<a name="step_77-ReplacingSUNDIALSKINSOLbyPETScsSNES"></a><h4> Replacing SUNDIALS' KINSOL by PETSc's SNES </h4>

As mentioned in the introduction, SUNDIALS' KINSOL package is not the
only player in town. Rather, very similar interfaces exist to the SNES
package that is part of PETSc, and the NOX package that is part of
Trilinos, via the PETScWrappers::NonlinearSolver and
TrilinosWrappers::NOXSolver classes.
It is not very difficult to change the program to use either of these
two alternatives. Rather than show exactly what needs to be done,
let us point out that a version of this program that uses SNES instead
of KINSOL is available as part of the test suite, in the file
`tests/petsc/step-77-snes.cc`. Setting up the solver for
PETScWrappers::NonlinearSolver turns out to be even simpler than
for the SUNDIALS::KINSOL class we use here because we don't even
need the `reinit_vector` lambda function -- SNES only needs us to set up
the remaining three functions `residual`, `setup_jacobian`, and
`solve_with_jacobian`. The majority of changes necessary to convert
the program to use SNES are related to the fact that SNES can only
deal with PETSc vectors and matrices, and these need to be set up
slightly differently. On the upside, the test suite program mentioned
above already works in parallel.
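
To give a flavor, the setup might look roughly like the sketch below. This
is not the test-suite program verbatim, and the exact callback signatures
are assumptions on my part; consult the PETScWrappers::NonlinearSolver
documentation and `tests/petsc/step-77-snes.cc` for the authoritative
interface:
@code
// Sketch: the three callbacks for PETSc's SNES, operating on PETSc vectors
// (the compute_residual(), compute_and_factorize_jacobian(), and solve()
// functions would have to be converted to PETSc types as well).
PETScWrappers::NonlinearSolver<PETScWrappers::MPI::Vector> nonlinear_solver;

nonlinear_solver.residual = [&](const PETScWrappers::MPI::Vector &x,
                                PETScWrappers::MPI::Vector       &res) {
  compute_residual(x, res);
};

nonlinear_solver.setup_jacobian = [&](const PETScWrappers::MPI::Vector &x) {
  compute_and_factorize_jacobian(x);
};

nonlinear_solver.solve_with_jacobian =
  [&](const PETScWrappers::MPI::Vector &src,
      PETScWrappers::MPI::Vector       &dst) { solve(src, dst); };

nonlinear_solver.solve(current_solution);
@endcode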
SNES also allows playing with a number of parameters about the
solver, and that enables some interesting comparisons between
methods. When you run the test program (or a slightly modified
version that outputs information to the screen instead of a file),
you get output that looks comparable to something like this:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
@endcode
By default, PETSc uses a Newton solver with cubic backtracking,
resampling the Jacobian matrix at each Newton step. That is, we
compute and factorize the matrix once per Newton step, and then sample
the residual to check for a successful line-search.
The attentive reader should have noticed that in this case we are
computing one extra residual per Newton step. This is because
the deal.II code is set up to use a Jacobian-free approach, and the
extra residual computation pops up when computing a matrix-vector
product to test the validity of the Newton solution.
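
"Jacobian-free" here refers to the standard finite-difference approximation
of the action of the Jacobian, stated below for completeness (this is the
textbook formula matrix-free differencing is built on, not something
specific to this program):
@f[
  J(u)\,v \;\approx\; \frac{F(u + h v) - F(u)}{h},
@f]
where @f$h@f$ is a small differencing parameter. Each such matrix-vector
product therefore costs one additional residual evaluation.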
PETSc can be configured in many interesting ways via the command line.
We can visualize the details of the solver by using the command line
argument **-snes_view**, which produces an excerpt like the following
at the end of each solve:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

SNES Object: 1 MPI process
  type: newtonls
  maximum iterations=50, maximum function evaluations=10000
  tolerances: relative=1e-08, absolute=0.001, solution=1e-08
  total number of linear solver iterations=3
  total number of function evaluations=7
  norm schedule ALWAYS
  Jacobian is applied matrix-free with differencing
  Jacobian is applied matrix-free with differencing, no explicit Jacobian
  SNESLineSearch Object: 1 MPI process
    type: bt
      interpolation: cubic
    maxstep=1.000000e+08, minlambda=1.000000e-12
    tolerances: relative=1.000000e-08, absolute=1.000000e-15, lambda=1.000000e-08
    maximum iterations=40
  KSP Object: 1 MPI process
    maximum iterations=10000, initial guess is zero
    tolerances: relative=1e-05, absolute=1e-50, divergence=10000.
    left preconditioning
    using NONE norm type for convergence test
  PC Object: 1 MPI process
    type: shell
    linear system matrix followed by preconditioner matrix:
    Mat Object: 1 MPI process
      type: mffd
      Matrix-free approximation:
        err=1.49012e-08 (relative error in function evaluation)
        Using wp compute h routine
            Does not compute normU
    Mat Object: 1 MPI process
      type: seqaij
      total: nonzeros=745, allocated nonzeros=745
      total number of mallocs used during MatSetValues calls=0
        not using I-node routines
@endcode
From the above details, we see that we are using the "newtonls" solver
type ("Newton line search"), with "bt" ("backtracking") line search.

From the output of **-snes_view** we can also get information about
the linear solver details; specifically, when using the
`solve_with_jacobian` interface, the deal.II interface internally uses
a custom solver configuration within a "shell" preconditioner that
wraps the action of `solve_with_jacobian`.

We can also see the details of the type of matrices used within the
solve: "mffd" (matrix-free finite-differencing) for the action of the
linearized operator and "seqaij" for the assembled Jacobian we have
used to construct the preconditioner.
Diagnostics for the line search procedure can be turned on using the
command line **-snes_linesearch_monitor**, producing the excerpt below:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
      Line search: Using full step: fnorm 8.679748230595e-01 gnorm 2.120728179320e-01
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
      Line search: Using full step: fnorm 2.120728179320e-01 gnorm 1.896033864659e-02
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
      Line search: Using full step: fnorm 1.896033864659e-02 gnorm 3.148542199408e-04
@endcode
Within the run, the Jacobian matrix is assembled (and factored) 29 times:
@code
./step-77-snes | grep "Computing Jacobian" | wc -l
29
@endcode
KINSOL internally decided when it was necessary to update the Jacobian
matrix (which is when it would call `setup_jacobian`). SNES can do
something similar: We can compute the explicit sparse Jacobian matrix
only once per refinement step (and reuse the initial factorization) by
using the command line **-snes_lag_jacobian -2**, producing:
@code
./step-77-snes -snes_lag_jacobian -2 | grep "Computing Jacobian" | wc -l
@endcode
In other words, this dramatically reduces the number of times we have to
build the Jacobian matrix, though at a cost to the number of
nonlinear steps we have to take.
The lagging period can also be decided automatically. For example, if
we want to recompute the Jacobian at every other step:
@code
./step-77-snes -snes_lag_jacobian 2 | grep "Computing Jacobian" | wc -l
@endcode
Note, however, that we didn't exactly halve the number of Jacobian
computations. In this case the solution process will require many more
nonlinear iterations since the accuracy of the linear system solve is
not always sufficient.
If we switch to using the preconditioned conjugate gradient method as
a linear solve, still using our initial factorization as
preconditioner, we get:
@code
./step-77-snes -snes_lag_jacobian 2 -ksp_type cg | grep "Computing Jacobian" | wc -l
@endcode
Note that in this case we use an approximate preconditioner (the LU
factorization of the initial approximation) but we use a matrix-free
operator for the action of our Jacobian matrix, thus solving for the
correct linear system.
We can switch to a quasi-Newton method by using the command
line **-snes_type qn -snes_qn_scale_type jacobian**, and we can see that
our Jacobian is sampled and factored only when needed, at the cost of
an increase of the number of steps:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
@endcode
<a href="https://www.mcs.anl.gov/papers/P2010-0112.pdf">Nonlinear preconditioning</a>
can also be used. For example, we can run a right-preconditioned nonlinear
GMRES, using one Newton step as a preconditioner, with the command:
@code
./step-77-snes -snes_type ngmres -npc_snes_type newtonls -snes_monitor -npc_snes_monitor | grep SNES
  0 SNES Function norm 8.679748230595e-01
  0 SNES Function norm 8.679748230595e-01
  1 SNES Function norm 2.120738413585e-01
  1 SNES Function norm 1.284613424341e-01
  0 SNES Function norm 1.284613424341e-01
  1 SNES Function norm 6.539358995036e-03
  2 SNES Function norm 5.148828618635e-03
  0 SNES Function norm 5.148828618635e-03
  1 SNES Function norm 6.048613313899e-06
  3 SNES Function norm 3.199913594705e-06
  0 SNES Function norm 2.464793634583e-01
  0 SNES Function norm 2.464793634583e-01
  1 SNES Function norm 3.591625291931e-02
  1 SNES Function norm 3.235827289342e-02
  0 SNES Function norm 3.235827289342e-02
  1 SNES Function norm 1.249214136060e-03
  2 SNES Function norm 5.302288687547e-04
  0 SNES Function norm 5.302288687547e-04
  1 SNES Function norm 1.490247730530e-07
  3 SNES Function norm 1.436531309822e-07
  0 SNES Function norm 5.044203686086e-01
  0 SNES Function norm 5.044203686086e-01
  1 SNES Function norm 1.716855756535e-01
  1 SNES Function norm 7.770484434662e-02
  0 SNES Function norm 7.770484434662e-02
  1 SNES Function norm 2.462422395554e-02
  2 SNES Function norm 1.438187947066e-02
  0 SNES Function norm 1.438187947066e-02
  1 SNES Function norm 9.214168343848e-04
  3 SNES Function norm 2.268378169625e-04
  0 SNES Function norm 2.268378169625e-04
  1 SNES Function norm 3.463704776158e-07
  4 SNES Function norm 9.964533647277e-08
  0 SNES Function norm 1.942213246154e-01
  0 SNES Function norm 1.942213246154e-01
  1 SNES Function norm 1.125558372384e-01
  1 SNES Function norm 1.309880643103e-01
  0 SNES Function norm 1.309880643103e-01
  1 SNES Function norm 2.595634741967e-02
  2 SNES Function norm 1.149616419685e-02
  0 SNES Function norm 1.149616419685e-02
  1 SNES Function norm 7.204904831783e-04
  3 SNES Function norm 6.743539224973e-04
  0 SNES Function norm 6.743539224973e-04
  1 SNES Function norm 1.521290969181e-05
  4 SNES Function norm 8.121151857453e-06
  0 SNES Function norm 8.121151857453e-06
  1 SNES Function norm 1.460470903719e-09
  5 SNES Function norm 9.982794797188e-10
  0 SNES Function norm 1.225979459424e-01
  0 SNES Function norm 1.225979459424e-01
  1 SNES Function norm 4.946412992249e-02
  1 SNES Function norm 2.466574163571e-02
  0 SNES Function norm 2.466574163571e-02
  1 SNES Function norm 8.537739703503e-03
  2 SNES Function norm 5.935412895618e-03
  0 SNES Function norm 5.935412895618e-03
  1 SNES Function norm 3.699307476482e-04
  3 SNES Function norm 2.188768476656e-04
  0 SNES Function norm 2.188768476656e-04
  1 SNES Function norm 9.478344390128e-07
  4 SNES Function norm 4.559224590570e-07
  0 SNES Function norm 4.559224590570e-07
  1 SNES Function norm 1.317127376721e-11
  5 SNES Function norm 1.311046524394e-11
  0 SNES Function norm 1.011637873732e-01
  0 SNES Function norm 1.011637873732e-01
  1 SNES Function norm 1.072720108836e-02
  1 SNES Function norm 8.985302820531e-03
  0 SNES Function norm 8.985302820531e-03
  1 SNES Function norm 5.807781788861e-04
  2 SNES Function norm 5.594756759727e-04
  0 SNES Function norm 5.594756759727e-04
  1 SNES Function norm 1.834638371641e-05
  3 SNES Function norm 1.408280767367e-05
  0 SNES Function norm 1.408280767367e-05
  1 SNES Function norm 5.763656314185e-08
  4 SNES Function norm 1.702747382189e-08
  0 SNES Function norm 1.702747382189e-08
  1 SNES Function norm 1.452722802538e-12
  5 SNES Function norm 1.444478767837e-12
@endcode
As also discussed for the KINSOL use above, optimal preconditioners
should be used instead of the LU factorization used here by
default. This is already possible within this tutorial by playing with
the command line options. For example, algebraic multigrid can be
used by simply specifying **-pc_type gamg**. When using iterative
linear solvers, the "Eisenstat-Walker trick" @cite eiwa96 can also be
requested at command line via **-snes_ksp_ew**. Using these options,
we can see that the number of nonlinear iterations used by the solver
increases as the mesh is refined, and that the number of linear
iterations increases as the Newton solver enters the region of
second-order convergence:
@code
./step-77-snes -pc_type gamg -ksp_type cg -ksp_converged_reason -snes_converged_reason -snes_ksp_ew | grep CONVERGED
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 4
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 6
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 4
  Linear solve converged due to CONVERGED_RTOL iterations 7
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 12
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 4
  Linear solve converged due to CONVERGED_RTOL iterations 7
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 5
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 7
  Linear solve converged due to CONVERGED_RTOL iterations 6
  Linear solve converged due to CONVERGED_RTOL iterations 7
  Linear solve converged due to CONVERGED_RTOL iterations 12
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 6
@endcode
Finally we describe how to get some diagnostics on the correctness of
the computed Jacobian. Deriving the correct linearization is
sometimes difficult: It took a page or two in the introduction to
derive the exact bilinear form for the Jacobian matrix, and it would
be quite nice to compute it automatically from the residual of which it
is the derivative. (This is what @ref step_72 "step-72" does!) But if one is set on
doing things by hand, it would at least be nice if we had a way to
check the correctness of the derivation. SNES allows us to do this: we
can use the options **-snes_test_jacobian -snes_test_jacobian_view**:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  ---------- Testing Jacobian -------------
  Testing hand-coded Jacobian, if (for double precision runs) ||J - Jfd||_F/||J||_F is
    O(1.e-8), the hand-coded Jacobian is probably correct.
  ||J - Jfd||_F/||J||_F = 0.0196815, ||J - Jfd||_F = 0.503436
  Hand-coded minus finite-difference Jacobian with tolerance 1e-05 ----------
Mat Object: 1 MPI process
row 1: (1, 0.0437112)
row 11: (11, 1.38157)
@endcode
showing that the only errors we commit in assembling the Jacobian are
on the boundary dofs. As discussed in the tutorial, those errors are
harmless.
The key take-away messages of this modification of the tutorial program are
therefore basically the same as what we already found using KINSOL:

- The solution is the same as the one we computed in @ref step_15 "step-15", i.e., the
  interfaces to PETSc's SNES package really did what they were supposed
  to do. This should not come as a surprise, but the important point is that
  we don't have to spend the time implementing the complex algorithms that
  underlie advanced nonlinear solvers ourselves.

- SNES offers a wide variety of solvers and line search techniques,
  not only Newton. It also allows us to control Jacobian setups;
  however, differently from KINSOL, this is not automatically decided
  within the library by looking at the residual vector but it needs to
  be specified by the user.
<a name="step_77-ReplacingSUNDIALSKINSOLbyTrilinosNOXpackage"></a><h4> Replacing SUNDIALS' KINSOL by Trilinos' NOX package </h4>

Besides KINSOL and SNES, the third option you have is to use the NOX
package. As before, rather than showing in detail how that needs to
happen, let us simply point out that the test suite program
`tests/trilinos/step-77-with-nox.cc` does this. The modifications
necessary to use NOX instead of KINSOL are quite minimal; in
particular, NOX (unlike SNES) is happy to work with deal.II's own
vector and matrix classes.
<a name="step_77-ReplacingSUNDIALSKINSOLbyagenericnonlinearsolver"></a><h4> Replacing SUNDIALS' KINSOL by a generic nonlinear solver </h4>

Having to choose which of these three frameworks (KINSOL, SNES, or NOX)
to use at compile time is cumbersome when wanting to compare things. It
would be nicer if one could decide which package to use at run time, assuming that one
has a copy of deal.II installed that is compiled against all three of these
dependencies. It turns out that this is possible, using the class
NonlinearSolverSelector that presents a common interface to all three of
these solvers, along with the ability to choose which one to use based
on run-time parameters.
<a name="step_77-PlainProg"></a>
<h1> The plain program</h1>
@include "step-77.cc"