<a name="step_77-ThecodeMinimalSurfaceProblemcodeclassimplementation"></a>
<h3>The <code>MinimalSurfaceProblem</code> class implementation</h3>

<a name="step_77-Constructorandsetupfunctions"></a>
<h4>Constructor and setup functions</h4>
The following few functions are also essentially copies of what
@ref step_15 "step-15" already does, and so there is little to discuss.
@code
  template <int dim>
  MinimalSurfaceProblem<dim>::MinimalSurfaceProblem()
    : dof_handler(triangulation)
    , fe(1)
  {}


  template <int dim>
  void MinimalSurfaceProblem<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);
    current_solution.reinit(dof_handler.n_dofs());

    zero_constraints.clear();
    VectorTools::interpolate_boundary_values(
      dof_handler, 0, Functions::ZeroFunction<dim>(), zero_constraints);
    DoFTools::make_hanging_node_constraints(dof_handler, zero_constraints);
    zero_constraints.close();

    nonzero_constraints.clear();
    VectorTools::interpolate_boundary_values(
      dof_handler, 0, BoundaryValues<dim>(), nonzero_constraints);
    DoFTools::make_hanging_node_constraints(dof_handler, nonzero_constraints);
    nonzero_constraints.close();

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp, zero_constraints);
    sparsity_pattern.copy_from(dsp);
    jacobian_matrix.reinit(sparsity_pattern);
    jacobian_matrix_factorization.reset();
  }
@endcode
<a name="step_77-AssemblingandfactorizingtheJacobianmatrix"></a>
<h4>Assembling and factorizing the Jacobian matrix</h4>

The following function is then responsible for assembling and factorizing the
Jacobian matrix. The first half of the function is in essence the
`assemble_system()` function of @ref step_15 "step-15", except that it does
not also form a right hand side vector (i.e., the residual) since we do not
always have to do these operations at the same time.

We put the whole assembly functionality into a code block enclosed by curly
braces so that we can use a TimerOutput::Scope variable to measure how much
time is spent in this code block, excluding everything that happens in this
function after the matching closing brace `}`.
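
Recall from the introduction what it is the loops below actually compute:
Abbreviating the coefficient at the current linearization point @f$u_n@f$ by
@f$a_n = \left(1+|\nabla u_n|^2\right)^{-1/2}@f$, the entries of the Jacobian
matrix are
@f[
  J^n_{ij} = \int_\Omega \left[ a_n \, \nabla \varphi_i \cdot \nabla \varphi_j
             - a_n^3 \left(\nabla \varphi_i \cdot \nabla u_n\right)
                     \left(\nabla \varphi_j \cdot \nabla u_n\right)
             \right] \, dx,
@f]
and the variable `coeff` in the innermost loop below corresponds to
@f$a_n@f$.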
@code
  template <int dim>
  void MinimalSurfaceProblem<dim>::compute_and_factorize_jacobian(
    const Vector<double> &evaluation_point)
  {
    {
      TimerOutput::Scope t(computing_timer, "assembling the Jacobian");

      std::cout << "  Computing Jacobian matrix" << std::endl;

      const QGauss<dim> quadrature_formula(fe.degree + 1);

      jacobian_matrix = 0;

      FEValues<dim> fe_values(fe,
                              quadrature_formula,
                              update_gradients | update_quadrature_points |
                                update_JxW_values);

      const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
      const unsigned int n_q_points    = quadrature_formula.size();

      FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);

      std::vector<Tensor<1, dim>> evaluation_point_gradients(n_q_points);

      std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

      for (const auto &cell : dof_handler.active_cell_iterators())
        {
          cell_matrix = 0;

          fe_values.reinit(cell);

          fe_values.get_function_gradients(evaluation_point,
                                           evaluation_point_gradients);

          for (unsigned int q = 0; q < n_q_points; ++q)
            {
              const double coeff =
                1.0 / std::sqrt(1 + evaluation_point_gradients[q] *
                                      evaluation_point_gradients[q]);

              for (unsigned int i = 0; i < dofs_per_cell; ++i)
                for (unsigned int j = 0; j < dofs_per_cell; ++j)
                  cell_matrix(i, j) +=
                    (((fe_values.shape_grad(i, q)    // ((\nabla \phi_i
                       * coeff                       //   * a_n
                       * fe_values.shape_grad(j, q)) //   * \nabla \phi_j)
                      -                              //  -
                      (fe_values.shape_grad(i, q)    //  (\nabla \phi_i
                       * coeff * coeff * coeff       //   * a_n^3
                       * (fe_values.shape_grad(j, q) //   * (\nabla \phi_j
                          * evaluation_point_gradients[q]) // * \nabla u_n)
                       * evaluation_point_gradients[q]))   // * \nabla u_n))
                     * fe_values.JxW(q));            // * dx
            }

          cell->get_dof_indices(local_dof_indices);
          zero_constraints.distribute_local_to_global(cell_matrix,
                                                      local_dof_indices,
                                                      jacobian_matrix);
        }
    }
@endcode
The second half of the function then deals with factorizing the so-computed
matrix. To do this, we first create a new SparseDirectUMFPACK object and by
assigning it to the member variable `jacobian_matrix_factorization`, we also
destroy whatever object that pointer previously pointed to (if any). Then we
tell the object to factorize the Jacobian.

As above, we enclose this block of code into curly braces and use a timer to
assess how long this part of the program takes.

(Strictly speaking, we don't actually need the matrix any more after we are
done here, and could throw the matrix object away. A code intended to be
memory efficient would do this, and only create the matrix object in this
function, rather than as a member variable of the surrounding class. We omit
this step here because using the same coding style as in previous tutorial
programs breeds familiarity with the common style and helps make these
tutorial programs easier to read.)
@code
    {
      TimerOutput::Scope t(computing_timer, "factorizing the Jacobian");

      std::cout << "  Factorizing Jacobian matrix" << std::endl;

      jacobian_matrix_factorization = std::make_unique<SparseDirectUMFPACK>();
      jacobian_matrix_factorization->factorize(jacobian_matrix);
    }
  }
@endcode
<a name="step_77-Computingtheresidualvector"></a>
<h4>Computing the residual vector</h4>

The second part of what `assemble_system()` used to do in @ref step_15
"step-15" is computing the residual vector, i.e., the right hand side vector
of the Newton linear systems. We have broken this out of the previous
function, but the following function will be easy to understand if you
understood what `assemble_system()` in @ref step_15 "step-15" did.
Importantly, however, we need to compute the residual not linearized around
the current solution vector, but whatever we get from KINSOL. This is
necessary for operations such as line search where we want to know what the
residual @f$F(U^k + \alpha_k \delta U^k)@f$ is for different values of
@f$\alpha_k@f$; KINSOL in those cases simply gives us the argument to the
function @f$F@f$ and we then compute the residual @f$F(\cdot)@f$ at this
point.

The function prints the norm of the so-computed residual at the end as a way
for us to follow along the progress of the program.
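
For reference, what the loops below accumulate are the components
@f[
  F_i(u) = \int_\Omega
    \frac{\nabla \varphi_i \cdot \nabla u}{\sqrt{1+|\nabla u|^2}} \, dx
@f]
of the residual, evaluated at whatever point @f$u@f$ KINSOL hands us, rather
than at the stored `current_solution`.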
@code
  template <int dim>
  void MinimalSurfaceProblem<dim>::compute_residual(
    const Vector<double> &evaluation_point,
    Vector<double>       &residual)
  {
    TimerOutput::Scope t(computing_timer, "assembling the residual");

    std::cout << "  Computing residual vector..." << std::flush;

    residual = 0.0;

    const QGauss<dim> quadrature_formula(fe.degree + 1);
    FEValues<dim>     fe_values(fe,
                            quadrature_formula,
                            update_gradients | update_quadrature_points |
                              update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    Vector<double>              cell_residual(dofs_per_cell);
    std::vector<Tensor<1, dim>> evaluation_point_gradients(n_q_points);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        cell_residual = 0;
        fe_values.reinit(cell);

        fe_values.get_function_gradients(evaluation_point,
                                         evaluation_point_gradients);

        for (unsigned int q = 0; q < n_q_points; ++q)
          {
            const double coeff =
              1.0 / std::sqrt(1 + evaluation_point_gradients[q] *
                                    evaluation_point_gradients[q]);

            for (unsigned int i = 0; i < dofs_per_cell; ++i)
              cell_residual(i) +=
                (fe_values.shape_grad(i, q)      // \nabla \phi_i
                 * coeff                         // * a_n
                 * evaluation_point_gradients[q] // * \nabla u_n
                 * fe_values.JxW(q));            // * dx
          }

        cell->get_dof_indices(local_dof_indices);
        zero_constraints.distribute_local_to_global(cell_residual,
                                                    local_dof_indices,
                                                    residual);
      }

    std::cout << " norm=" << residual.l2_norm() << std::endl;
  }
@endcode
<a name="step_77-SolvinglinearsystemswiththeJacobianmatrix"></a>
<h4>Solving linear systems with the Jacobian matrix</h4>

Next up is the function that implements the solution of a linear system with
the Jacobian matrix. Since we have already factored the matrix when we built
the matrix, solving a linear system comes down to applying the inverse matrix
to the given right hand side vector: This is what the
SparseDirectUMFPACK::vmult() function does that we use here. Following this,
we have to make sure that we also address the values of hanging nodes in the
solution vector, and this is done using AffineConstraints::distribute().

The function takes an additional, but unused, argument `tolerance` that
indicates how accurately we have to solve the linear system. The meaning of
this argument is discussed in the introduction in the context of the
"Eisenstat Walker trick", but since we are using a direct rather than an
iterative solver, we are not using this opportunity to solve linear systems
only inexactly.
@code
  template <int dim>
  void MinimalSurfaceProblem<dim>::solve(const Vector<double> &rhs,
                                         Vector<double>       &solution,
                                         const double /*tolerance*/)
  {
    TimerOutput::Scope t(computing_timer, "linear system solve");

    std::cout << "  Solving linear system" << std::endl;

    jacobian_matrix_factorization->vmult(solution, rhs);
    zero_constraints.distribute(solution);
  }
@endcode
<a name="step_77-Refiningthemeshsettingboundaryvaluesandgeneratinggraphicaloutput"></a>
<h4>Refining the mesh, setting boundary values, and generating graphical output</h4>

The following three functions are again simply copies of the ones in
@ref step_15 "step-15":
@code
  template <int dim>
  void MinimalSurfaceProblem<dim>::refine_mesh()
  {
    Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

    KellyErrorEstimator<dim>::estimate(
      dof_handler,
      QGauss<dim - 1>(fe.degree + 1),
      std::map<types::boundary_id, const Function<dim> *>(),
      current_solution,
      estimated_error_per_cell);

    GridRefinement::refine_and_coarsen_fixed_number(triangulation,
                                                    estimated_error_per_cell,
                                                    0.3,
                                                    0.03);

    triangulation.prepare_coarsening_and_refinement();

    SolutionTransfer<dim> solution_transfer(dof_handler);
    const Vector<double>  coarse_solution = current_solution;
    solution_transfer.prepare_for_coarsening_and_refinement(coarse_solution);

    triangulation.execute_coarsening_and_refinement();

    setup_system();

    solution_transfer.interpolate(current_solution);
    nonzero_constraints.distribute(current_solution);
  }



  template <int dim>
  void MinimalSurfaceProblem<dim>::output_results(
    const unsigned int refinement_cycle)
  {
    TimerOutput::Scope t(computing_timer, "graphical output");

    DataOut<dim> data_out;

    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(current_solution, "solution");
    data_out.build_patches();

    const std::string filename =
      "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtu";
    std::ofstream output(filename);
    data_out.write_vtu(output);
  }
@endcode
<a name="step_77-Therunfunctionandtheoveralllogicoftheprogram"></a>
<h4>The run() function and the overall logic of the program</h4>

The only function that *really* is interesting in this program is the one that
drives the overall algorithm of starting on a coarse mesh, doing some mesh
refinement cycles, and on each mesh using KINSOL to find the solution of the
nonlinear algebraic equation we obtain from discretization on this mesh. The
`refine_mesh()` function above makes sure that the solution on one mesh is
used as the starting guess on the next mesh. We also use a TimerOutput object
to measure how much time every operation on each mesh costs, and reset the
timer at the beginning of each cycle.

As discussed in the introduction, it is not necessary to solve problems on
coarse meshes particularly accurately since these will only serve as starting
guesses for the next mesh. As a consequence, we will use a target tolerance of
@f$\tau=10^{-3} \frac{1}{10^k}@f$ for the @f$k@f$th mesh refinement cycle.

All of this is encoded in the first part of this function:
@code
  template <int dim>
  void MinimalSurfaceProblem<dim>::run()
  {
    GridGenerator::hyper_ball(triangulation);
    triangulation.refine_global(2);

    setup_system();
    nonzero_constraints.distribute(current_solution);

    for (unsigned int refinement_cycle = 0; refinement_cycle < 6;
         ++refinement_cycle)
      {
        computing_timer.reset();
        std::cout << "Mesh refinement step " << refinement_cycle << std::endl;

        if (refinement_cycle != 0)
          refine_mesh();

        const double target_tolerance = 1e-3 * std::pow(0.1, refinement_cycle);
        std::cout << "  Target_tolerance: " << target_tolerance << std::endl
                  << std::endl;
@endcode
This is where the fun starts. At the top we create the KINSOL solver object
and feed it with an object that encodes a number of additional specifics (of
which we only change the nonlinear tolerance we want to reach; but you might
want to look into what other members the SUNDIALS::KINSOL::AdditionalData
class has and play with them).
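
If you want to experiment, a hedged sketch of what setting further members
could look like is below; the `strategy` and `maximum_non_linear_iterations`
names follow the SUNDIALS::KINSOL::AdditionalData documentation, but you
should double-check them against the deal.II version you are using:
@code
  typename SUNDIALS::KINSOL<Vector<double>>::AdditionalData additional_data;
  additional_data.function_tolerance = target_tolerance;

  // Assumed member names, see the AdditionalData documentation: use a
  // Newton iteration with line search (the default), and allow at most
  // 200 nonlinear iterations before giving up.
  additional_data.strategy =
    SUNDIALS::KINSOL<Vector<double>>::AdditionalData::linesearch;
  additional_data.maximum_non_linear_iterations = 200;
@endcode
In the program itself, we only set the function tolerance: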
@code
        {
          typename SUNDIALS::KINSOL<Vector<double>>::AdditionalData
            additional_data;
          additional_data.function_tolerance = target_tolerance;

          SUNDIALS::KINSOL<Vector<double>> nonlinear_solver(additional_data);
@endcode
Then we have to describe the operations that were already mentioned in the
introduction. In essence, we have to teach KINSOL how to (i) resize a vector
to the correct size, (ii) compute the residual vector, (iii) compute the
Jacobian matrix (during which we also compute its factorization), and (iv)
solve a linear system with the Jacobian.

All four of these operations are represented by member variables of the
SUNDIALS::KINSOL class that are of type `std::function`, i.e., they are
objects to which we can assign a pointer to a function or, as we do here, a
"lambda function" that takes the appropriate arguments and returns the
appropriate information. It turns out that we can do all of this in just over
20 lines of code.

(If you're not familiar with what "lambda functions" are, take a look at
@ref step_12 "step-12" or at the
[wikipedia page](https://en.wikipedia.org/wiki/Anonymous_function) on the
subject. The idea of a lambda function is that one wants to define a function
with a certain set of arguments, but (i) not make it a named function because,
typically, the function is used in only one place and it seems unnecessary to
give it a global name; and (ii) that the function has access to some of the
variables that exist at the place where it is defined, including member
variables. The syntax of lambda functions is awkward, but ultimately quite
useful.)
At the very end of the code block we then tell KINSOL to go to work and solve
our problem. The member functions called from the 'residual',
'setup_jacobian', and 'solve_with_jacobian' functions will then print output
to screen that allows us to follow along with the progress of the program.
@code
          nonlinear_solver.reinit_vector = [&](Vector<double> &x) {
            x.reinit(dof_handler.n_dofs());
          };

          nonlinear_solver.residual =
            [&](const Vector<double> &evaluation_point,
                Vector<double>       &residual) {
              compute_residual(evaluation_point, residual);
            };

          nonlinear_solver.setup_jacobian =
            [&](const Vector<double> &current_u,
                const Vector<double> & /*current_f*/) {
              compute_and_factorize_jacobian(current_u);
            };

          nonlinear_solver.solve_with_jacobian = [&](const Vector<double> &rhs,
                                                     Vector<double>       &dst,
                                                     const double tolerance) {
            solve(rhs, dst, tolerance);
          };

          nonlinear_solver.solve(current_solution);
        }
@endcode
The rest is then just house-keeping: Writing data to a file for visualizing,
and showing a summary of the timing collected so that we can interpret how
long each operation has taken, and how often it was executed:
@code
        output_results(refinement_cycle);

        computing_timer.print_summary();

        std::cout << std::endl;
      }
  }
} // namespace Step77
@endcode
@code
int main()
{
  try
    {
      using namespace Step77;

      MinimalSurfaceProblem<2> problem;
      problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  return 0;
}
@endcode
<a name="step_77-Results"></a><h1>Results</h1>

When running the program, you get output that looks like this:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector... norm=0.867975
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.867975
  Computing residual vector... norm=0.212073
  Solving linear system
  Computing residual vector... norm=0.212073
  Computing residual vector... norm=0.202631
  Solving linear system
  Computing residual vector... norm=0.202631
  Computing residual vector... norm=0.165773
  Solving linear system
  Computing residual vector... norm=0.165774
  Computing residual vector... norm=0.162594
  Solving linear system
  Computing residual vector... norm=0.162594
  Computing residual vector... norm=0.148175
  Solving linear system
  Computing residual vector... norm=0.148175
  Computing residual vector... norm=0.145391
  Solving linear system
  Computing residual vector... norm=0.145391
  Computing residual vector... norm=0.137551
  Solving linear system
  Computing residual vector... norm=0.137551
  Computing residual vector... norm=0.135366
  Solving linear system
  Computing residual vector... norm=0.135365
  Computing residual vector... norm=0.130367
  Solving linear system
  Computing residual vector... norm=0.130367
  Computing residual vector... norm=0.128704
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.128704
  Computing residual vector... norm=0.0302623
  Solving linear system
  Computing residual vector... norm=0.0302624
  Computing residual vector... norm=0.0126764
  Solving linear system
  Computing residual vector... norm=0.0126763
  Computing residual vector... norm=0.00488315
  Solving linear system
  Computing residual vector... norm=0.00488322
  Computing residual vector... norm=0.00195788
  Solving linear system
  Computing residual vector... norm=0.00195781
  Computing residual vector... norm=0.000773169


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |     0.121s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assembling the Jacobian         |         2 |    0.0151s |        12% |
| assembling the residual         |        31 |    0.0945s |        78% |
| factorizing the Jacobian        |         2 |   0.00176s |       1.5% |
| graphical output                |         1 |   0.00504s |       4.2% |
| linear system solve             |        15 |  0.000893s |      0.74% |
+---------------------------------+-----------+------------+------------+


Mesh refinement step 1
  Target_tolerance: 0.0001

  Computing residual vector... norm=0.2467
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.246699
  Computing residual vector... norm=0.0357783
  Solving linear system
  Computing residual vector... norm=0.0357784
  Computing residual vector... norm=0.0222161
  Solving linear system
  Computing residual vector... norm=0.022216
  Computing residual vector... norm=0.0122148
  Solving linear system
  Computing residual vector... norm=0.0122149
  Computing residual vector... norm=0.00750795
  Solving linear system
  Computing residual vector... norm=0.00750787
  Computing residual vector... norm=0.00439629
  Solving linear system
  Computing residual vector... norm=0.00439638
  Computing residual vector... norm=0.00265093
  Solving linear system

[...]
@endcode
The way this should be interpreted is most easily explained by looking at the
first few lines of the output on the first mesh:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector... norm=0.867975
  Computing Jacobian matrix
  Factorizing Jacobian matrix
  Solving linear system
  Computing residual vector... norm=0.867975
  Computing residual vector... norm=0.212073
  Solving linear system
  Computing residual vector... norm=0.212073
  Computing residual vector... norm=0.202631
  Solving linear system
  Computing residual vector... norm=0.202631
  Computing residual vector... norm=0.165773
  Solving linear system
  Computing residual vector... norm=0.165774
  Computing residual vector... norm=0.162594
  Solving linear system
  Computing residual vector... norm=0.162594
  Computing residual vector... norm=0.148175
  Solving linear system
@endcode
What is happening is this:
- In the first residual computation, KINSOL computes the residual to see
  whether the desired tolerance has been reached. The answer is no, so it
  requests the user program to compute the Jacobian matrix (and the function
  then also factorizes the matrix via SparseDirectUMFPACK).
- KINSOL then instructs us to solve a linear system of the form
  @f$J_k \, \delta U_k = -F_k@f$ with this matrix and the previously computed
  residual vector.
- It is then time to determine how far we want to go in this direction, i.e.,
  do line search. To this end, KINSOL requires us to compute the residual
  vector @f$F(U_k + \alpha_k \delta U_k)@f$ for different step lengths
  @f$\alpha_k@f$. For the first step above, it finds an acceptable
  @f$\alpha_k@f$ after two tries, and that's generally what will happen in
  later line searches as well.
- Having found a suitable updated solution @f$U_{k+1}@f$, the process is
  repeated except now KINSOL is happy with the current Jacobian matrix and
  does not instruct us to re-build the matrix and its factorization, instead
  asking us to solve a linear system with that same matrix. That will happen
  several times over, and only after ten solves with the same matrix are we
  instructed to build a matrix again, using what is by then an already
  substantially improved solution as linearization point.
The program also writes the solution to a VTU file at the end of each mesh
refinement cycle, and it looks as follows:
<table width="60%" align="center">
  <tr>
    <td>
      <img src="https://www.dealii.org/images/steps/developer/step-77.solution.png" alt="">
    </td>
  </tr>
</table>
The key takeaway messages of this program are the following:

- The solution is the same as the one we computed in @ref step_15 "step-15",
  i.e., the interfaces to %SUNDIALS' KINSOL package really did what they were
  supposed to do. This should not come as a surprise, but the important point
  is that we don't have to spend the time implementing the complex algorithms
  that underlie advanced nonlinear solvers ourselves.

- KINSOL is able to avoid all sorts of operations such as rebuilding the
  Jacobian matrix when that is not actually necessary. Comparing the number
  of linear solves in the output above with the number of times we rebuild
  the Jacobian and compute its factorization should make it clear that this
  leads to very substantial savings in terms of compute times, without us
  having to implement the intricacies of algorithms that determine when we
  need to rebuild this information.
<a name="step-77-extensions"></a>
<a name="step_77-Possibilitiesforextensions"></a><h3> Possibilities for extensions </h3>

<a name="step_77-Betterlinearsolvers"></a><h4> Better linear solvers </h4>

For all but the small problems we consider here, a sparse direct solver
requires too much time and memory -- we need an iterative solver like we use
in many other programs. The trade-off in constructing an expensive
preconditioner (say, a geometric or algebraic multigrid method) is different
in the current case, however: Since we can re-use the same matrix for
numerous linear solves, we can do the same for the preconditioner, and
putting more work into building a good preconditioner can more easily be
justified than if we used it only for a single linear solve as one does in
many other situations.
But iterative solvers also afford other opportunities. For example (and as
discussed briefly in the introduction), we may not need to solve to very high
accuracy (small tolerances) in early nonlinear iterations as long as we are
still far away from the actual solution. This was the basis of the
Eisenstat-Walker trick mentioned there. (This is also the underlying reason
why one can store the matrix in single precision rather than double
precision, see the discussion in the "Possibilities for extensions" section
of @ref step_15 "step-15".)

KINSOL provides the function that does the linear solution with a target
tolerance that needs to be reached. We ignore it in the program above because
the direct solver we use does not need a tolerance and instead solves the
linear system exactly (up to round-off, of course), but iterative solvers
could make use of this kind of information -- and, in fact, should. Indeed,
the infrastructure is already there: The `solve()` function of this program
is declared as
@code
  template <int dim>
  void MinimalSurfaceProblem<dim>::solve(const Vector<double> &rhs,
                                         Vector<double>       &solution,
                                         const double          tolerance)
@endcode
i.e., the `tolerance` parameter already exists, but is unused.
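
A minimal sketch of how one might put that parameter to use with an iterative
solver, assuming one is willing to accept CG with an SSOR preconditioner as a
stand-in for the better preconditioners discussed above (the stopping
criterion shown is one simple choice, not the only reasonable one):
@code
  template <int dim>
  void MinimalSurfaceProblem<dim>::solve(const Vector<double> &rhs,
                                         Vector<double>       &solution,
                                         const double          tolerance)
  {
    TimerOutput::Scope t(computing_timer, "linear system solve");

    // Stop the iteration once the residual has been reduced to the
    // tolerance KINSOL asked for:
    SolverControl            solver_control(1000, tolerance);
    SolverCG<Vector<double>> solver(solver_control);

    PreconditionSSOR<SparseMatrix<double>> preconditioner;
    preconditioner.initialize(jacobian_matrix, 1.2);

    solver.solve(jacobian_matrix, solution, rhs, preconditioner);
    zero_constraints.distribute(solution);
  }
@endcode
Whether re-using one expensive preconditioner across all of KINSOL's solves
with the same Jacobian pays off is exactly the trade-off discussed above.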
<a name="step_77-ReplacingSUNDIALSKINSOLbyPETScsSNES"></a><h4> Replacing SUNDIALS' KINSOL by PETSc's SNES </h4>

As mentioned in the introduction, SUNDIALS' KINSOL package is not the only
player in town. Rather, very similar interfaces exist to the SNES package
that is part of PETSc, and the NOX package that is part of Trilinos, via the
PETScWrappers::NonlinearSolver and TrilinosWrappers::NOXSolver classes.

It is not very difficult to change the program to use either of these two
alternatives. Rather than show exactly what needs to be done, let us point
out that a version of this program that uses SNES instead of KINSOL is
available as part of the test suite, in the file
`tests/petsc/step-77-snes.cc`. Setting up the solver for
PETScWrappers::NonlinearSolver turns out to be even simpler than for the
SUNDIALS::KINSOL class we use here because we don't even need the `reinit`
lambda function -- SNES only needs us to set up the remaining three functions
`residual`, `setup_jacobian`, and `solve_with_jacobian`. The majority of
changes necessary to convert the program to use SNES are related to the fact
that SNES can only deal with PETSc vectors and matrices, and these need to be
set up slightly differently. On the upside, the test suite program mentioned
above already works in parallel.
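
To give a flavor, here is a hedged sketch of what the core of such a
conversion might look like; the `NonlinearSolverData` name and the exact
callback signatures are assumptions based on the PETScWrappers documentation,
and the test suite program linked above is the authoritative reference:
@code
  // Hypothetical sketch, not the actual test suite code:
  PETScWrappers::NonlinearSolverData additional_data;
  PETScWrappers::NonlinearSolver<PETScWrappers::MPI::Vector,
                                 PETScWrappers::MPI::SparseMatrix>
    nonlinear_solver(additional_data);

  // The three callbacks named in the text above, now taking PETSc vectors:
  nonlinear_solver.residual = [&](const PETScWrappers::MPI::Vector &X,
                                  PETScWrappers::MPI::Vector       &F) {
    compute_residual(X, F);
  };

  nonlinear_solver.setup_jacobian = [&](const PETScWrappers::MPI::Vector &X) {
    compute_and_factorize_jacobian(X);
  };

  nonlinear_solver.solve_with_jacobian =
    [&](const PETScWrappers::MPI::Vector &src,
        PETScWrappers::MPI::Vector       &dst) {
      solve(src, dst, /*tolerance=*/1e-12);
    };

  nonlinear_solver.solve(current_solution);
@endcode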
SNES also allows playing with a number of parameters about the solver, and
that enables some interesting comparisons between methods. When you run the
test program (or a slightly modified version that outputs information to the
screen instead of a file), you get output that looks comparable to something
like this:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
[...]
@endcode
By default, PETSc uses a Newton solver with cubic backtracking, resampling
the Jacobian matrix at each Newton step. That is, we compute and factorize
the matrix once per Newton step, and then sample the residual to check for a
successful line search.

The attentive reader should have noticed that in this case we are computing
one extra residual per Newton step. This is because the deal.II code is set
up to use a Jacobian-free approach, and the extra residual computation pops
up when computing a matrix-vector product to test the validity of the Newton
solution.
PETSc can be configured in many interesting ways via the command line. We can
visualize the details of the solver by using the command line argument
**-snes_view**, which produces the excerpt below at the end of the solution
process:
@code
Mesh refinement step 0
[...]
SNES Object: 1 MPI process
  type: newtonls
  maximum iterations=50, maximum function evaluations=10000
  tolerances: relative=1e-08, absolute=0.001, solution=1e-08
  total number of linear solver iterations=3
  total number of function evaluations=7
  norm schedule ALWAYS
  Jacobian is applied matrix-free with differencing
  Jacobian is applied matrix-free with differencing, no explicit Jacobian
  SNESLineSearch Object: 1 MPI process
    type: bt
      interpolation: cubic
    maxstep=1.000000e+08, minlambda=1.000000e-12
    tolerances: relative=1.000000e-08, absolute=1.000000e-15, lambda=1.000000e-08
    maximum iterations=40
  KSP Object: 1 MPI process
    type: preonly
    maximum iterations=10000, initial guess is zero
    tolerances: relative=1e-05, absolute=1e-50, divergence=10000.
    left preconditioning
    using NONE norm type for convergence test
  PC Object: 1 MPI process
    type: shell
    [...]
    linear system matrix followed by preconditioner matrix:
    Mat Object: 1 MPI process
      type: mffd
      [...]
      Matrix-free approximation:
        err=1.49012e-08 (relative error in function evaluation)
        Using wp compute h routine
            Does not compute normU
    Mat Object: 1 MPI process
      type: seqaij
      [...]
      total: nonzeros=745, allocated nonzeros=745
      total number of mallocs used during MatSetValues calls=0
        not using I-node routines
@endcode
From the above details, we see that we are using the "newtonls" solver type
("Newton line search"), with "bt" ("backtracking") line search.

From the output of **-snes_view** we can also get information about the
linear solver details; specifically, when using the `solve_with_jacobian`
interface, the deal.II interface internally uses a custom solver
configuration within a "shell" preconditioner that wraps the action of
`solve_with_jacobian`.

We can also see the details of the type of matrices used within the solve:
"mffd" (matrix-free finite-differencing) for the action of the linearized
operator and "seqaij" for the assembled Jacobian we have used to construct
the preconditioner.
Diagnostics for the line search procedure can be turned on using the command
line **-snes_linesearch_monitor**, producing the excerpt below:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
      Line search: Using full step: fnorm 8.679748230595e-01 gnorm 2.120728179320e-01
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
      Line search: Using full step: fnorm 2.120728179320e-01 gnorm 1.896033864659e-02
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
      Line search: Using full step: fnorm 1.896033864659e-02 gnorm 3.148542199408e-04
[...]
@endcode
Within the run, the Jacobian matrix is assembled (and factored) 29 times:
@code
./step-77-snes | grep "Computing Jacobian" | wc -l
29
@endcode

KINSOL internally decided when it was necessary to update the Jacobian matrix
(which is when it would call `setup_jacobian`). SNES can do something
similar: We can compute the explicit sparse Jacobian matrix only once per
refinement step (and reuse the initial factorization) by using the command
line **-snes_lag_jacobian -2**, producing:
@code
./step-77-snes -snes_lag_jacobian -2 | grep "Computing Jacobian" | wc -l
6
@endcode
In other words, this dramatically reduces the number of times we have to
build the Jacobian matrix, though at a cost to the number of nonlinear steps
we have to take.

The lagging period can also be decided automatically. For example, if we want
to recompute the Jacobian at every other step:
@code
./step-77-snes -snes_lag_jacobian 2 | grep "Computing Jacobian" | wc -l
@endcode
Note, however, that we didn't exactly halve the number of Jacobian
computations. In this case the solution process will require many more
nonlinear iterations since the accuracy of the linear system solve is not
enough.

If we switch to using the preconditioned conjugate gradient method as a
linear solve, still using our initial factorization as preconditioner, we
get:
@code
./step-77-snes -snes_lag_jacobian 2 -ksp_type cg | grep "Computing Jacobian" | wc -l
@endcode
Note that in this case we use an approximate preconditioner (the LU
factorization of the initial approximation) but we use a matrix-free operator
for the action of our Jacobian matrix, thus solving for the correct linear
system.
We can switch to a quasi-Newton method by using the command line
**-snes_type qn -snes_qn_scale_type jacobian**, and we can see that our
Jacobian is sampled and factored only when needed, at the cost of an increase
of the number of steps:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
  Computing residual vector
[...]
@endcode
<a href="https://www.mcs.anl.gov/papers/P2010-0112.pdf">Nonlinear preconditioning</a>
can also be used. For example, we can run a right-preconditioned nonlinear
GMRES, using one Newton step as a preconditioner, with the command:
@code
./step-77-snes -snes_type ngmres -npc_snes_type newtonls -snes_monitor -npc_snes_monitor | grep SNES
  0 SNES Function norm 8.679748230595e-01
    0 SNES Function norm 8.679748230595e-01
    1 SNES Function norm 2.120738413585e-01
  1 SNES Function norm 1.284613424341e-01
    0 SNES Function norm 1.284613424341e-01
    1 SNES Function norm 6.539358995036e-03
  2 SNES Function norm 5.148828618635e-03
    0 SNES Function norm 5.148828618635e-03
    1 SNES Function norm 6.048613313899e-06
  3 SNES Function norm 3.199913594705e-06
  0 SNES Function norm 2.464793634583e-01
    0 SNES Function norm 2.464793634583e-01
    1 SNES Function norm 3.591625291931e-02
  1 SNES Function norm 3.235827289342e-02
    0 SNES Function norm 3.235827289342e-02
    1 SNES Function norm 1.249214136060e-03
  2 SNES Function norm 5.302288687547e-04
    0 SNES Function norm 5.302288687547e-04
    1 SNES Function norm 1.490247730530e-07
  3 SNES Function norm 1.436531309822e-07
  0 SNES Function norm 5.044203686086e-01
    0 SNES Function norm 5.044203686086e-01
    1 SNES Function norm 1.716855756535e-01
  1 SNES Function norm 7.770484434662e-02
    0 SNES Function norm 7.770484434662e-02
    1 SNES Function norm 2.462422395554e-02
  2 SNES Function norm 1.438187947066e-02
    0 SNES Function norm 1.438187947066e-02
    1 SNES Function norm 9.214168343848e-04
  3 SNES Function norm 2.268378169625e-04
    0 SNES Function norm 2.268378169625e-04
    1 SNES Function norm 3.463704776158e-07
  4 SNES Function norm 9.964533647277e-08
  0 SNES Function norm 1.942213246154e-01
    0 SNES Function norm 1.942213246154e-01
    1 SNES Function norm 1.125558372384e-01
  1 SNES Function norm 1.309880643103e-01
    0 SNES Function norm 1.309880643103e-01
    1 SNES Function norm 2.595634741967e-02
  2 SNES Function norm 1.149616419685e-02
    0 SNES Function norm 1.149616419685e-02
    1 SNES Function norm 7.204904831783e-04
  3 SNES Function norm 6.743539224973e-04
    0 SNES Function norm 6.743539224973e-04
    1 SNES Function norm 1.521290969181e-05
  4 SNES Function norm 8.121151857453e-06
    0 SNES Function norm 8.121151857453e-06
    1 SNES Function norm 1.460470903719e-09
  5 SNES Function norm 9.982794797188e-10
  0 SNES Function norm 1.225979459424e-01
    0 SNES Function norm 1.225979459424e-01
    1 SNES Function norm 4.946412992249e-02
  1 SNES Function norm 2.466574163571e-02
    0 SNES Function norm 2.466574163571e-02
    1 SNES Function norm 8.537739703503e-03
  2 SNES Function norm 5.935412895618e-03
    0 SNES Function norm 5.935412895618e-03
    1 SNES Function norm 3.699307476482e-04
  3 SNES Function norm 2.188768476656e-04
    0 SNES Function norm 2.188768476656e-04
    1 SNES Function norm 9.478344390128e-07
  4 SNES Function norm 4.559224590570e-07
    0 SNES Function norm 4.559224590570e-07
    1 SNES Function norm 1.317127376721e-11
  5 SNES Function norm 1.311046524394e-11
  0 SNES Function norm 1.011637873732e-01
    0 SNES Function norm 1.011637873732e-01
    1 SNES Function norm 1.072720108836e-02
  1 SNES Function norm 8.985302820531e-03
    0 SNES Function norm 8.985302820531e-03
    1 SNES Function norm 5.807781788861e-04
  2 SNES Function norm 5.594756759727e-04
    0 SNES Function norm 5.594756759727e-04
    1 SNES Function norm 1.834638371641e-05
  3 SNES Function norm 1.408280767367e-05
    0 SNES Function norm 1.408280767367e-05
    1 SNES Function norm 5.763656314185e-08
  4 SNES Function norm 1.702747382189e-08
    0 SNES Function norm 1.702747382189e-08
    1 SNES Function norm 1.452722802538e-12
  5 SNES Function norm 1.444478767837e-12
@endcode
As also discussed for the KINSOL use above, optimal preconditioners should be
used instead of the LU factorization used here by default. This is already
possible within this tutorial by playing with the command line options. For
example, algebraic multigrid can be used by simply specifying
**-pc_type gamg**. When using iterative linear solvers, the
"Eisenstat-Walker trick" @cite eiwa96 can also be requested at the command
line via **-snes_ksp_ew**. Using these options, we can see that the number of
nonlinear iterations used by the solver increases as the mesh is refined, and
that the number of linear iterations increases as the Newton solver enters
the region of second-order convergence:
@code
./step-77-snes -pc_type gamg -ksp_type cg -ksp_converged_reason -snes_converged_reason -snes_ksp_ew | grep CONVERGED
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 4
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 6
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 4
  Linear solve converged due to CONVERGED_RTOL iterations 7
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 12
  Linear solve converged due to CONVERGED_RTOL iterations 1
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 4
  Linear solve converged due to CONVERGED_RTOL iterations 7
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 5
  Linear solve converged due to CONVERGED_RTOL iterations 2
  Linear solve converged due to CONVERGED_RTOL iterations 3
  Linear solve converged due to CONVERGED_RTOL iterations 7
  Linear solve converged due to CONVERGED_RTOL iterations 6
  Linear solve converged due to CONVERGED_RTOL iterations 7
  Linear solve converged due to CONVERGED_RTOL iterations 12
Nonlinear solve converged due to CONVERGED_FNORM_ABS iterations 6
@endcode
Finally we describe how to get some diagnostic on the correctness of the
computed Jacobian. Deriving the correct linearization is sometimes difficult:
It took a page or two in the introduction to derive the exact bilinear form
for the Jacobian matrix, and it would be quite nice to compute it
automatically from the residual of which it is the derivative. (This is what
@ref step_72 "step-72" does!) But if one is set on doing things by hand, it
would at least be nice if we had a way to check the correctness of the
derivation. SNES allows us to do this: we can use the options
**-snes_test_jacobian -snes_test_jacobian_view**:
@code
Mesh refinement step 0
  Target_tolerance: 0.001

  Computing residual vector
  Computing Jacobian matrix
  ---------- Testing Jacobian -------------
  Testing hand-coded Jacobian, if (for double precision runs) ||J - Jfd||_F/||J||_F is
    O(1.e-8), the hand-coded Jacobian is probably correct.
[...]
  ||J - Jfd||_F/||J||_F = 0.0196815, ||J - Jfd||_F = 0.503436
[...]
  Hand-coded minus finite-difference Jacobian with tolerance 1e-05 ----------
Mat Object: 1 MPI process
[...]
row 1: (1, 0.0437112)
[...]
row 11: (11, 1.38157)
[...]
@endcode
showing that the only errors we commit in assembling the Jacobian are on the
boundary dofs. As discussed in the tutorial, those errors are harmless.

The key take-away messages of this modification of the tutorial program are
therefore basically the same as what we already found using KINSOL:

- The solution is the same as the one we computed in @ref step_15 "step-15",
  i.e., the interfaces to PETSc's SNES package really did what they were
  supposed to do. This should not come as a surprise, but the important point
  is that we don't have to spend the time implementing the complex algorithms
  that underlie advanced nonlinear solvers ourselves.

- SNES offers a wide variety of solvers and line search techniques, not only
  Newton. It also allows us to control Jacobian setups; however, differently
  from KINSOL, this is not automatically decided within the library by
  looking at the residual vector but it needs to be specified by the user.
<a name="step_77-ReplacingSUNDIALSKINSOLbyTrilinosNOXpackage"></a><h4> Replacing SUNDIALS' KINSOL by Trilinos' NOX package </h4>

Besides KINSOL and SNES, the third option you have is to use the NOX package.
As before, rather than showing in detail how that needs to happen, let us
simply point out that the test suite program
`tests/trilinos/step-77-with-nox.cc` does this. The modifications necessary
to use NOX instead of KINSOL are quite minimal; in particular, NOX (unlike
SNES) is happy to work with deal.II's own vector and matrix classes.
<a name="step_77-ReplacingSUNDIALSKINSOLbyagenericnonlinearsolver"></a><h4> Replacing SUNDIALS' KINSOL by a generic nonlinear solver </h4>

Having to choose which of these three frameworks (KINSOL, SNES, or NOX) to
use at compile time is cumbersome when wanting to compare things. It would be
nicer if one could decide the package to use at run time, assuming that one
has a copy of deal.II installed that is compiled against all three of these
dependencies. It turns out that this is possible, using the class
NonlinearSolverSelector that presents a common interface to all three of
these solvers, along with the ability to choose which one to use based on
run-time parameters.
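
A heavily hedged sketch of what using that class could look like, modeled on
the KINSOL code above; the `AdditionalData` contents and callback names shown
here are assumptions, so consult the NonlinearSolverSelector documentation
for the actual interface:
@code
  // Hypothetical sketch only:
  typename NonlinearSolverSelector<Vector<double>>::AdditionalData
    additional_data;
  // Pick the backend at run time, e.g., based on a parameter file entry:
  additional_data.solver_type =
    NonlinearSolverSelector<Vector<double>>::AdditionalData::kinsol;

  NonlinearSolverSelector<Vector<double>> nonlinear_solver(additional_data);

  // The same four callbacks as in the KINSOL version of the program:
  nonlinear_solver.reinit_vector = [&](Vector<double> &x) {
    x.reinit(dof_handler.n_dofs());
  };
  nonlinear_solver.residual = [&](const Vector<double> &x,
                                  Vector<double>       &f) {
    compute_residual(x, f);
  };
  nonlinear_solver.setup_jacobian = [&](const Vector<double> &x) {
    compute_and_factorize_jacobian(x);
  };
  nonlinear_solver.solve_with_jacobian = [&](const Vector<double> &rhs,
                                             Vector<double>       &dst,
                                             const double          tolerance) {
    solve(rhs, dst, tolerance);
  };

  nonlinear_solver.solve(current_solution);
@endcode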
<a name="step_77-PlainProg"></a>
<h1> The plain program</h1>
@include "step-77.cc"