\(\newcommand{\dealvcentcolon}{\mathrel{\mathop{:}}}\) \(\newcommand{\dealcoloneq}{\dealvcentcolon\mathrel{\mkern-1.2mu}=}\) \(\newcommand{\jump}[1]{\left[\!\left[ #1 \right]\!\right]}\) \(\newcommand{\average}[1]{\left\{\!\left\{ #1 \right\}\!\right\}}\)
step-15.h
545 *   template <int dim>
546 *   double BoundaryValues<dim>::value(const Point<dim> &p,
547 *   const unsigned int /*component*/) const
548 *   {
549 *   return std::sin(2 * numbers::PI * (p[0] + p[1]));
550 *   }
551 *  
552 * @endcode
553 *
554 *
555 * <a name="step_15-ThecodeMinimalSurfaceProblemcodeclassimplementation"></a>
556 * <h3>The <code>MinimalSurfaceProblem</code> class implementation</h3>
557 *
558
559 *
560 *
561 * <a name="step_15-MinimalSurfaceProblemMinimalSurfaceProblem"></a>
562 * <h4>MinimalSurfaceProblem::MinimalSurfaceProblem</h4>
563 *
564
565 *
566 * The constructor and destructor of the class are the same as in the first
567 * few tutorials.
568 *
569
570 *
571 *
572 * @code
573 *   template <int dim>
574 *   MinimalSurfaceProblem<dim>::MinimalSurfaceProblem()
575 *   : dof_handler(triangulation)
576 *   , fe(2)
577 *   {}
578 *  
579 *  
580 * @endcode
581 *
582 *
583 * <a name="step_15-MinimalSurfaceProblemsetup_system"></a>
584 * <h4>MinimalSurfaceProblem::setup_system</h4>
585 *
586
587 *
588 * As always in the setup-system function, we set up the variables of the
589 * finite element method. There are some differences to @ref step_6 "step-6", because
590 *   here we need to construct two AffineConstraints objects.
591 *
592 * @code
593 *   template <int dim>
594 *   void MinimalSurfaceProblem<dim>::setup_system()
595 *   {
596 *   dof_handler.distribute_dofs(fe);
597 *   current_solution.reinit(dof_handler.n_dofs());
598 *  
599 *   zero_constraints.clear();
600 *   VectorTools::interpolate_boundary_values(dof_handler,
601 *   0,
602 *   Functions::ZeroFunction<dim>(),
603 *   zero_constraints);
604 *   DoFTools::make_hanging_node_constraints(dof_handler, zero_constraints);
605 *   zero_constraints.close();
606 *  
607 *   nonzero_constraints.clear();
608 *   VectorTools::interpolate_boundary_values(dof_handler,
609 *   0,
610 *   BoundaryValues<dim>(),
611 *   nonzero_constraints);
612 *  
613 *   DoFTools::make_hanging_node_constraints(dof_handler, nonzero_constraints);
614 *   nonzero_constraints.close();
615 *  
616 *   newton_update.reinit(dof_handler.n_dofs());
617 *   system_rhs.reinit(dof_handler.n_dofs());
618 *  
619 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
620 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, zero_constraints);
621 *  
622 *   sparsity_pattern.copy_from(dsp);
623 *   system_matrix.reinit(sparsity_pattern);
624 *   }
625 *  
626 * @endcode
627 *
628 *
629 * <a name="step_15-MinimalSurfaceProblemassemble_system"></a>
630 * <h4>MinimalSurfaceProblem::assemble_system</h4>
631 *
632
633 *
634 * This function does the same as in the previous tutorials except that now,
635 *   of course, the matrix and the right hand side depend on the
636 * previous iteration's solution. As discussed in the introduction, we need
637 * to use zero boundary values for the Newton updates; this is done by using
638 * the `zero_constraints` object when assembling into the global matrix and
639 * vector.
640 *
641
642 *
643 * The top of the function contains the usual boilerplate code, setting up
644 * the objects that allow us to evaluate shape functions at quadrature
645 * points and temporary storage locations for the local matrices and
646 * vectors, as well as for the gradients of the previous solution at the
647 * quadrature points. We then start the loop over all cells:
648 *
649 * @code
650 *   template <int dim>
651 *   void MinimalSurfaceProblem<dim>::assemble_system()
652 *   {
653 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
654 *  
655 *   system_matrix = 0;
656 *   system_rhs = 0;
657 *  
658 *   FEValues<dim> fe_values(fe,
659 *   quadrature_formula,
660 *   update_gradients | update_quadrature_points |
661 *   update_JxW_values);
662 *  
663 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
664 *   const unsigned int n_q_points = quadrature_formula.size();
665 *  
666 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
667 *   Vector<double> cell_rhs(dofs_per_cell);
668 *  
669 *   std::vector<Tensor<1, dim>> old_solution_gradients(n_q_points);
670 *  
671 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
672 *  
673 *   for (const auto &cell : dof_handler.active_cell_iterators())
674 *   {
675 *   cell_matrix = 0;
676 *   cell_rhs = 0;
677 *  
678 *   fe_values.reinit(cell);
679 *  
680 * @endcode
681 *
682 * For the assembly of the linear system, we have to obtain the values
683 * of the previous solution's gradients at the quadrature
684 * points. There is a standard way of doing this: the
685 * FEValues::get_function_gradients function takes a vector that
686 * represents a finite element field defined on a DoFHandler, and
687 * evaluates the gradients of this field at the quadrature points of the
688 * cell with which the FEValues object has last been reinitialized.
689 * The values of the gradients at all quadrature points are then written
690 * into the second argument:
691 *
692 * @code
693 *   fe_values.get_function_gradients(current_solution,
694 *   old_solution_gradients);
695 *  
696 * @endcode
697 *
698 * With this, we can then do the integration loop over all quadrature
699 * points and shape functions. Having just computed the gradients of
700 * the old solution in the quadrature points, we are able to compute
701 * the coefficients @f$a_{n}@f$ in these points. The assembly of the
702 * system itself then looks similar to what we always do with the
703 * exception of the nonlinear terms, as does copying the results from
704 * the local objects into the global ones:
705 *
706 * @code
707 *   for (unsigned int q = 0; q < n_q_points; ++q)
708 *   {
709 *   const double coeff =
710 *   1.0 / std::sqrt(1 + old_solution_gradients[q] *
711 *   old_solution_gradients[q]);
712 *  
713 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
714 *   {
715 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
716 *   cell_matrix(i, j) +=
717 *   (((fe_values.shape_grad(i, q) // ((\nabla \phi_i
718 *   * coeff // * a_n
719 *   * fe_values.shape_grad(j, q)) // * \nabla \phi_j)
720 *   - // -
721 *   (fe_values.shape_grad(i, q) // (\nabla \phi_i
722 *   * coeff * coeff * coeff // * a_n^3
723 *   * (fe_values.shape_grad(j, q) // * (\nabla \phi_j
724 *   * old_solution_gradients[q]) // * \nabla u_n)
725 *   * old_solution_gradients[q])) // * \nabla u_n)))
726 *   * fe_values.JxW(q)); // * dx
727 *  
728 *   cell_rhs(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
729 *   * coeff // * a_n
730 *   * old_solution_gradients[q] // * \nabla u_n
731 *   * fe_values.JxW(q)); // * dx
732 *   }
733 *   }
734 *  
735 *   cell->get_dof_indices(local_dof_indices);
736 *   zero_constraints.distribute_local_to_global(
737 *   cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
738 *   }
739 *   }
740 *  
741 *  
742 *  
743 * @endcode
744 *
745 *
746 * <a name="step_15-MinimalSurfaceProblemsolve"></a>
747 * <h4>MinimalSurfaceProblem::solve</h4>
748 *
749
750 *
751 * The solve function is the same as always. At the end of the solution
752 * process we update the current solution by setting
753 * @f$u^{n+1}=u^n+\alpha^n\;\delta u^n@f$.
754 *
755 * @code
756 *   template <int dim>
757 *   void MinimalSurfaceProblem<dim>::solve()
758 *   {
759 *   SolverControl solver_control(system_rhs.size(),
760 *   system_rhs.l2_norm() * 1e-6);
761 *   SolverCG<Vector<double>> solver(solver_control);
762 *  
763 *   PreconditionSSOR<SparseMatrix<double>> preconditioner;
764 *   preconditioner.initialize(system_matrix, 1.2);
765 *  
766 *   solver.solve(system_matrix, newton_update, system_rhs, preconditioner);
767 *  
768 *   zero_constraints.distribute(newton_update);
769 *  
770 *   const double alpha = determine_step_length();
771 *   current_solution.add(alpha, newton_update);
772 *   }
773 *  
774 *  
775 * @endcode
776 *
777 *
778 * <a name="step_15-MinimalSurfaceProblemrefine_mesh"></a>
779 * <h4>MinimalSurfaceProblem::refine_mesh</h4>
780 *
781
782 *
783 * The first part of this function is the same as in @ref step_6 "step-6"... However,
784 * after refining the mesh we have to transfer the old solution to the new
785 *   one, which we do with the help of the SolutionTransfer class. The process
786 * is slightly convoluted, so let us describe it in detail:
787 *
788 * @code
789 *   template <int dim>
790 *   void MinimalSurfaceProblem<dim>::refine_mesh()
791 *   {
792 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
793 *  
794 *   KellyErrorEstimator<dim>::estimate(
795 *   dof_handler,
796 *   QGauss<dim - 1>(fe.degree + 1),
797 *   std::map<types::boundary_id, const Function<dim> *>(),
798 *   current_solution,
799 *   estimated_error_per_cell);
800 *  
801 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
802 *   estimated_error_per_cell,
803 *   0.3,
804 *   0.03);
805 *  
806 * @endcode
807 *
808 * Then we need an additional step: if, for example, you flag a cell that
809 * is once more refined than its neighbor, and that neighbor is not
810 * flagged for refinement, we would end up with a jump of two refinement
811 * levels across a cell interface. To avoid these situations, the library
812 *   will silently also have to refine the neighbor cell once. It does so by
813 *   calling the Triangulation::prepare_coarsening_and_refinement function
814 *   before actually doing the refinement and coarsening. This function
815 * flags a set of additional cells for refinement or coarsening, to
816 * enforce rules like the one-hanging-node rule. The cells that are
817 * flagged for refinement and coarsening after calling this function are
818 * exactly the ones that will actually be refined or coarsened. Usually,
819 * you don't have to do this by hand
820 * (Triangulation::execute_coarsening_and_refinement does this for
821 * you). However, we need to initialize the SolutionTransfer class and it
822 * needs to know the final set of cells that will be coarsened or refined
823 * in order to store the data from the old mesh and transfer to the new
824 * one. Thus, we call the function by hand:
825 *
826 * @code
827 *   triangulation.prepare_coarsening_and_refinement();
828 *  
829 * @endcode
830 *
831 * With this out of the way, we initialize a SolutionTransfer object with
832 * the present DoFHandler. We make a copy of the solution vector and attach
833 * it to the SolutionTransfer. Now we can actually execute the refinement
834 * and create the new matrices and vectors including the vector
835 * `current_solution`, that will hold the current solution on the new mesh
836 * after calling SolutionTransfer::interpolate():
837 *
838 * @code
839 *   SolutionTransfer<dim> solution_transfer(dof_handler);
840 *   const Vector<double> coarse_solution = current_solution;
841 *   solution_transfer.prepare_for_coarsening_and_refinement(coarse_solution);
842 *  
843 *   triangulation.execute_coarsening_and_refinement();
844 *  
845 *   setup_system();
846 *  
847 *   solution_transfer.interpolate(current_solution);
848 *  
849 * @endcode
850 *
851 * On the new mesh, there are different hanging nodes, computed in
852 * `setup_system()` above. To be on the safe side, we should make sure that
853 * the current solution's vector entries satisfy the hanging node
854 * constraints (see the discussion in the documentation of the
855 * SolutionTransfer class for why this is necessary) and boundary values. As
856 * explained at the end of the introduction, the interpolated solution does
857 * not automatically satisfy the boundary values even if the solution before
858 * refinement had the correct boundary values.
859 *
860 * @code
861 *   nonzero_constraints.distribute(current_solution);
862 *   }
863 *  
864 *  
865 *  
866 * @endcode
867 *
868 *
869 * <a name="step_15-MinimalSurfaceProblemcompute_residual"></a>
870 * <h4>MinimalSurfaceProblem::compute_residual</h4>
871 *
872
873 *
874 * In order to monitor convergence, we need a way to compute the norm of the
875 * (discrete) residual, i.e., the norm of the vector
876 * @f$\left<F(u^n),\varphi_i\right>@f$ with @f$F(u)=-\nabla \cdot \left(
877 * \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)@f$ as discussed in the
878 * introduction. It turns out that (although we don't use this feature in
879 * the current version of the program) one needs to compute the residual
880 * @f$\left<F(u^n+\alpha^n\;\delta u^n),\varphi_i\right>@f$ when determining
881 * optimal step lengths, and so this is what we implement here: the function
882 * takes the step length @f$\alpha^n@f$ as an argument. The original
883 * functionality is of course obtained by passing a zero as argument.
884 *
885
886 *
887 * In the function below, we first set up a vector for the residual, and
888 * then a vector for the evaluation point @f$u^n+\alpha^n\;\delta u^n@f$. This
889 * is followed by the same boilerplate code we use for all integration
890 * operations:
891 *
892 * @code
893 *   template <int dim>
894 *   double MinimalSurfaceProblem<dim>::compute_residual(const double alpha) const
895 *   {
896 *   Vector<double> residual(dof_handler.n_dofs());
897 *  
898 *   Vector<double> evaluation_point(dof_handler.n_dofs());
899 *   evaluation_point = current_solution;
900 *   evaluation_point.add(alpha, newton_update);
901 *  
902 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
903 *   FEValues<dim> fe_values(fe,
904 *   quadrature_formula,
905 *   update_gradients | update_quadrature_points |
906 *   update_JxW_values);
907 *  
908 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
909 *   const unsigned int n_q_points = quadrature_formula.size();
910 *  
911 *   Vector<double> cell_residual(dofs_per_cell);
912 *   std::vector<Tensor<1, dim>> gradients(n_q_points);
913 *  
914 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
915 *  
916 *   for (const auto &cell : dof_handler.active_cell_iterators())
917 *   {
918 *   cell_residual = 0;
919 *   fe_values.reinit(cell);
920 *  
921 * @endcode
922 *
923 * The actual computation is much as in
924 * <code>assemble_system()</code>. We first evaluate the gradients of
925 * @f$u^n+\alpha^n\,\delta u^n@f$ at the quadrature points, then compute
926 * the coefficient @f$a_n@f$, and then plug it all into the formula for
927 * the residual:
928 *
929 * @code
930 *   fe_values.get_function_gradients(evaluation_point, gradients);
931 *  
932 *  
933 *   for (unsigned int q = 0; q < n_q_points; ++q)
934 *   {
935 *   const double coeff =
936 *   1. / std::sqrt(1 + gradients[q] * gradients[q]);
937 *  
938 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
939 *   cell_residual(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
940 *   * coeff // * a_n
941 *   * gradients[q] // * \nabla u_n
942 *   * fe_values.JxW(q)); // * dx
943 *   }
944 *  
945 *   cell->get_dof_indices(local_dof_indices);
946 *   zero_constraints.distribute_local_to_global(cell_residual,
947 *   local_dof_indices,
948 *   residual);
949 *   }
950 *  
951 *   return residual.l2_norm();
952 *   }
953 *  
954 *  
955 *  
956 * @endcode
957 *
958 *
959 * <a name="step_15-MinimalSurfaceProblemdetermine_step_length"></a>
960 * <h4>MinimalSurfaceProblem::determine_step_length</h4>
961 *
962
963 *
964 * As discussed in the introduction, Newton's method frequently does not
965 * converge if we always take full steps, i.e., compute @f$u^{n+1}=u^n+\delta
966 * u^n@f$. Rather, one needs a damping parameter (step length) @f$\alpha^n@f$ and
967 * set @f$u^{n+1}=u^n+\alpha^n\delta u^n@f$. This function is the one called
968 *   sets @f$u^{n+1}=u^n+\alpha^n\delta u^n@f$. This function is the one called
969 *
970
971 *
972 * Here, we simply always return 0.1. This is of course a sub-optimal
973 * choice: ideally, what one wants is that the step size goes to one as we
974 * get closer to the solution, so that we get to enjoy the rapid quadratic
975 * convergence of Newton's method. We will discuss better strategies below
976 * in the results section, and @ref step_77 "step-77" also covers this aspect.
977 *
978 * @code
979 *   template <int dim>
980 *   double MinimalSurfaceProblem<dim>::determine_step_length() const
981 *   {
982 *   return 0.1;
983 *   }
984 *  
985 *  
986 *  
987 * @endcode
988 *
989 *
990 * <a name="step_15-MinimalSurfaceProblemoutput_results"></a>
991 * <h4>MinimalSurfaceProblem::output_results</h4>
992 *
993
994 *
995 * This last function to be called from `run()` outputs the current solution
996 * (and the Newton update) in graphical form as a VTU file. It is entirely the
997 * same as what has been used in previous tutorials.
998 *
999 * @code
1000 *   template <int dim>
1001 *   void MinimalSurfaceProblem<dim>::output_results(
1002 *   const unsigned int refinement_cycle) const
1003 *   {
1004 *   DataOut<dim> data_out;
1005 *  
1006 *   data_out.attach_dof_handler(dof_handler);
1007 *   data_out.add_data_vector(current_solution, "solution");
1008 *   data_out.add_data_vector(newton_update, "update");
1009 *   data_out.build_patches();
1010 *  
1011 *   const std::string filename =
1012 *   "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtu";
1013 *   std::ofstream output(filename);
1014 *   data_out.write_vtu(output);
1015 *   }
1016 *  
1017 *  
1018 * @endcode
1019 *
1020 *
1021 * <a name="step_15-MinimalSurfaceProblemrun"></a>
1022 * <h4>MinimalSurfaceProblem::run</h4>
1023 *
1024
1025 *
1026 * In the run function, we build the first grid and then have the top-level
1027 * logic for the Newton iteration.
1028 *
1029
1030 *
1031 * As described in the introduction, the domain is the unit disk around
1032 * the origin, created in the same way as shown in @ref step_6 "step-6". The mesh is
1033 * globally refined twice followed later on by several adaptive cycles.
1034 *
1035
1036 *
1037 *   Before starting the Newton loop, we also need to
1038 * ensure that the first Newton iterate already has the correct
1039 * boundary values, as discussed in the introduction.
1040 *
1041 * @code
1042 *   template <int dim>
1043 *   void MinimalSurfaceProblem<dim>::run()
1044 *   {
1045 *   GridGenerator::hyper_ball(triangulation);
1046 *   triangulation.refine_global(2);
1047 *  
1048 *   setup_system();
1049 *   nonzero_constraints.distribute(current_solution);
1050 *  
1051 * @endcode
1052 *
1053 * The Newton iteration starts next. We iterate until the (norm of the)
1054 * residual computed at the end of the previous iteration is less than
1055 *   @f$10^{-2}@f$, as checked at the end of the `do { ... } while` loop that
1056 * starts here. Because we don't have a reasonable value to initialize
1057 * the variable, we just use the largest value that can be represented
1058 * as a `double`.
1059 *
1060 * @code
1061 *   double last_residual_norm = std::numeric_limits<double>::max();
1062 *   unsigned int refinement_cycle = 0;
1063 *   do
1064 *   {
1065 *   std::cout << "Mesh refinement step " << refinement_cycle << std::endl;
1066 *  
1067 *   if (refinement_cycle != 0)
1068 *   refine_mesh();
1069 *  
1070 * @endcode
1071 *
1072 * On every mesh we do exactly five Newton steps. We print the initial
1073 * residual here and then start the iterations on this mesh.
1074 *
1075
1076 *
1077 * In every Newton step the system matrix and the right hand side have
1078 * to be computed first, after which we store the norm of the right
1079 * hand side as the residual to check against when deciding whether to
1080 * stop the iterations. We then solve the linear system (the function
1081 * also updates @f$u^{n+1}=u^n+\alpha^n\;\delta u^n@f$) and output the
1082 * norm of the residual at the end of this Newton step.
1083 *
1084
1085 *
1086 * After the end of this loop, we then also output the solution on the
1087 * current mesh in graphical form and increment the counter for the
1088 * mesh refinement cycle.
1089 *
1090 * @code
1091 *   std::cout << " Initial residual: " << compute_residual(0) << std::endl;
1092 *  
1093 *   for (unsigned int inner_iteration = 0; inner_iteration < 5;
1094 *   ++inner_iteration)
1095 *   {
1096 *   assemble_system();
1097 *   last_residual_norm = system_rhs.l2_norm();
1098 *  
1099 *   solve();
1100 *  
1101 *   std::cout << " Residual: " << compute_residual(0) << std::endl;
1102 *   }
1103 *  
1104 *   output_results(refinement_cycle);
1105 *  
1106 *   ++refinement_cycle;
1107 *   std::cout << std::endl;
1108 *   }
1109 *   while (last_residual_norm > 1e-2);
1110 *   }
1111 *   } // namespace Step15
1112 *  
1113 * @endcode
1114 *
1115 *
1116 * <a name="step_15-Themainfunction"></a>
1117 * <h4>The main function</h4>
1118 *
1119
1120 *
1121 * Finally the main function. This follows the scheme of all other main
1122 * functions:
1123 *
1124 * @code
1125 *   int main()
1126 *   {
1127 *   try
1128 *   {
1129 *   using namespace Step15;
1130 *  
1131 *   MinimalSurfaceProblem<2> problem;
1132 *   problem.run();
1133 *   }
1134 *   catch (std::exception &exc)
1135 *   {
1136 *   std::cerr << std::endl
1137 *   << std::endl
1138 *   << "----------------------------------------------------"
1139 *   << std::endl;
1140 *   std::cerr << "Exception on processing: " << std::endl
1141 *   << exc.what() << std::endl
1142 *   << "Aborting!" << std::endl
1143 *   << "----------------------------------------------------"
1144 *   << std::endl;
1145 *  
1146 *   return 1;
1147 *   }
1148 *   catch (...)
1149 *   {
1150 *   std::cerr << std::endl
1151 *   << std::endl
1152 *   << "----------------------------------------------------"
1153 *   << std::endl;
1154 *   std::cerr << "Unknown exception!" << std::endl
1155 *   << "Aborting!" << std::endl
1156 *   << "----------------------------------------------------"
1157 *   << std::endl;
1158 *   return 1;
1159 *   }
1160 *   return 0;
1161 *   }
1162 * @endcode
1163<a name="step_15-Results"></a><h1>Results</h1>
1164
1165
1166
1167The output of the program looks as follows:
1168@code
1169Mesh refinement step 0
1170 Initial residual: 1.53143
1171 Residual: 1.08746
1172 Residual: 0.966748
1173 Residual: 0.859602
1174 Residual: 0.766462
1175 Residual: 0.685475
1176
1177Mesh refinement step 1
1178 Initial residual: 0.868959
1179 Residual: 0.762125
1180 Residual: 0.677792
1181 Residual: 0.605762
1182 Residual: 0.542748
1183 Residual: 0.48704
1184
1185Mesh refinement step 2
1186 Initial residual: 0.426445
1187 Residual: 0.382731
1188 Residual: 0.343865
1189 Residual: 0.30918
1190 Residual: 0.278147
1191 Residual: 0.250327
1192
1193Mesh refinement step 3
1194 Initial residual: 0.282026
1195 Residual: 0.253146
1196 Residual: 0.227414
1197 Residual: 0.20441
1198 Residual: 0.183803
1199 Residual: 0.165319
1200
1201Mesh refinement step 4
1202 Initial residual: 0.154404
1203 Residual: 0.138723
1204 Residual: 0.124694
1205 Residual: 0.112124
1206 Residual: 0.100847
1207 Residual: 0.0907222
1208
1209....
1210@endcode
1211
1212Obviously, the scheme converges, if not very fast. We will come back to
1213strategies for accelerating the method below.
1214
1215One can visualize the solution after each set of five Newton
1216iterations, i.e., on each of the meshes on which we approximate the
1217solution. This yields the following set of images:
1218
1219<div class="twocolumn" style="width: 80%">
1220 <div>
1221 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_1.png"
1222 alt="Solution after zero cycles with contour lines." width="230" height="273">
1223 </div>
1224 <div>
1225 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_2.png"
1226 alt="Solution after one cycle with contour lines." width="230" height="273">
1227 </div>
1228 <div>
1229 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_3.png"
1230 alt="Solution after two cycles with contour lines." width="230" height="273">
1231 </div>
1232 <div>
1233 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_4.png"
1234 alt="Solution after three cycles with contour lines." width="230" height="273">
1235 </div>
1236</div>
1237
1238It is clearly visible that the solution better approximates the
1239minimal surface after each refinement. The solution converges to the
1240shape one would imagine for a soap bubble suspended in a wire loop
1241that is bent like
1242the boundary. It is also visible how the boundary
1243is smoothed out with each refinement. On the coarse mesh,
1244the boundary doesn't look much like a sine curve, whereas it matches
1245one ever better the finer the mesh gets.
1246
1247The mesh is mostly refined near the boundary, where the solution
1248increases or decreases strongly, whereas it is coarsened on
1249the inside of the domain, where nothing interesting happens,
1250because there isn't much change in the solution. The ninth
1251solution and mesh are shown here:
1252
1253<div class="onecolumn" style="width: 60%">
1254 <div>
1255 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_9.png"
1256 alt="Grid and solution of the ninth cycle with contour lines." width="507" height="507">
1257 </div>
1258</div>
1259
1260
1261
1262<a name="step-15-extensions"></a>
1263<a name="step_15-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1264
1265
1266The program shows the basic structure of a solver for a nonlinear, stationary
1267problem. However, it does not converge particularly fast, for good reasons:
1268
1269- The program always takes a step size of 0.1. This precludes the rapid,
1270 quadratic convergence for which Newton's method is typically chosen.
1271- It does not connect the nonlinear iteration with the mesh refinement
1272 iteration.
1273
1274Obviously, a better program would have to address these two points.
1275We will discuss them in the following.
1276
1277
1278<a name="step_15-Steplengthcontrol"></a><h4> Step length control </h4>
1279
1280
1281Newton's method has two well known properties:
1282- It may not converge from arbitrarily chosen starting points. Rather, a
1283 starting point has to be close enough to the solution to guarantee
1284 convergence. However, we can enlarge the area from which Newton's method
1285 converges by damping the iteration using a <i>step length</i>
1286 @f$0<\alpha^n\le 1@f$.
1287- It exhibits rapid convergence of quadratic order if (i) the step length is
1288 chosen as @f$\alpha^n=1@f$, and (ii) it does in fact converge with this choice
1289 of step length.
1290
1291A consequence of these two observations is that a successful strategy is to
1292choose @f$\alpha^n<1@f$ for the initial iterations until the iterate has come
1293close enough to allow for convergence with full step length, at which point we
1294want to switch to @f$\alpha^n=1@f$. The question is how to choose @f$\alpha^n@f$ in an
1295automatic fashion that satisfies these criteria.
1296
1297We do not want to review the literature on this topic here, but only briefly
1298mention that there are two fundamental approaches to the problem: backtracking
1299line search and trust region methods. The former is more widely used for
1300partial differential equations and essentially does the following:
1301- Compute a search direction
1302- See if the resulting residual of @f$u^n + \alpha^n\;\delta u^n@f$ with
1303 @f$\alpha^n=1@f$ is "substantially smaller" than that of @f$u^n@f$ alone.
1304- If so, then take @f$\alpha^n=1@f$.
1305- If not, try whether the residual is "substantially smaller" with
1306 @f$\alpha^n=2/3@f$.
1307- If so, then take @f$\alpha^n=2/3@f$.
1308- If not, try whether the residual is "substantially smaller" with
1309 @f$\alpha^n=(2/3)^2@f$.
1310- Etc.
1311One can of course choose other factors @f$r, r^2, \ldots@f$ than the @f$2/3,
1312(2/3)^2, \ldots@f$ chosen above, for @f$0<r<1@f$. It is obvious where the term
1313"backtracking" comes from: we try a long step, but if that doesn't work we try
1314a shorter step, then an ever shorter step, etc. The function
1315<code>determine_step_length()</code> is written the way it is to support
1316exactly this kind of use case.
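
To illustrate, here is a minimal backtracking sketch of what
<code>determine_step_length()</code> could look like. It is only a sketch: it
reuses the <code>compute_residual()</code> function shown above as the measure
of progress, fixes the reduction factor to @f$r=2/3@f$, and uses a plain
"smaller than before" acceptance test instead of the Wolfe or
Armijo-Goldstein conditions discussed next:

@code
template <int dim>
double MinimalSurfaceProblem<dim>::determine_step_length() const
{
  // The residual at the current iterate, i.e., for step length zero:
  const double old_residual = compute_residual(0);

  // Try alpha = 1, 2/3, (2/3)^2, ... until the residual decreases:
  double alpha = 1.0;
  for (unsigned int attempt = 0; attempt < 10; ++attempt)
    {
      if (compute_residual(alpha) < old_residual)
        return alpha;
      alpha *= 2.0 / 3.0;
    }

  // Give up backtracking and accept the last, very short step:
  return alpha;
}
@endcode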
1317
1318Whether we accept a particular step length @f$\alpha^n@f$ depends on how we define
1319"substantially smaller". There are a number of ways to do so, but without
1320going into detail let us just mention that the most common ones are to use the
1321Wolfe and Armijo-Goldstein conditions. For these, one can show the following:
1322- There is always a step length @f$\alpha^n@f$ for which the conditions are
1323 satisfied, i.e., the iteration never gets stuck as long as the problem is
1324 convex.
1325- If we are close enough to the solution, then the conditions allow for
1326 @f$\alpha^n=1@f$, thereby enabling quadratic convergence.
1327
1328We will not dwell on this here any further but leave the implementation of
1329such algorithms as an exercise. We note, however, that when implemented
1330correctly, then it is a common observation that most reasonable nonlinear
1331problems can be solved in anywhere between 5 and 15 Newton iterations to
1332engineering accuracy &mdash; substantially fewer than we need with the current
1333version of the program.
1334
1335More details on globalization methods including backtracking can be found,
1336for example, in @cite GNS08 and @cite NW99.
1337
1338A separate point, very much worthwhile making, however, is that in practice
1339the implementation of efficient nonlinear solvers is about as complicated as
1340the implementation of efficient finite element methods. One should not
1341attempt to reinvent the wheel by implementing all of the necessary steps
1342oneself. Substantial pieces of the puzzle are already available in
1343the LineMinimization::line_search() function and could be used to this end.
1344But, instead, just like building finite element solvers on libraries
1345such as deal.II, one should be building nonlinear solvers on libraries such
1346as [SUNDIALS](https://computing.llnl.gov/projects/sundials). In fact,
1347deal.II has interfaces to SUNDIALS and in particular to its nonlinear solver
1348sub-package KINSOL through the SUNDIALS::KINSOL class. It would not be
1349very difficult to base the current problem on that interface --
1350indeed, that is what @ref step_77 "step-77" does.
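
To give an idea of what that looks like, here is a sketch along the lines of
@ref step_77 "step-77": SUNDIALS::KINSOL is configured through a set of
callbacks. The callback names below are those of the SUNDIALS::KINSOL class,
the tolerance value is arbitrary, and
<code>compute_residual_vector()</code>,
<code>assemble_and_factorize_jacobian()</code>, and
<code>solve_linear_system()</code> are hypothetical helper functions one
would still have to write:

@code
SUNDIALS::KINSOL<Vector<double>>::AdditionalData additional_data;
additional_data.function_tolerance = 1e-8;

SUNDIALS::KINSOL<Vector<double>> nonlinear_solver(additional_data);

nonlinear_solver.reinit_vector = [&](Vector<double> &x) {
  x.reinit(dof_handler.n_dofs());
};

nonlinear_solver.residual = [&](const Vector<double> &evaluation_point,
                                Vector<double>       &residual) {
  compute_residual_vector(evaluation_point, residual); // hypothetical helper
};

nonlinear_solver.setup_jacobian = [&](const Vector<double> &current_u,
                                      const Vector<double> & /*current_f*/) {
  assemble_and_factorize_jacobian(current_u); // hypothetical helper
};

nonlinear_solver.solve_with_jacobian = [&](const Vector<double> &rhs,
                                           Vector<double>       &dst,
                                           const double          tolerance) {
  solve_linear_system(rhs, dst, tolerance); // hypothetical helper
};

nonlinear_solver.solve(current_solution);
@endcode

KINSOL then takes over the entire Newton loop, including step length control,
and only calls back into the program for the problem-specific pieces.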
1351
1352
1353
1354<a name="step_15-Integratingmeshrefinementandnonlinearandlinearsolvers"></a><h4> Integrating mesh refinement and nonlinear and linear solvers </h4>
1355
1356
1357We currently do exactly 5 iterations on each mesh. But is this optimal? One
1358could ask the following questions:
1359- Maybe it is worthwhile doing more iterations on the initial meshes since
1360 there, computations are cheap.
1361- On the other hand, we do not want to do too many iterations on every mesh:
1362 yes, we could drive the residual to zero on every mesh, but that would only
1363 mean that the nonlinear iteration error is far smaller than the
1364 discretization error.
1365- Should we solve the linear systems in each Newton step with higher or
1366 lower accuracy?
1367
1368Ultimately, what this boils down to is that we somehow need to couple the
1369discretization error on the current mesh with the nonlinear residual we want
1370to achieve with the Newton iterations on a given mesh, and with the accuracy
1371to which we want to solve the linear system with the CG method within each
1372Newton iteration.
1373
1374How to do this is, again, not entirely trivial, and we again leave it as a
1375future exercise.
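
As a starting point for such an exercise, one simple heuristic would be to
iterate on each mesh not a fixed five times, but until the residual has been
reduced by a fixed factor relative to the initial residual on that mesh. The
inner part of <code>run()</code> might then look like the following sketch,
where the reduction factor of @f$10^{-3}@f$ is of course an arbitrary choice:

@code
const double initial_residual = compute_residual(0);
double       residual         = initial_residual;

// Iterate until the nonlinear residual has dropped by three orders of
// magnitude on the current mesh; after that, further Newton iterations
// would only reduce the nonlinear error far below the discretization
// error, so we go to the next mesh instead:
while (residual > 1e-3 * initial_residual)
  {
    assemble_system();
    solve();
    residual = compute_residual(0);
  }
last_residual_norm = residual;
@endcode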
1376
1377
1378
1379<a name="step_15-UsingautomaticdifferentiationtocomputetheJacobianmatrix"></a><h4> Using automatic differentiation to compute the Jacobian matrix </h4>
1380
1381
1382As outlined in the introduction, when solving a nonlinear problem of
1383the form
1384 @f[
1385 F(u) \dealcoloneq
1386 -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
1387 = 0
1388 @f]
1389we use a Newton iteration that requires us to repeatedly solve the
1390linear partial differential equation
1391 @f{align*}{
1392 F'(u^{n},\delta u^{n}) &=- F(u^{n})
1393 @f}
1394so that we can compute the update
1395 @f{align*}{
1396 u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
1397 @f}
1398with the solution @f$\delta u^{n}@f$ of the Newton step. For the problem
1399here, we could compute the derivative @f$F'(u,\delta u)@f$ by hand and
1400 would obtain
1401 @f[
1402 F'(u,\delta u)
1403 =
1404 - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u|^{2}\right)^{\frac{1}{2}}}\nabla
1405 \delta u \right) +
1406 \nabla \cdot \left( \frac{\nabla u \cdot
1407 \nabla \delta u}{\left(1+|\nabla u|^{2}\right)^{\frac{3}{2}}} \nabla u
1408 \right).
1409 @f]
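Spelled out in weak form -- and this is, term for term, what the
<code>assemble_system()</code> function shown above implements -- the Newton
step with this derivative reads: with @f$a_n \dealcoloneq
\left(1+|\nabla u^n|^{2}\right)^{-1/2}@f$, find @f$\delta u^n@f$ so that for
all test functions @f$\varphi_i@f$
 @f[
   \int_\Omega \left( a_n \nabla \varphi_i \cdot \nabla \delta u^n
   - a_n^3 \left(\nabla \varphi_i \cdot \nabla u^n\right)
     \left(\nabla u^n \cdot \nabla \delta u^n\right) \right) \, dx
   =
   - \int_\Omega a_n \nabla \varphi_i \cdot \nabla u^n \, dx.
 @f]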
1410But this is already a sizable expression that is cumbersome both to
1411derive and to implement. It is also, in some sense, duplicative: If we
1412implement what @f$F(u)@f$ is somewhere in the code, then @f$F'(u,\delta u)@f$
1413is not an independent piece of information but is something that, at
1414least in principle, a computer should be able to infer itself.
1415Wouldn't it be nice if that could actually happen? That is, if we
1416really only had to implement @f$F(u)@f$, and @f$F'(u,\delta u)@f$ was then somehow
1417done implicitly? That is in fact possible, and runs under the name
1418"automatic differentiation". @ref step_71 "step-71" discusses this very
1419concept in general terms, and @ref step_72 "step-72" illustrates how this can be
1420applied in practice for the very problem we are considering here.
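
To make the idea concrete, here is a small, self-contained illustration using
the forward-mode types of the Sacado package (part of Trilinos, and one of the
packages underlying the deal.II wrappers used in @ref step_72 "step-72"). It
is not how one would organize things in an actual finite element program, but
it shows the principle: we implement only the energy density
@f$\psi(\nabla u)=\sqrt{1+|\nabla u|^{2}}@f$, and its derivative -- the
quantity @f$a_n\nabla u@f$ appearing in the residual -- falls out
automatically:

@code
#include <Sacado.hpp>
#include <iostream>

// The energy density, written for an arbitrary number type so that it can
// be evaluated with plain doubles as well as with AD types:
template <typename Number>
Number psi(const Number (&g)[2])
{
  using std::sqrt;
  return sqrt(1.0 + g[0] * g[0] + g[1] * g[1]);
}

int main()
{
  using ADNumber = Sacado::Fad::DFad<double>;

  // The point grad u = (0.3, 0.4) at which to differentiate:
  ADNumber g[2] = {0.3, 0.4};
  g[0].diff(0, 2); // mark g[0] as independent variable 0 of 2
  g[1].diff(1, 2); // mark g[1] as independent variable 1 of 2

  const ADNumber energy = psi(g);

  // energy.dx(i) equals d psi / d g_i, i.e., the i-th component of
  // a_n * grad u -- without ever writing the derivative down by hand:
  std::cout << energy.dx(0) << ' ' << energy.dx(1) << std::endl;
}
@endcode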
1421
1422
1423<a name="step_15-StoringtheJacobianmatrixinlowerprecisionfloatingpointvariables"></a><h4> Storing the Jacobian matrix in lower-precision floating point variables </h4>
1424
1425
1426On modern computer systems, *accessing* data in main memory takes far
1427longer than *actually doing* something with it: We can do many floating
1428point operations for the time it takes to load one floating point
1429number from memory onto the processor. Unfortunately, when we do things
1430such as matrix-vector products, we only multiply each matrix entry once
1431with another number (the corresponding entry of the vector) and then we
1432add it to something else -- so two floating point operations for one
1433load. (Strictly speaking, we also have to load the corresponding vector
1434entry, but at least sometimes we get to re-use that vector entry in
1435doing the products that correspond to the next row of the matrix.) This
1436is a fairly low "arithmetic intensity", and consequently we spend most
1437of our time during matrix-vector products waiting for data to arrive
1438from memory rather than actually doing floating point operations.
1439
1440This is of course one of the rationales for the "matrix-free" approach to
1441solving linear systems (see @ref step_37 "step-37", for example). But if you don't quite
1442want to go all that way to change the structure of the program, then
1443here is a different approach: Storing the system matrix (the "Jacobian")
1444in single-precision instead of double precision floating point numbers
1445(i.e., using `float` instead of `double` as the data type). This reduces
1446the amount of memory necessary by a factor of 1.5 (each matrix entry
1447in a SparseMatrix object requires storing the column index -- 4 bytes --
1448and the actual value -- either 4 or 8 bytes), and consequently
1449will speed up matrix-vector products by a factor of around 1.5 as well because,
1450as pointed out above, most of the time is spent loading data from memory
1451and loading 2/3 the amount of data should be roughly 3/2 times as fast. All
1452of this could be done using SparseMatrix<float> as the data type
1453for the system matrix. (If one used a sparse direct solver such as
1454SparseDirectUMFPACK instead of the CG solver employed here, one would in
1455principle also want it to compute and store its sparse decomposition in
1456`float` arithmetic. This is not currently implemented, though it could be done.)
1457
1458Of course, there is a downside to this: Lower precision data storage
1459also implies that we will not solve the linear system of the Newton
1460step as accurately as we might with `double` precision. At least
1461while we are far away from the solution of the nonlinear problem,
1462this may not be terrible: If we can do a Newton iteration in half
1463the time, we can afford to do a couple more Newton steps if the
1464search directions aren't as good.
1465But it turns out that even that isn't typically necessary: Both
1466theory and computational experience show that it is entirely
1467sufficient to store the Jacobian matrix in single precision
1468*as long as one stores the right hand side in double precision*.
1469A great overview of why this is so, along with numerical
1470experiments that also consider "half precision" floating point
1471numbers, can be found in @cite Kelley2022 .
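
In terms of code, and under the assumption that everything else stays as it
is, this would amount to little more than changing the type of the matrix
member variable while keeping all vectors in double precision. A sketch:

@code
// In the class declaration:
SparseMatrix<float> system_matrix;   // was: SparseMatrix<double>
Vector<double>      system_rhs;      // the right hand side stays double!

// The solve() function can remain exactly as shown above, because both
// SolverCG and PreconditionSSOR are written for arbitrary matrix types:
template <int dim>
void MinimalSurfaceProblem<dim>::solve()
{
  SolverControl solver_control(system_rhs.size(), system_rhs.l2_norm() * 1e-6);
  SolverCG<Vector<double>> solver(solver_control);

  PreconditionSSOR<SparseMatrix<float>> preconditioner;
  preconditioner.initialize(system_matrix, 1.2);

  solver.solve(system_matrix, newton_update, system_rhs, preconditioner);

  zero_constraints.distribute(newton_update);

  const double alpha = determine_step_length();
  current_solution.add(alpha, newton_update);
}
@endcode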
1472 *
1473 *
1474<a name="step_15-PlainProg"></a>
1475<h1> The plain program</h1>
1476@include "step-15.cc"
1477*/