Reference documentation for deal.II version 9.5.0
step-15.h
546 *   const unsigned int /*component*/) const
547 *   {
548 *   return std::sin(2 * numbers::PI * (p[0] + p[1]));
549 *   }
550 *  
551 * @endcode
552 *
553 *
554 * <a name="ThecodeMinimalSurfaceProblemcodeclassimplementation"></a>
555 * <h3>The <code>MinimalSurfaceProblem</code> class implementation</h3>
556 *
557
558 *
559 *
560 * <a name="MinimalSurfaceProblemMinimalSurfaceProblem"></a>
561 * <h4>MinimalSurfaceProblem::MinimalSurfaceProblem</h4>
562 *
563
564 *
565 * The constructor and destructor of the class are the same as in the first
566 * few tutorials.
567 *
568
569 *
570 *
571 * @code
572 *   template <int dim>
573 *   MinimalSurfaceProblem<dim>::MinimalSurfaceProblem()
574 *   : dof_handler(triangulation)
575 *   , fe(2)
576 *   {}
577 *  
578 *  
579 * @endcode
580 *
581 *
582 * <a name="MinimalSurfaceProblemsetup_system"></a>
583 * <h4>MinimalSurfaceProblem::setup_system</h4>
584 *
585
586 *
587 * As always in the setup-system function, we set up the variables of the
588 * finite element method. There are some differences to @ref step_6 "step-6", because
589 * there we start solving the PDE from scratch in every refinement cycle
590 * whereas here we need to take the solution from the previous mesh onto the
591 * current mesh. Consequently, we can't just reset solution vectors. The
592 * argument passed to this function thus indicates whether we can
593 * distribute degrees of freedom (plus compute constraints) and set the
594 * solution vector to zero or whether this has happened elsewhere already
595 * (specifically, in <code>refine_mesh()</code>).
596 *
597
598 *
599 *
600 * @code
601 *   template <int dim>
602 *   void MinimalSurfaceProblem<dim>::setup_system(const bool initial_step)
603 *   {
604 *   if (initial_step)
605 *   {
606 *   dof_handler.distribute_dofs(fe);
607 *   current_solution.reinit(dof_handler.n_dofs());
608 *  
609 *   hanging_node_constraints.clear();
610 *   DoFTools::make_hanging_node_constraints(dof_handler,
611 *   hanging_node_constraints);
612 *   hanging_node_constraints.close();
613 *   }
614 *  
615 *  
616 * @endcode
617 *
618 * The remaining parts of the function are the same as in @ref step_6 "step-6".
619 *
620
621 *
622 *
623 * @code
624 *   newton_update.reinit(dof_handler.n_dofs());
625 *   system_rhs.reinit(dof_handler.n_dofs());
626 *  
627 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
628 *   DoFTools::make_sparsity_pattern(dof_handler, dsp);
629 *  
630 *   hanging_node_constraints.condense(dsp);
631 *  
632 *   sparsity_pattern.copy_from(dsp);
633 *   system_matrix.reinit(sparsity_pattern);
634 *   }
635 *  
636 * @endcode
637 *
638 *
639 * <a name="MinimalSurfaceProblemassemble_system"></a>
640 * <h4>MinimalSurfaceProblem::assemble_system</h4>
641 *
642
643 *
644 * This function does the same as in the previous tutorials except that now,
645 * of course, the matrix and right hand side functions depend on the
646 * previous iteration's solution. As discussed in the introduction, we need
647 * to use zero boundary values for the Newton updates; we compute them at
648 * the end of this function.
649 *
650
651 *
652 * The top of the function contains the usual boilerplate code, setting up
653 * the objects that allow us to evaluate shape functions at quadrature
654 * points and temporary storage locations for the local matrices and
655 * vectors, as well as for the gradients of the previous solution at the
656 * quadrature points. We then start the loop over all cells:
657 *
658 * @code
659 *   template <int dim>
660 *   void MinimalSurfaceProblem<dim>::assemble_system()
661 *   {
662 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
663 *  
664 *   system_matrix = 0;
665 *   system_rhs = 0;
666 *  
667 *   FEValues<dim> fe_values(fe,
668 *   quadrature_formula,
669 *   update_gradients | update_quadrature_points |
670 *   update_JxW_values);
671 *  
672 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
673 *   const unsigned int n_q_points = quadrature_formula.size();
674 *  
675 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
676 *   Vector<double> cell_rhs(dofs_per_cell);
677 *  
678 *   std::vector<Tensor<1, dim>> old_solution_gradients(n_q_points);
679 *  
680 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
681 *  
682 *   for (const auto &cell : dof_handler.active_cell_iterators())
683 *   {
684 *   cell_matrix = 0;
685 *   cell_rhs = 0;
686 *  
687 *   fe_values.reinit(cell);
688 *  
689 * @endcode
690 *
691 * For the assembly of the linear system, we have to obtain the values
692 * of the previous solution's gradients at the quadrature
693 * points. There is a standard way of doing this: the
694 * FEValues::get_function_gradients function takes a vector that
695 * represents a finite element field defined on a DoFHandler, and
696 * evaluates the gradients of this field at the quadrature points of the
697 * cell with which the FEValues object has last been reinitialized.
698 * The values of the gradients at all quadrature points are then written
699 * into the second argument:
700 *
701 * @code
702 *   fe_values.get_function_gradients(current_solution,
703 *   old_solution_gradients);
704 *  
705 * @endcode
706 *
707 * With this, we can then do the integration loop over all quadrature
708 * points and shape functions. Having just computed the gradients of
709 * the old solution in the quadrature points, we are able to compute
710 * the coefficients @f$a_{n}@f$ in these points. The assembly of the
711 * system itself then looks similar to what we always do with the
712 * exception of the nonlinear terms, as does copying the results from
713 * the local objects into the global ones:
714 *
715 * @code
716 *   for (unsigned int q = 0; q < n_q_points; ++q)
717 *   {
718 *   const double coeff =
719 *   1.0 / std::sqrt(1 + old_solution_gradients[q] *
720 *   old_solution_gradients[q]);
721 *  
722 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
723 *   {
724 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
725 *   cell_matrix(i, j) +=
726 *   (((fe_values.shape_grad(i, q) // ((\nabla \phi_i
727 *   * coeff // * a_n
728 *   * fe_values.shape_grad(j, q)) // * \nabla \phi_j)
729 *   - // -
730 *   (fe_values.shape_grad(i, q) // (\nabla \phi_i
731 *   * coeff * coeff * coeff // * a_n^3
732 *   * (fe_values.shape_grad(j, q) // * (\nabla \phi_j
733 *   * old_solution_gradients[q]) // * \nabla u_n)
734 *   * old_solution_gradients[q])) // * \nabla u_n)))
735 *   * fe_values.JxW(q)); // * dx
736 *  
737 *   cell_rhs(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
738 *   * coeff // * a_n
739 *   * old_solution_gradients[q] // * \nabla u_n
740 *   * fe_values.JxW(q)); // * dx
741 *   }
742 *   }
743 *  
744 *   cell->get_dof_indices(local_dof_indices);
745 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
746 *   {
747 *   for (unsigned int j = 0; j < dofs_per_cell; ++j)
748 *   system_matrix.add(local_dof_indices[i],
749 *   local_dof_indices[j],
750 *   cell_matrix(i, j));
751 *  
752 *   system_rhs(local_dof_indices[i]) += cell_rhs(i);
753 *   }
754 *   }
755 *  
756 * @endcode
757 *
758 * Finally, we remove hanging nodes from the system and apply zero
759 * boundary values to the linear system that defines the Newton updates
760 * @f$\delta u^n@f$:
761 *
762 * @code
763 *   hanging_node_constraints.condense(system_matrix);
764 *   hanging_node_constraints.condense(system_rhs);
765 *  
766 *   std::map<types::global_dof_index, double> boundary_values;
767 *   VectorTools::interpolate_boundary_values(dof_handler,
768 *   0,
769 *   Functions::ZeroFunction<dim>(),
770 *   boundary_values);
771 *   MatrixTools::apply_boundary_values(boundary_values,
772 *   system_matrix,
773 *   newton_update,
774 *   system_rhs);
775 *   }
776 *  
777 *  
778 *  
779 * @endcode
780 *
781 *
782 * <a name="MinimalSurfaceProblemsolve"></a>
783 * <h4>MinimalSurfaceProblem::solve</h4>
784 *
785
786 *
787 * The solve function is the same as always. At the end of the solution
788 * process we update the current solution by setting
789 * @f$u^{n+1}=u^n+\alpha^n\;\delta u^n@f$.
790 *
791 * @code
792 *   template <int dim>
793 *   void MinimalSurfaceProblem<dim>::solve()
794 *   {
795 *   SolverControl solver_control(system_rhs.size(),
796 *   system_rhs.l2_norm() * 1e-6);
797 *   SolverCG<Vector<double>> solver(solver_control);
798 *  
799 *   PreconditionSSOR<SparseMatrix<double>> preconditioner;
800 *   preconditioner.initialize(system_matrix, 1.2);
801 *  
802 *   solver.solve(system_matrix, newton_update, system_rhs, preconditioner);
803 *  
804 *   hanging_node_constraints.distribute(newton_update);
805 *  
806 *   const double alpha = determine_step_length();
807 *   current_solution.add(alpha, newton_update);
808 *   }
809 *  
810 *  
811 * @endcode
812 *
813 *
814 * <a name="MinimalSurfaceProblemrefine_mesh"></a>
815 * <h4>MinimalSurfaceProblem::refine_mesh</h4>
816 *
817
818 *
819 * The first part of this function is the same as in @ref step_6 "step-6"... However,
820 * after refining the mesh we have to transfer the old solution to the new
821 * one which we do with the help of the SolutionTransfer class. The process
822 * is slightly convoluted, so let us describe it in detail:
823 *
824 * @code
825 *   template <int dim>
826 *   void MinimalSurfaceProblem<dim>::refine_mesh()
827 *   {
828 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
829 *  
830 *   KellyErrorEstimator<dim>::estimate(
831 *   dof_handler,
832 *   QGauss<dim - 1>(fe.degree + 1),
833 *   std::map<types::boundary_id, const Function<dim> *>(),
834 *   current_solution,
835 *   estimated_error_per_cell);
836 *  
837 *   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
838 *   estimated_error_per_cell,
839 *   0.3,
840 *   0.03);
841 *  
842 * @endcode
843 *
844 * Then we need an additional step: if, for example, you flag a cell that
845 * is once more refined than its neighbor, and that neighbor is not
846 * flagged for refinement, we would end up with a jump of two refinement
847 * levels across a cell interface. To avoid these situations, the library
848 * will silently also have to refine the neighbor cell once. It does so by
849 * calling the Triangulation::prepare_coarsening_and_refinement function
850 * before actually doing the refinement and coarsening. This function
851 * flags a set of additional cells for refinement or coarsening, to
852 * enforce rules like the one-hanging-node rule. The cells that are
853 * flagged for refinement and coarsening after calling this function are
854 * exactly the ones that will actually be refined or coarsened. Usually,
855 *   you don't have to do this by hand
856 *   (Triangulation::execute_coarsening_and_refinement does this for
857 *   you). However, we need to initialize the SolutionTransfer class and it
858 * needs to know the final set of cells that will be coarsened or refined
859 * in order to store the data from the old mesh and transfer to the new
860 * one. Thus, we call the function by hand:
861 *
862 * @code
863 *   triangulation.prepare_coarsening_and_refinement();
864 *  
865 * @endcode
866 *
867 * With this out of the way, we initialize a SolutionTransfer object with
868 * the present DoFHandler and attach the solution vector to it, followed
869 * by doing the actual refinement and distribution of degrees of freedom
870 * on the new mesh
871 *
872 * @code
873 *   SolutionTransfer<dim> solution_transfer(dof_handler);
874 *   solution_transfer.prepare_for_coarsening_and_refinement(current_solution);
875 *  
876 *   triangulation.execute_coarsening_and_refinement();
877 *  
878 *   dof_handler.distribute_dofs(fe);
879 *  
880 * @endcode
881 *
882 * Finally, we retrieve the old solution interpolated to the new
883 * mesh. Since the SolutionTransfer function does not actually store the
884 * values of the old solution, but rather indices, we need to preserve the
885 * old solution vector until we have gotten the new interpolated
886 * values. Thus, we have the new values written into a temporary vector,
887 * and only afterwards write them into the solution vector object:
888 *
889 * @code
890 *   Vector<double> tmp(dof_handler.n_dofs());
891 *   solution_transfer.interpolate(current_solution, tmp);
892 *   current_solution = tmp;
893 *  
894 * @endcode
895 *
896 * On the new mesh, there are different hanging nodes, for which we have to
897 * compute constraints again, after throwing away previous content of the
898 * object. To be on the safe side, we should then also make sure that the
899 * current solution's vector entries satisfy the hanging node constraints
900 * (see the discussion in the documentation of the SolutionTransfer class
901 * for why this is necessary). We could do this by calling
902 * `hanging_node_constraints.distribute(current_solution)` explicitly; we
903 * omit this step because this will happen at the end of the call to
904 * `set_boundary_values()` below, and it is not necessary to do it twice.
905 *
906 * @code
907 *   hanging_node_constraints.clear();
908 *  
909 *   DoFTools::make_hanging_node_constraints(dof_handler,
910 *   hanging_node_constraints);
911 *   hanging_node_constraints.close();
912 *  
913 * @endcode
914 *
915 * Once we have the interpolated solution and all information about
916 * hanging nodes, we have to make sure that the @f$u^n@f$ we now have
917 * actually has the correct boundary values. As explained at the end of
918 * the introduction, this is not automatically the case even if the
919 * solution before refinement had the correct boundary values, and so we
920 * have to explicitly make sure that it now has:
921 *
922 * @code
923 *   set_boundary_values();
924 *  
925 * @endcode
926 *
927 * We end the function by updating all the remaining data structures,
928 * indicating to <code>setup_system()</code> that this is not the first
929 * go-around and that it needs to preserve the content of the solution
930 * vector:
931 *
932 * @code
933 *   setup_system(false);
934 *   }
935 *  
936 *  
937 *  
938 * @endcode
939 *
940 *
941 * <a name="MinimalSurfaceProblemset_boundary_values"></a>
942 * <h4>MinimalSurfaceProblem::set_boundary_values</h4>
943 *
944
945 *
946 * The next function ensures that the solution vector's entries respect the
947 * boundary values for our problem. Having refined the mesh (or just
948 * started computations), there might be new nodal points on the
949 * boundary. These have values that are simply interpolated from the
950 * previous mesh in `refine_mesh()`, instead of the correct boundary
951 * values. This is fixed up by setting all boundary nodes of the current
952 * solution vector explicitly to the right value.
953 *
954
955 *
956 * There is one issue we have to pay attention to, though: If we have
957 * a hanging node right next to a new boundary node, then its value
958 * must also be adjusted to make sure that the finite element field
959 * remains continuous. This is what the call in the last line of this
960 * function does.
961 *
962 * @code
963 *   template <int dim>
964 *   void MinimalSurfaceProblem<dim>::set_boundary_values()
965 *   {
966 *   std::map<types::global_dof_index, double> boundary_values;
967 *   VectorTools::interpolate_boundary_values(dof_handler,
968 *   0,
969 *   BoundaryValues<dim>(),
970 *   boundary_values);
971 *   for (auto &boundary_value : boundary_values)
972 *   current_solution(boundary_value.first) = boundary_value.second;
973 *  
974 *   hanging_node_constraints.distribute(current_solution);
975 *   }
976 *  
977 *  
978 * @endcode
979 *
980 *
981 * <a name="MinimalSurfaceProblemcompute_residual"></a>
982 * <h4>MinimalSurfaceProblem::compute_residual</h4>
983 *
984
985 *
986 * In order to monitor convergence, we need a way to compute the norm of the
987 * (discrete) residual, i.e., the norm of the vector
988 * @f$\left<F(u^n),\varphi_i\right>@f$ with @f$F(u)=-\nabla \cdot \left(
989 * \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)@f$ as discussed in the
990 * introduction. It turns out that (although we don't use this feature in
991 * the current version of the program) one needs to compute the residual
992 * @f$\left<F(u^n+\alpha^n\;\delta u^n),\varphi_i\right>@f$ when determining
993 * optimal step lengths, and so this is what we implement here: the function
994 * takes the step length @f$\alpha^n@f$ as an argument. The original
995 * functionality is of course obtained by passing a zero as argument.
996 *
997
998 *
999 * In the function below, we first set up a vector for the residual, and
1000 * then a vector for the evaluation point @f$u^n+\alpha^n\;\delta u^n@f$. This
1001 * is followed by the same boilerplate code we use for all integration
1002 * operations:
1003 *
1004 * @code
1005 *   template <int dim>
1006 *   double MinimalSurfaceProblem<dim>::compute_residual(const double alpha) const
1007 *   {
1008 *   Vector<double> residual(dof_handler.n_dofs());
1009 *  
1010 *   Vector<double> evaluation_point(dof_handler.n_dofs());
1011 *   evaluation_point = current_solution;
1012 *   evaluation_point.add(alpha, newton_update);
1013 *  
1014 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1015 *   FEValues<dim> fe_values(fe,
1016 *   quadrature_formula,
1017 *   update_gradients | update_quadrature_points |
1018 *   update_JxW_values);
1019 *  
1020 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1021 *   const unsigned int n_q_points = quadrature_formula.size();
1022 *  
1023 *   Vector<double> cell_residual(dofs_per_cell);
1024 *   std::vector<Tensor<1, dim>> gradients(n_q_points);
1025 *  
1026 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1027 *  
1028 *   for (const auto &cell : dof_handler.active_cell_iterators())
1029 *   {
1030 *   cell_residual = 0;
1031 *   fe_values.reinit(cell);
1032 *  
1033 * @endcode
1034 *
1035 * The actual computation is much as in
1036 * <code>assemble_system()</code>. We first evaluate the gradients of
1037 * @f$u^n+\alpha^n\,\delta u^n@f$ at the quadrature points, then compute
1038 * the coefficient @f$a_n@f$, and then plug it all into the formula for
1039 * the residual:
1040 *
1041 * @code
1042 *   fe_values.get_function_gradients(evaluation_point, gradients);
1043 *  
1044 *  
1045 *   for (unsigned int q = 0; q < n_q_points; ++q)
1046 *   {
1047 *   const double coeff =
1048 *   1. / std::sqrt(1 + gradients[q] * gradients[q]);
1049 *  
1050 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1051 *   cell_residual(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
1052 *   * coeff // * a_n
1053 *   * gradients[q] // * \nabla u_n
1054 *   * fe_values.JxW(q)); // * dx
1055 *   }
1056 *  
1057 *   cell->get_dof_indices(local_dof_indices);
1058 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
1059 *   residual(local_dof_indices[i]) += cell_residual(i);
1060 *   }
1061 *  
1062 * @endcode
1063 *
1064 * At the end of this function we also have to deal with the hanging node
1065 * constraints and with the issue of boundary values. With regard to the
1066 * latter, we have to set to zero the elements of the residual vector for
1067 * all entries that correspond to degrees of freedom that sit at the
1068 * boundary. The reason is that because the value of the solution there is
1069 * fixed, they are of course not "real" degrees of freedom and so, strictly
1070 * speaking, we shouldn't have assembled entries in the residual vector
1071 * for them. However, as we always do, we want to do exactly the same
1072 * thing on every cell and so we didn't want to deal with the question
1073 * of whether a particular degree of freedom sits at the boundary in the
1074 * integration above. Rather, we will simply set to zero these entries
1075 * after the fact. To this end, we need to determine which degrees
1076 * of freedom do in fact belong to the boundary and then loop over all of
1077 * those and set the residual entry to zero. This happens in the following
1078 * lines which we have already seen used in @ref step_11 "step-11", using the appropriate
1079 * function from namespace DoFTools:
1080 *
1081 * @code
1082 *   hanging_node_constraints.condense(residual);
1083 *  
1084 *   for (const types::global_dof_index i :
1085 *   DoFTools::extract_boundary_dofs(dof_handler))
1086 *   residual(i) = 0;
1087 *  
1088 * @endcode
1089 *
1090 * At the end of the function, we return the norm of the residual:
1091 *
1092 * @code
1093 *   return residual.l2_norm();
1094 *   }
1095 *  
1096 *  
1097 *  
1098 * @endcode
1099 *
1100 *
1101 * <a name="MinimalSurfaceProblemdetermine_step_length"></a>
1102 * <h4>MinimalSurfaceProblem::determine_step_length</h4>
1103 *
1104
1105 *
1106 * As discussed in the introduction, Newton's method frequently does not
1107 * converge if we always take full steps, i.e., compute @f$u^{n+1}=u^n+\delta
1108 * u^n@f$. Rather, one needs a damping parameter (step length) @f$\alpha^n@f$ and
1109 * sets @f$u^{n+1}=u^n+\alpha^n\delta u^n@f$. This function is the one called
1110 * to compute @f$\alpha^n@f$.
1111 *
1112
1113 *
1114 * Here, we simply always return 0.1. This is of course a sub-optimal
1115 * choice: ideally, what one wants is that the step size goes to one as we
1116 * get closer to the solution, so that we get to enjoy the rapid quadratic
1117 * convergence of Newton's method. We will discuss better strategies below
1118 * in the results section, and @ref step_77 "step-77" also covers this aspect.
1119 *
1120 * @code
1121 *   template <int dim>
1122 *   double MinimalSurfaceProblem<dim>::determine_step_length() const
1123 *   {
1124 *   return 0.1;
1125 *   }
1126 *  
1127 *  
1128 *  
1129 * @endcode
1130 *
1131 *
1132 * <a name="MinimalSurfaceProblemoutput_results"></a>
1133 * <h4>MinimalSurfaceProblem::output_results</h4>
1134 *
1135
1136 *
1137 * This last function to be called from `run()` outputs the current solution
1138 * (and the Newton update) in graphical form as a VTU file. It is entirely the
1139 * same as what has been used in previous tutorials.
1140 *
1141 * @code
1142 *   template <int dim>
1143 *   void MinimalSurfaceProblem<dim>::output_results(
1144 *   const unsigned int refinement_cycle) const
1145 *   {
1146 *   DataOut<dim> data_out;
1147 *  
1148 *   data_out.attach_dof_handler(dof_handler);
1149 *   data_out.add_data_vector(current_solution, "solution");
1150 *   data_out.add_data_vector(newton_update, "update");
1151 *   data_out.build_patches();
1152 *  
1153 *   const std::string filename =
1154 *   "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtu";
1155 *   std::ofstream output(filename);
1156 *   data_out.write_vtu(output);
1157 *   }
1158 *  
1159 *  
1160 * @endcode
1161 *
1162 *
1163 * <a name="MinimalSurfaceProblemrun"></a>
1164 * <h4>MinimalSurfaceProblem::run</h4>
1165 *
1166
1167 *
1168 * In the run function, we build the first grid and then have the top-level
1169 * logic for the Newton iteration.
1170 *
1171
1172 *
1173 * As described in the introduction, the domain is the unit disk around
1174 * the origin, created in the same way as shown in @ref step_6 "step-6". The mesh is
1175 * globally refined twice followed later on by several adaptive cycles.
1176 *
1177
1178 *
1179 * Before starting the Newton loop, we also need to do a bit of
1180 * setup work: We need to create the basic data structures and
1181 * ensure that the first Newton iterate already has the correct
1182 * boundary values, as discussed in the introduction.
1183 *
1184 * @code
1185 *   template <int dim>
1186 *   void MinimalSurfaceProblem<dim>::run()
1187 *   {
1188 *   GridGenerator::hyper_ball(triangulation);
1189 *   triangulation.refine_global(2);
1190 *  
1191 *   setup_system(/*first time=*/true);
1192 *   set_boundary_values();
1193 *  
1194 * @endcode
1195 *
1196 * The Newton iteration starts next. We iterate until the (norm of the)
1197 * residual computed at the end of the previous iteration is less than
1198 * @f$10^{-2}@f$, as checked at the end of the `do { ... } while` loop that
1199 * starts here. Because we don't have a reasonable value to initialize
1200 * the variable, we just use the largest value that can be represented
1201 * as a `double`.
1202 *
1203 * @code
1204 *   double last_residual_norm = std::numeric_limits<double>::max();
1205 *   unsigned int refinement_cycle = 0;
1206 *   do
1207 *   {
1208 *   std::cout << "Mesh refinement step " << refinement_cycle << std::endl;
1209 *  
1210 *   if (refinement_cycle != 0)
1211 *   refine_mesh();
1212 *  
1213 * @endcode
1214 *
1215 * On every mesh we do exactly five Newton steps. We print the initial
1216 * residual here and then start the iterations on this mesh.
1217 *
1218
1219 *
1220 * In every Newton step the system matrix and the right hand side have
1221 * to be computed first, after which we store the norm of the right
1222 * hand side as the residual to check against when deciding whether to
1223 * stop the iterations. We then solve the linear system (the function
1224 * also updates @f$u^{n+1}=u^n+\alpha^n\;\delta u^n@f$) and output the
1225 * norm of the residual at the end of this Newton step.
1226 *
1227
1228 *
1229 * After the end of this loop, we then also output the solution on the
1230 * current mesh in graphical form and increment the counter for the
1231 * mesh refinement cycle.
1232 *
1233 * @code
1234 *   std::cout << " Initial residual: " << compute_residual(0) << std::endl;
1235 *  
1236 *   for (unsigned int inner_iteration = 0; inner_iteration < 5;
1237 *   ++inner_iteration)
1238 *   {
1239 *   assemble_system();
1240 *   last_residual_norm = system_rhs.l2_norm();
1241 *  
1242 *   solve();
1243 *  
1244 *   std::cout << " Residual: " << compute_residual(0) << std::endl;
1245 *   }
1246 *  
1247 *   output_results(refinement_cycle);
1248 *  
1249 *   ++refinement_cycle;
1250 *   std::cout << std::endl;
1251 *   }
1252 *   while (last_residual_norm > 1e-2);
1253 *   }
1254 *   } // namespace Step15
1255 *  
1256 * @endcode
1257 *
1258 *
1259 * <a name="Themainfunction"></a>
1260 * <h4>The main function</h4>
1261 *
1262
1263 *
1264 * Finally the main function. This follows the scheme of all other main
1265 * functions:
1266 *
1267 * @code
1268 *   int main()
1269 *   {
1270 *   try
1271 *   {
1272 *   using namespace Step15;
1273 *  
1274 *   MinimalSurfaceProblem<2> laplace_problem_2d;
1275 *   laplace_problem_2d.run();
1276 *   }
1277 *   catch (std::exception &exc)
1278 *   {
1279 *   std::cerr << std::endl
1280 *   << std::endl
1281 *   << "----------------------------------------------------"
1282 *   << std::endl;
1283 *   std::cerr << "Exception on processing: " << std::endl
1284 *   << exc.what() << std::endl
1285 *   << "Aborting!" << std::endl
1286 *   << "----------------------------------------------------"
1287 *   << std::endl;
1288 *  
1289 *   return 1;
1290 *   }
1291 *   catch (...)
1292 *   {
1293 *   std::cerr << std::endl
1294 *   << std::endl
1295 *   << "----------------------------------------------------"
1296 *   << std::endl;
1297 *   std::cerr << "Unknown exception!" << std::endl
1298 *   << "Aborting!" << std::endl
1299 *   << "----------------------------------------------------"
1300 *   << std::endl;
1301 *   return 1;
1302 *   }
1303 *   return 0;
1304 *   }
1305 * @endcode
1306<a name="Results"></a><h1>Results</h1>
1307
1308
1309
1310The output of the program looks as follows:
1311@code
1312Mesh refinement step 0
1313 Initial residual: 1.53143
1314 Residual: 1.08746
1315 Residual: 0.966748
1316 Residual: 0.859602
1317 Residual: 0.766462
1318 Residual: 0.685475
1319
1320Mesh refinement step 1
1321 Initial residual: 0.868959
1322 Residual: 0.762125
1323 Residual: 0.677792
1324 Residual: 0.605762
1325 Residual: 0.542748
1326 Residual: 0.48704
1327
1328Mesh refinement step 2
1329 Initial residual: 0.426445
1330 Residual: 0.382731
1331 Residual: 0.343865
1332 Residual: 0.30918
1333 Residual: 0.278147
1334 Residual: 0.250327
1335
1336Mesh refinement step 3
1337 Initial residual: 0.282026
1338 Residual: 0.253146
1339 Residual: 0.227414
1340 Residual: 0.20441
1341 Residual: 0.183803
1342 Residual: 0.165319
1343
1344Mesh refinement step 4
1345 Initial residual: 0.154404
1346 Residual: 0.138723
1347 Residual: 0.124694
1348 Residual: 0.112124
1349 Residual: 0.100847
1350 Residual: 0.0907222
1351
1352....
1353@endcode
1354
1355Obviously, the scheme converges, though not very quickly. We will come back to
1356strategies for accelerating the method below.
1357
1358One can visualize the solution after each set of five Newton
1359iterations, i.e., on each of the meshes on which we approximate the
1360solution. This yields the following set of images:
1361
1362<div class="twocolumn" style="width: 80%">
1363 <div>
1364 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_1.png"
1365 alt="Solution after zero cycles with contour lines." width="230" height="273">
1366 </div>
1367 <div>
1368 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_2.png"
1369 alt="Solution after one cycle with contour lines." width="230" height="273">
1370 </div>
1371 <div>
1372 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_3.png"
1373 alt="Solution after two cycles with contour lines." width="230" height="273">
1374 </div>
1375 <div>
1376 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_4.png"
1377 alt="Solution after three cycles with contour lines." width="230" height="273">
1378 </div>
1379</div>
1380
1381It is clearly visible that the solution minimizes the surface
1382after each refinement. The solution converges to the shape one
1383would imagine for a soap film spanning a wire loop that is bent
1384like the boundary. It is also visible how the boundary is
1385smoothed out after each refinement. On the coarse mesh, the
1386boundary doesn't yet look like a sine curve, whereas it
1387approaches one as the mesh gets finer.
1389
1390The mesh is mostly refined near the boundary, where the solution
1391increases or decreases strongly, whereas it is coarsened on
1392the inside of the domain, where nothing interesting happens,
1393because there isn't much change in the solution. The ninth
1394solution and mesh are shown here:
1395
1396<div class="onecolumn" style="width: 60%">
1397 <div>
1398 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_9.png"
1399 alt="Grid and solution of the ninth cycle with contour lines." width="507" height="507">
1400 </div>
1401</div>
1402
1403
1404
1405<a name="extensions"></a>
1406<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1407
1408
1409The program shows the basic structure of a solver for a nonlinear, stationary
1410problem. However, it does not converge particularly fast, for good reasons:
1411
1412- The program always takes a step size of 0.1. This precludes the rapid,
1413 quadratic convergence for which Newton's method is typically chosen.
1414- It does not connect the nonlinear iteration with the mesh refinement
1415 iteration.
1416
1417Obviously, a better program would have to address these two points.
1418We will discuss them in the following.
1419
1420
1421<a name="Steplengthcontrol"></a><h4> Step length control </h4>
1422
1423
1424Newton's method has two well known properties:
1425- It may not converge from arbitrarily chosen starting points. Rather, a
1426 starting point has to be close enough to the solution to guarantee
1427 convergence. However, we can enlarge the area from which Newton's method
1428 converges by damping the iteration using a <i>step length</i> @f$0<\alpha^n\le
1429 1@f$.
1430- It exhibits rapid convergence of quadratic order if (i) the step length is
1431 chosen as @f$\alpha^n=1@f$, and (ii) it does in fact converge with this choice
1432 of step length.
1433
1434A consequence of these two observations is that a successful strategy is to
1435choose @f$\alpha^n<1@f$ for the initial iterations until the iterate has come
1436close enough to allow for convergence with full step length, at which point we
1437want to switch to @f$\alpha^n=1@f$. The question is how to choose @f$\alpha^n@f$ in an
1438automatic fashion that satisfies these criteria.
1439
1440We do not want to review the literature on this topic here, but only briefly
1441mention that there are two fundamental approaches to the problem: backtracking
1442line search and trust region methods. The former is more widely used for
1443partial differential equations and essentially does the following:
1444- Compute a search direction
1445- See if the resulting residual of @f$u^n + \alpha^n\;\delta u^n@f$ with
1446 @f$\alpha^n=1@f$ is "substantially smaller" than that of @f$u^n@f$ alone.
1447- If so, then take @f$\alpha^n=1@f$.
1448- If not, try whether the residual is "substantially smaller" with
1449 @f$\alpha^n=2/3@f$.
1450- If so, then take @f$\alpha^n=2/3@f$.
1451- If not, try whether the residual is "substantially smaller" with
1452 @f$\alpha^n=(2/3)^2@f$.
1453- Etc.
1454One can of course choose other factors @f$r, r^2, \ldots@f$ than the @f$2/3,
1455(2/3)^2, \ldots@f$ chosen above, for @f$0<r<1@f$. It is obvious where the term
1456"backtracking" comes from: we try a long step, but if that doesn't work we try
1457a shorter step, and ever shorter steps, etc. The function
1458<code>determine_step_length()</code> is written the way it is to support
1459exactly this kind of use case.
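To illustrate, here is a minimal backtracking sketch of what a replacement for
the all-too-simple version of <code>determine_step_length()</code> shown above
could look like, reusing the <code>compute_residual()</code> function of this
program. The acceptance criterion (loosely modeled on the sufficient decrease
conditions discussed below) and all constants are ad hoc choices for this
sketch, not part of the tutorial program:
@code
template <int dim>
double MinimalSurfaceProblem<dim>::determine_step_length() const
{
  // Residual norm of the current iterate, i.e., for alpha = 0:
  const double residual_0 = compute_residual(0);

  // Try alpha = 1, 2/3, (2/3)^2, ... and accept the first step length
  // that reduces the residual "substantially":
  double alpha = 1.;
  for (unsigned int trial = 0; trial < 10; ++trial)
    {
      if (compute_residual(alpha) < (1. - 0.1 * alpha) * residual_0)
        return alpha;
      alpha *= 2. / 3.;
    }

  // Give up backtracking and take the last, shortest trial step:
  return alpha;
}
@endcode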
1460
1461Whether we accept a particular step length @f$\alpha^n@f$ depends on how we define
1462"substantially smaller". There are a number of ways to do so, but without
1463going into detail let us just mention that the most common ones are to use the
1464Wolfe and Armijo-Goldstein conditions. For these, one can show the following:
1465- There is always a step length @f$\alpha^n@f$ for which the conditions are
1466 satisfied, i.e., the iteration never gets stuck as long as the problem is
1467 convex.
1468- If we are close enough to the solution, then the conditions allow for
1469 @f$\alpha^n=1@f$, thereby enabling quadratic convergence.
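To make at least one of these concrete (stated here in finite-dimensional
notation for a generic objective function @f$f@f$ that the iteration is meant
to minimize): the Armijo, or "sufficient decrease", condition accepts a step
length @f$\alpha^n@f$ whenever
@f[
  f(u^n + \alpha^n\,\delta u^n)
  \le
  f(u^n) + c\,\alpha^n\, \nabla f(u^n) \cdot \delta u^n
@f]
for some fixed constant @f$0<c<1@f$ (a common choice is @f$c=10^{-4}@f$);
because @f$\delta u^n@f$ is a descent direction, the right hand side is
strictly smaller than @f$f(u^n)@f$.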
1470
1471We will not dwell on this here any further but leave the implementation of
1472such algorithms as an exercise. We note, however, that when implemented
1473correctly then it is a common observation that most reasonably nonlinear
1474problems can be solved in anywhere between 5 and 15 Newton iterations to
1475engineering accuracy &mdash; substantially fewer than we need with the current
1476version of the program.
1477
1478More details on globalization methods including backtracking can be found,
1479for example, in @cite GNS08 and @cite NW99.
1480
1481A separate point, very much worthwhile making, however, is that in practice
1482the implementation of efficient nonlinear solvers is about as complicated as
1483the implementation of efficient finite element methods. One should not
1484attempt to reinvent the wheel by implementing all of the necessary steps
1485oneself. Substantial pieces of the puzzle are already available in
1486the LineMinimization::line_search() function and could be used to this end.
1487But, instead, just like building finite element solvers on libraries
1488such as deal.II, one should be building nonlinear solvers on libraries such
1489as [SUNDIALS](https://computing.llnl.gov/projects/sundials). In fact,
1490deal.II has interfaces to SUNDIALS and in particular to its nonlinear solver
1491sub-package KINSOL through the SUNDIALS::KINSOL class. It would not be
1492very difficult to base the current problem on that interface --
1493indeed, that is what @ref step_77 "step-77" does.
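To give a flavor of what building on that interface looks like, here is a
schematic sketch; the callback bodies are elided and the exact signatures have
changed between deal.II versions, so consult @ref step_77 "step-77" and the
SUNDIALS::KINSOL documentation for the authoritative form:
@code
  SUNDIALS::KINSOL<Vector<double>>::AdditionalData additional_data;
  additional_data.function_tolerance = 1e-8;

  SUNDIALS::KINSOL<Vector<double>> nonlinear_solver(additional_data);

  // We only describe how to create vectors, evaluate the residual F(u),
  // and solve linear systems with the Jacobian; KINSOL itself then takes
  // care of step length control and convergence monitoring.
  nonlinear_solver.reinit_vector = [&](Vector<double> &x) {
    x.reinit(dof_handler.n_dofs());
  };

  nonlinear_solver.residual =
    [&](const Vector<double> &u, Vector<double> &residual) {
      // ...assemble F(u) into 'residual'...
    };

  nonlinear_solver.solve_with_jacobian =
    [&](const Vector<double> &rhs, Vector<double> &dst,
        const double tolerance) {
      // ...solve J(u) dst = rhs to the given tolerance...
    };

  nonlinear_solver.solve(current_solution);
@endcode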
1494
1495
1496
1497<a name="Integratingmeshrefinementandnonlinearandlinearsolvers"></a><h4> Integrating mesh refinement and nonlinear and linear solvers </h4>
1498
1499
1500We currently do exactly 5 iterations on each mesh. But is this optimal? One
1501could ask the following questions:
1502- Maybe it is worthwhile doing more iterations on the initial meshes since
1503 there, computations are cheap.
1504- On the other hand, we do not want to do too many iterations on every mesh:
1505 yes, we could drive the residual to zero on every mesh, but that would only
1506 mean that the nonlinear iteration error is far smaller than the
1507 discretization error.
1508- Should we solve the linear systems in each Newton step with higher or
1509 lower accuracy?
1510
1511Ultimately, what this boils down to is that we somehow need to couple the
1512discretization error on the current mesh with the nonlinear residual we want
1513to achieve with the Newton iterations on a given mesh, and with the
1514tolerance to which we want to solve the linear system with the CG method
1515within each Newton iteration.
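As a crude illustration (not a rigorous criterion), one could replace the
fixed five Newton steps in <code>run()</code> by a loop that iterates until
the nonlinear residual has dropped below a fraction of the discretization
error; in the sketch below, <code>discretization_error</code> is a
hypothetical estimate of the latter, for example obtained from the Kelly
error indicators already computed in <code>refine_mesh()</code>:
@code
  // Sketch only: 'discretization_error' is assumed to be available here;
  // the factor 0.1 and the iteration cap are arbitrary safeguards.
  unsigned int inner_iteration = 0;
  while ((compute_residual(0) > 0.1 * discretization_error) &&
         (inner_iteration < 20))
    {
      assemble_system();
      last_residual_norm = system_rhs.l2_norm();
      solve();
      ++inner_iteration;
    }
@endcode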
1516
1517How to do this is, again, not entirely trivial, and we again leave it as a
1518future exercise.
1519
1520
1521
1522<a name="UsingautomaticdifferentiationtocomputetheJacobianmatrix"></a><h4> Using automatic differentiation to compute the Jacobian matrix </h4>
1523
1524
1525As outlined in the introduction, when solving a nonlinear problem of
1526the form
1527 @f[
1528 F(u) \dealcoloneq
1529 -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
1530 = 0
1531 @f]
1532we use a Newton iteration that requires us to repeatedly solve the
1533linear partial differential equation
1534 @f{align*}
1535 F'(u^{n},\delta u^{n}) &=- F(u^{n})
1536 @f}
1537so that we can compute the update
1538 @f{align*}
1539 u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
1540 @f}
1541with the solution @f$\delta u^{n}@f$ of the Newton step. For the problem
1542here, we could compute the derivative @f$F'(u,\delta u)@f$ by hand and
1543obtain
1544 @f[
1545 F'(u,\delta u)
1546 =
1547 - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u|^{2}\right)^{\frac{1}{2}}}\nabla
1548 \delta u \right) +
1549 \nabla \cdot \left( \frac{\nabla u \cdot
1550 \nabla \delta u}{\left(1+|\nabla u|^{2}\right)^{\frac{3}{2}}} \nabla u
1551 \right).
1552 @f]
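This formula is nothing but the directional (Gateaux) derivative of @f$F@f$
at @f$u@f$ in the direction @f$\delta u@f$,
@f[
  F'(u,\delta u)
  =
  \left.\frac{d}{d\varepsilon} F(u+\varepsilon\,\delta u)\right|_{\varepsilon=0},
@f]
applied to the nonlinear divergence term and evaluated via the chain rule.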
1553But this is already a sizable expression that is cumbersome both to
1554derive and to implement. It is also, in some sense, duplicative: If we
1555implement what @f$F(u)@f$ is somewhere in the code, then @f$F'(u,\delta u)@f$
1556is not an independent piece of information but is something that, at
1557least in principle, a computer should be able to infer itself.
1558Wouldn't it be nice if that could actually happen? That is, if we
1559really only had to implement @f$F(u)@f$, and @f$F'(u,\delta u)@f$ was then somehow
1560done implicitly? That is in fact possible, and runs under the name
1561"automatic differentiation". @ref step_71 "step-71" discusses this very
1562concept in general terms, and @ref step_72 "step-72" illustrates how this can be
1563applied in practice for the very problem we are considering here.
1564
1565
1566<a name="StoringtheJacobianmatrixinlowerprecisionfloatingpointvariables"></a><h4> Storing the Jacobian matrix in lower-precision floating point variables </h4>
1567
1568
1569On modern computer systems, *accessing* data in main memory takes far
1570longer than *actually doing* something with it: We can do many floating
1571point operations for the time it takes to load one floating point
1572number from memory onto the processor. Unfortunately, when we do things
1573such as matrix-vector products, we only multiply each matrix entry once
1574with another number (the corresponding entry of the vector) and then we
1575add it to something else -- so two floating point operations for one
1576load. (Strictly speaking, we also have to load the corresponding vector
1577entry, but at least sometimes we get to re-use that vector entry in
1578doing the products that correspond to the next row of the matrix.) This
1579is a fairly low "arithmetic intensity", and consequently we spend most
1580of our time during matrix-vector products waiting for data to arrive
1581from memory rather than actually doing floating point operations.
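To put a number on this: with the 12 bytes per matrix entry of a
SparseMatrix<double> discussed below (a 4 byte column index plus an 8 byte
value) and the two floating point operations performed per entry, a
matrix-vector product runs at an arithmetic intensity of roughly
@f$2/12\approx 0.17@f$ floating point operations per byte loaded -- far below
what current processors could sustain if the data were already in their
caches.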
1582
1583This is of course one of the rationales for the "matrix-free" approach to
1584solving linear systems (see @ref step_37 "step-37", for example). But if you don't quite
1585want to go all that way to change the structure of the program, then
1586here is a different approach: Storing the system matrix (the "Jacobian")
1587in single-precision instead of double precision floating point numbers
1588(i.e., using `float` instead of `double` as the data type). This reduces
1589the amount of memory necessary by a factor of 1.5 (each matrix entry
1590in a SparseMatrix object requires storing the column index -- 4 bytes --
1591and the actual value -- either 4 or 8 bytes), and consequently
1592will speed up matrix-vector products by a factor of around 1.5 as well because,
1593as pointed out above, most of the time is spent loading data from memory
1594and loading 2/3 the amount of data should be roughly 3/2 times as fast. All
1595of this could be done using SparseMatrix<float> as the data type
1596for the system matrix. (In principle, if one used a sparse direct solver
1597such as SparseDirectUMFPACK instead of the CG method employed here, one
1598would also like it to compute and store its sparse decomposition in
1599`float` arithmetic. This is not currently implemented, though it could be done.)
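In code, this amounts to little more than a couple of type changes, sketched
here under the assumption that deal.II's templated iterative solvers and
preconditioners accept the mixed case of a `float` matrix combined with
`double` vectors:
@code
  // Sketch: the member variable changes from SparseMatrix<double> to
  SparseMatrix<float> system_matrix;

  // ...while in solve() only the preconditioner type follows suit; the
  // solution and right hand side vectors remain Vector<double>:
  PreconditionSSOR<SparseMatrix<float>> preconditioner;
  preconditioner.initialize(system_matrix, 1.2);

  solver.solve(system_matrix, newton_update, system_rhs, preconditioner);
@endcode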
1600
1601Of course, there is a downside to this: Lower precision data storage
1602also implies that we will not solve the linear system of the Newton
1603step as accurately as we might with `double` precision. At least
1604while we are far away from the solution of the nonlinear problem,
1605this may not be terrible: If we can do a Newton iteration in half
1606the time, we can afford to do a couple more Newton steps if the
1607search directions aren't as good.
1608But it turns out that even that isn't typically necessary: Both
1609theory and computational experience show that it is entirely
1610sufficient to store the Jacobian matrix in single precision
1611*as long as one stores the right hand side in double precision*.
1612A great overview of why this is so, along with numerical
1613experiments that also consider "half precision" floating point
1614numbers, can be found in @cite Kelley2022 .
1615 *
1616 *
1617<a name="PlainProg"></a>
1618<h1> The plain program</h1>
1619@include "step-15.cc"
1620*/