step-15.h
543 * template <int dim>
544 * double BoundaryValues<dim>::value(const Point<dim> &p,
545 * const unsigned int /*component*/) const
546 * {
547 * return std::sin(2 * numbers::PI * (p[0] + p[1]));
548 * }
549 *
550 * @endcode
551 *
552 *
553 * <a name="ThecodeMinimalSurfaceProblemcodeclassimplementation"></a>
554 * <h3>The <code>MinimalSurfaceProblem</code> class implementation</h3>
555 *
556
557 *
558 *
559 * <a name="MinimalSurfaceProblemMinimalSurfaceProblem"></a>
560 * <h4>MinimalSurfaceProblem::MinimalSurfaceProblem</h4>
561 *
562
563 *
564 * The constructor and destructor of the class are the same as in the first
565 * few tutorials.
566 *
567
568 *
569 *
570 * @code
571 * template <int dim>
572 * MinimalSurfaceProblem<dim>::MinimalSurfaceProblem()
573 * : dof_handler(triangulation)
574 * , fe(2)
575 * {}
576 *
577 *
578 * @endcode
579 *
580 *
581 * <a name="MinimalSurfaceProblemsetup_system"></a>
582 * <h4>MinimalSurfaceProblem::setup_system</h4>
583 *
584
585 *
586 * As always in the setup-system function, we set up the variables of the
587 * finite element method. There are some differences to @ref step_6 "step-6", because
588 * there we start solving the PDE from scratch in every refinement cycle
589 * whereas here we need to take the solution from the previous mesh onto the
590 * current mesh. Consequently, we can't just reset solution vectors. The
591 * argument passed to this function thus indicates whether we can
592 * distribute degrees of freedom (plus compute constraints) and set the
593 * solution vector to zero or whether this has happened elsewhere already
594 * (specifically, in <code>refine_mesh()</code>).
595 *
596
597 *
598 *
599 * @code
600 * template <int dim>
601 * void MinimalSurfaceProblem<dim>::setup_system(const bool initial_step)
602 * {
603 * if (initial_step)
604 * {
605 * dof_handler.distribute_dofs(fe);
606 * current_solution.reinit(dof_handler.n_dofs());
607 *
608 * hanging_node_constraints.clear();
609 * DoFTools::make_hanging_node_constraints(dof_handler,
610 * hanging_node_constraints);
611 * hanging_node_constraints.close();
612 * }
613 *
614 *
615 * @endcode
616 *
617 * The remaining parts of the function are the same as in @ref step_6 "step-6".
618 *
619
620 *
621 *
622 * @code
623 * newton_update.reinit(dof_handler.n_dofs());
624 * system_rhs.reinit(dof_handler.n_dofs());
625 *
626 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
627 * DoFTools::make_sparsity_pattern(dof_handler, dsp);
628 *
629 * hanging_node_constraints.condense(dsp);
630 *
631 * sparsity_pattern.copy_from(dsp);
632 * system_matrix.reinit(sparsity_pattern);
633 * }
634 *
635 * @endcode
636 *
637 *
638 * <a name="MinimalSurfaceProblemassemble_system"></a>
639 * <h4>MinimalSurfaceProblem::assemble_system</h4>
640 *
641
642 *
643 * This function does the same as in the previous tutorials except that now,
644 * of course, the matrix and right hand side functions depend on the
645 * previous iteration's solution. As discussed in the introduction, we need
646 * to use zero boundary values for the Newton updates; we compute them at
647 * the end of this function.
648 *
649
650 *
651 * The top of the function contains the usual boilerplate code, setting up
652 * the objects that allow us to evaluate shape functions at quadrature
653 * points and temporary storage locations for the local matrices and
654 * vectors, as well as for the gradients of the previous solution at the
655 * quadrature points. We then start the loop over all cells:
656 *
657 * @code
658 * template <int dim>
659 * void MinimalSurfaceProblem<dim>::assemble_system()
660 * {
661 * const QGauss<dim> quadrature_formula(fe.degree + 1);
662 *
663 * system_matrix = 0;
664 * system_rhs = 0;
665 *
666 * FEValues<dim> fe_values(fe,
667 * quadrature_formula,
668 * update_gradients | update_quadrature_points |
669 * update_JxW_values);
670 *
671 * const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
672 * const unsigned int n_q_points = quadrature_formula.size();
673 *
674 * FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
675 * Vector<double> cell_rhs(dofs_per_cell);
676 *
677 * std::vector<Tensor<1, dim>> old_solution_gradients(n_q_points);
678 *
679 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
680 *
681 * for (const auto &cell : dof_handler.active_cell_iterators())
682 * {
683 * cell_matrix = 0;
684 * cell_rhs = 0;
685 *
686 * fe_values.reinit(cell);
687 *
688 * @endcode
689 *
690 * For the assembly of the linear system, we have to obtain the values
691 * of the previous solution's gradients at the quadrature
692 * points. There is a standard way of doing this: the
693 * FEValues::get_function_gradients function takes a vector that
694 * represents a finite element field defined on a DoFHandler, and
695 * evaluates the gradients of this field at the quadrature points of the
696 * cell with which the FEValues object has last been reinitialized.
697 * The values of the gradients at all quadrature points are then written
698 * into the second argument:
699 *
700 * @code
701 * fe_values.get_function_gradients(current_solution,
702 * old_solution_gradients);
703 *
704 * @endcode
705 *
706 * With this, we can then do the integration loop over all quadrature
707 * points and shape functions. Having just computed the gradients of
708 * the old solution in the quadrature points, we are able to compute
709 * the coefficients @f$a_{n}@f$ in these points. The assembly of the
710 * system itself then looks similar to what we always do with the
711 * exception of the nonlinear terms, as does copying the results from
712 * the local objects into the global ones:
713 *
714 * @code
715 * for (unsigned int q = 0; q < n_q_points; ++q)
716 * {
717 * const double coeff =
718 * 1.0 / std::sqrt(1 + old_solution_gradients[q] *
719 * old_solution_gradients[q]);
720 *
721 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
722 * {
723 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
724 * cell_matrix(i, j) +=
725 * (((fe_values.shape_grad(i, q) // ((\nabla \phi_i
726 * * coeff // * a_n
727 * * fe_values.shape_grad(j, q)) // * \nabla \phi_j)
728 * - // -
729 * (fe_values.shape_grad(i, q) // (\nabla \phi_i
730 * * coeff * coeff * coeff // * a_n^3
731 * * (fe_values.shape_grad(j, q) // * (\nabla \phi_j
732 * * old_solution_gradients[q]) // * \nabla u_n)
733 * * old_solution_gradients[q])) // * \nabla u_n)))
734 * * fe_values.JxW(q)); // * dx
735 *
736 * cell_rhs(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
737 * * coeff // * a_n
738 * * old_solution_gradients[q] // * \nabla u_n
739 * * fe_values.JxW(q)); // * dx
740 * }
741 * }
742 *
743 * cell->get_dof_indices(local_dof_indices);
744 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
745 * {
746 * for (unsigned int j = 0; j < dofs_per_cell; ++j)
747 * system_matrix.add(local_dof_indices[i],
748 * local_dof_indices[j],
749 * cell_matrix(i, j));
750 *
751 * system_rhs(local_dof_indices[i]) += cell_rhs(i);
752 * }
753 * }
754 *
755 * @endcode
756 *
757 * Finally, we remove hanging nodes from the system and apply zero
758 * boundary values to the linear system that defines the Newton updates
759 * @f$\delta u^n@f$:
760 *
761 * @code
762 * hanging_node_constraints.condense(system_matrix);
763 * hanging_node_constraints.condense(system_rhs);
764 *
765 * std::map<types::global_dof_index, double> boundary_values;
766 * VectorTools::interpolate_boundary_values(dof_handler,
767 * 0,
768 * Functions::ZeroFunction<dim>(),
769 * boundary_values);
770 * MatrixTools::apply_boundary_values(boundary_values,
771 * system_matrix,
772 * newton_update,
773 * system_rhs);
774 * }
775 *
776 *
777 *
778 * @endcode
779 *
780 *
781 * <a name="MinimalSurfaceProblemsolve"></a>
782 * <h4>MinimalSurfaceProblem::solve</h4>
783 *
784
785 *
786 * The solve function is the same as always. At the end of the solution
787 * process we update the current solution by setting
788 * @f$u^{n+1}=u^n+\alpha^n\;\delta u^n@f$.
789 *
790 * @code
791 * template <int dim>
792 * void MinimalSurfaceProblem<dim>::solve()
793 * {
794 * SolverControl solver_control(system_rhs.size(),
795 * system_rhs.l2_norm() * 1e-6);
796 * SolverCG<Vector<double>> solver(solver_control);
797 *
798 * PreconditionSSOR<SparseMatrix<double>> preconditioner;
799 * preconditioner.initialize(system_matrix, 1.2);
800 *
801 * solver.solve(system_matrix, newton_update, system_rhs, preconditioner);
802 *
803 * hanging_node_constraints.distribute(newton_update);
804 *
805 * const double alpha = determine_step_length();
806 * current_solution.add(alpha, newton_update);
807 * }
808 *
809 *
810 * @endcode
811 *
812 *
813 * <a name="MinimalSurfaceProblemrefine_mesh"></a>
814 * <h4>MinimalSurfaceProblem::refine_mesh</h4>
815 *
816
817 *
818 * The first part of this function is the same as in @ref step_6 "step-6"... However,
819 * after refining the mesh we have to transfer the old solution to the new
820 * one which we do with the help of the SolutionTransfer class. The process
821 * is slightly convoluted, so let us describe it in detail:
822 *
823 * @code
824 * template <int dim>
825 * void MinimalSurfaceProblem<dim>::refine_mesh()
826 * {
827 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
828 *
829 * KellyErrorEstimator<dim>::estimate(
830 * dof_handler,
831 * QGauss<dim - 1>(fe.degree + 1),
832 * std::map<types::boundary_id, const Function<dim> *>(),
833 * current_solution,
834 * estimated_error_per_cell);
835 *
836 * GridRefinement::refine_and_coarsen_fixed_number(triangulation,
837 * estimated_error_per_cell,
838 * 0.3,
839 * 0.03);
840 *
841 * @endcode
842 *
843 * Then we need an additional step: if, for example, you flag a cell that
844 * is once more refined than its neighbor, and that neighbor is not
845 * flagged for refinement, we would end up with a jump of two refinement
846 * levels across a cell interface. To avoid these situations, the library
847 * will silently also have to refine the neighbor cell once. It does so by
848 * calling the Triangulation::prepare_coarsening_and_refinement function
849 * before actually doing the refinement and coarsening. This function
850 * flags a set of additional cells for refinement or coarsening, to
851 * enforce rules like the one-hanging-node rule. The cells that are
852 * flagged for refinement and coarsening after calling this function are
853 * exactly the ones that will actually be refined or coarsened. Usually,
854 * you don't have to do this by hand
855 * (Triangulation::execute_coarsening_and_refinement does this for
856 * you). However, we need to initialize the SolutionTransfer class and it
857 * needs to know the final set of cells that will be coarsened or refined
858 * in order to store the data from the old mesh and transfer to the new
859 * one. Thus, we call the function by hand:
860 *
861 * @code
862 * triangulation.prepare_coarsening_and_refinement();
863 *
864 * @endcode
865 *
866 * With this out of the way, we initialize a SolutionTransfer object with
867 * the present DoFHandler and attach the solution vector to it, followed
868 * by doing the actual refinement and distribution of degrees of freedom
869 * on the new mesh:
870 *
871 * @code
872 * SolutionTransfer<dim> solution_transfer(dof_handler);
873 * solution_transfer.prepare_for_coarsening_and_refinement(current_solution);
874 *
875 * triangulation.execute_coarsening_and_refinement();
876 *
877 * dof_handler.distribute_dofs(fe);
878 *
879 * @endcode
880 *
881 * Finally, we retrieve the old solution interpolated to the new
881 * mesh. Since the SolutionTransfer class does not actually store the
883 * values of the old solution, but rather indices, we need to preserve the
884 * old solution vector until we have gotten the new interpolated
885 * values. Thus, we have the new values written into a temporary vector,
886 * and only afterwards write them into the solution vector object:
887 *
888 * @code
889 * Vector<double> tmp(dof_handler.n_dofs());
890 * solution_transfer.interpolate(current_solution, tmp);
891 * current_solution = tmp;
892 *
893 * @endcode
894 *
895 * On the new mesh, there are different hanging nodes, for which we have to
896 * compute constraints again, after throwing away previous content of the
897 * object. To be on the safe side, we should then also make sure that the
898 * current solution's vector entries satisfy the hanging node constraints
899 * (see the discussion in the documentation of the SolutionTransfer class
900 * for why this is necessary). We could do this by calling
901 * `hanging_node_constraints.distribute(current_solution)` explicitly; we
902 * omit this step because this will happen at the end of the call to
903 * `set_boundary_values()` below, and it is not necessary to do it twice.
904 *
905 * @code
906 * hanging_node_constraints.clear();
907 *
908 * DoFTools::make_hanging_node_constraints(dof_handler,
909 * hanging_node_constraints);
910 * hanging_node_constraints.close();
911 *
912 * @endcode
913 *
914 * Once we have the interpolated solution and all information about
915 * hanging nodes, we have to make sure that the @f$u^n@f$ we now have
916 * actually has the correct boundary values. As explained at the end of
917 * the introduction, this is not automatically the case even if the
918 * solution before refinement had the correct boundary values, and so we
919 * have to explicitly make sure that it now has:
920 *
921 * @code
922 * set_boundary_values();
923 *
924 * @endcode
925 *
926 * We end the function by updating all the remaining data structures,
927 * indicating to <code>setup_system()</code> that this is not the first
928 * go-around and that it needs to preserve the content of the solution
929 * vector:
930 *
931 * @code
932 * setup_system(false);
933 * }
934 *
935 *
936 *
937 * @endcode
938 *
939 *
940 * <a name="MinimalSurfaceProblemset_boundary_values"></a>
941 * <h4>MinimalSurfaceProblem::set_boundary_values</h4>
942 *
943
944 *
945 * The next function ensures that the solution vector's entries respect the
946 * boundary values for our problem. Having refined the mesh (or just
947 * started computations), there might be new nodal points on the
948 * boundary. These have values that are simply interpolated from the
949 * previous mesh in `refine_mesh()`, instead of the correct boundary
950 * values. This is fixed up by setting all boundary nodes of the current
951 * solution vector explicitly to the right value.
952 *
953
954 *
955 * There is one issue we have to pay attention to, though: If we have
956 * a hanging node right next to a new boundary node, then its value
957 * must also be adjusted to make sure that the finite element field
958 * remains continuous. This is what the call in the last line of this
959 * function does.
960 *
961 * @code
962 * template <int dim>
963 * void MinimalSurfaceProblem<dim>::set_boundary_values()
964 * {
965 * std::map<types::global_dof_index, double> boundary_values;
966 * VectorTools::interpolate_boundary_values(dof_handler,
967 * 0,
968 * BoundaryValues<dim>(),
969 * boundary_values);
970 * for (auto &boundary_value : boundary_values)
971 * current_solution(boundary_value.first) = boundary_value.second;
972 *
973 * hanging_node_constraints.distribute(current_solution);
974 * }
975 *
976 *
977 * @endcode
978 *
979 *
980 * <a name="MinimalSurfaceProblemcompute_residual"></a>
981 * <h4>MinimalSurfaceProblem::compute_residual</h4>
982 *
983
984 *
985 * In order to monitor convergence, we need a way to compute the norm of the
986 * (discrete) residual, i.e., the norm of the vector
987 * @f$\left<F(u^n),\varphi_i\right>@f$ with @f$F(u)=-\nabla \cdot \left(
988 * \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)@f$ as discussed in the
989 * introduction. It turns out that (although we don't use this feature in
990 * the current version of the program) one needs to compute the residual
991 * @f$\left<F(u^n+\alpha^n\;\delta u^n),\varphi_i\right>@f$ when determining
992 * optimal step lengths, and so this is what we implement here: the function
993 * takes the step length @f$\alpha^n@f$ as an argument. The original
994 * functionality is of course obtained by passing a zero as argument.
995 *
996
997 *
998 * In the function below, we first set up a vector for the residual, and
999 * then a vector for the evaluation point @f$u^n+\alpha^n\;\delta u^n@f$. This
1000 * is followed by the same boilerplate code we use for all integration
1001 * operations:
1002 *
1003 * @code
1004 * template <int dim>
1005 * double MinimalSurfaceProblem<dim>::compute_residual(const double alpha) const
1006 * {
1007 * Vector<double> residual(dof_handler.n_dofs());
1008 *
1009 * Vector<double> evaluation_point(dof_handler.n_dofs());
1010 * evaluation_point = current_solution;
1011 * evaluation_point.add(alpha, newton_update);
1012 *
1013 * const QGauss<dim> quadrature_formula(fe.degree + 1);
1014 * FEValues<dim> fe_values(fe,
1015 * quadrature_formula,
1016 * update_gradients | update_quadrature_points |
1017 * update_JxW_values);
1018 *
1019 * const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1020 * const unsigned int n_q_points = quadrature_formula.size();
1021 *
1022 * Vector<double> cell_residual(dofs_per_cell);
1023 * std::vector<Tensor<1, dim>> gradients(n_q_points);
1024 *
1025 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1026 *
1027 * for (const auto &cell : dof_handler.active_cell_iterators())
1028 * {
1029 * cell_residual = 0;
1030 * fe_values.reinit(cell);
1031 *
1032 * @endcode
1033 *
1034 * The actual computation is much as in
1035 * <code>assemble_system()</code>. We first evaluate the gradients of
1036 * @f$u^n+\alpha^n\,\delta u^n@f$ at the quadrature points, then compute
1037 * the coefficient @f$a_n@f$, and then plug it all into the formula for
1038 * the residual:
1039 *
1040 * @code
1041 * fe_values.get_function_gradients(evaluation_point, gradients);
1042 *
1043 *
1044 * for (unsigned int q = 0; q < n_q_points; ++q)
1045 * {
1046 * const double coeff =
1047 * 1. / std::sqrt(1 + gradients[q] * gradients[q]);
1048 *
1049 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1050 * cell_residual(i) -= (fe_values.shape_grad(i, q) // \nabla \phi_i
1051 * * coeff // * a_n
1052 * * gradients[q] // * \nabla u_n
1053 * * fe_values.JxW(q)); // * dx
1054 * }
1055 *
1056 * cell->get_dof_indices(local_dof_indices);
1057 * for (unsigned int i = 0; i < dofs_per_cell; ++i)
1058 * residual(local_dof_indices[i]) += cell_residual(i);
1059 * }
1060 *
1061 * @endcode
1062 *
1063 * At the end of this function we also have to deal with the hanging node
1064 * constraints and with the issue of boundary values. With regard to the
1065 * latter, we have to set to zero the elements of the residual vector for
1066 * all entries that correspond to degrees of freedom that sit at the
1067 * boundary. The reason is that because the value of the solution there is
1068 * fixed, they are of course no "real" degrees of freedom and so, strictly
1069 * speaking, we shouldn't have assembled entries in the residual vector
1070 * for them. However, as we always do, we want to do exactly the same
1071 * thing on every cell and so we didn't want to deal with the question
1072 * of whether a particular degree of freedom sits at the boundary in the
1073 * integration above. Rather, we will simply set to zero these entries
1074 * after the fact. To this end, we need to determine which degrees
1075 * of freedom do in fact belong to the boundary and then loop over all of
1076 * those and set the residual entry to zero. This happens in the following
1077 * lines which we have already seen used in @ref step_11 "step-11", using the appropriate
1078 * function from namespace DoFTools:
1079 *
1080 * @code
1081 * hanging_node_constraints.condense(residual);
1082 *
1083 * for (types::global_dof_index i :
1084 * DoFTools::extract_boundary_dofs(dof_handler))
1085 * residual(i) = 0;
1086 *
1087 * @endcode
1088 *
1089 * At the end of the function, we return the norm of the residual:
1090 *
1091 * @code
1092 * return residual.l2_norm();
1093 * }
1094 *
1095 *
1096 *
1097 * @endcode
1098 *
1099 *
1100 * <a name="MinimalSurfaceProblemdetermine_step_length"></a>
1101 * <h4>MinimalSurfaceProblem::determine_step_length</h4>
1102 *
1103
1104 *
1105 * As discussed in the introduction, Newton's method frequently does not
1106 * converge if we always take full steps, i.e., compute @f$u^{n+1}=u^n+\delta
1107 * u^n@f$. Rather, one needs a damping parameter (step length) @f$\alpha^n@f$ and
1108 * set @f$u^{n+1}=u^n+\alpha^n\delta u^n@f$. This function is the one called
1109 * to compute @f$\alpha^n@f$.
1110 *
1111
1112 *
1113 * Here, we simply always return 0.1. This is of course a sub-optimal
1114 * choice: ideally, what one wants is that the step size goes to one as we
1115 * get closer to the solution, so that we get to enjoy the rapid quadratic
1116 * convergence of Newton's method. We will discuss better strategies below
1117 * in the results section, and @ref step_77 "step-77" also covers this aspect.
1118 *
1119 * @code
1120 * template <int dim>
1121 * double MinimalSurfaceProblem<dim>::determine_step_length() const
1122 * {
1123 * return 0.1;
1124 * }
1125 *
1126 *
1127 *
1128 * @endcode
1129 *
1130 *
1131 * <a name="MinimalSurfaceProblemoutput_results"></a>
1132 * <h4>MinimalSurfaceProblem::output_results</h4>
1133 *
1134
1135 *
1136 * This last function to be called from `run()` outputs the current solution
1137 * (and the Newton update) in graphical form as a VTU file. It is entirely the
1138 * same as what has been used in previous tutorials.
1139 *
1140 * @code
1141 * template <int dim>
1142 * void MinimalSurfaceProblem<dim>::output_results(
1143 * const unsigned int refinement_cycle) const
1144 * {
1145 * DataOut<dim> data_out;
1146 *
1147 * data_out.attach_dof_handler(dof_handler);
1148 * data_out.add_data_vector(current_solution, "solution");
1149 * data_out.add_data_vector(newton_update, "update");
1150 * data_out.build_patches();
1151 *
1152 * const std::string filename =
1153 * "solution-" + Utilities::int_to_string(refinement_cycle, 2) + ".vtu";
1154 * std::ofstream output(filename);
1155 * data_out.write_vtu(output);
1156 * }
1157 *
1158 *
1159 * @endcode
1160 *
1161 *
1162 * <a name="MinimalSurfaceProblemrun"></a>
1163 * <h4>MinimalSurfaceProblem::run</h4>
1164 *
1165
1166 *
1167 * In the run function, we build the first grid and then have the top-level
1168 * logic for the Newton iteration.
1169 *
1170
1171 *
1172 * As described in the introduction, the domain is the unit disk around
1173 * the origin, created in the same way as shown in @ref step_6 "step-6". The mesh is
1174 * globally refined twice followed later on by several adaptive cycles.
1175 *
1176
1177 *
1178 * Before starting the Newton loop, we also need to do a bit of
1179 * setup work: We need to create the basic data structures and
1180 * ensure that the first Newton iterate already has the correct
1181 * boundary values, as discussed in the introduction.
1182 *
1183 * @code
1184 * template <int dim>
1185 * void MinimalSurfaceProblem<dim>::run()
1186 * {
1187 * GridGenerator::hyper_ball(triangulation);
1188 * triangulation.refine_global(2);
1189 *
1190 * setup_system(/*first time=*/true);
1191 * set_boundary_values();
1192 *
1193 * @endcode
1194 *
1195 * The Newton iteration starts next. We iterate until the (norm of the)
1196 * residual computed at the end of the previous iteration is less than
1197 * @f$10^{-3}@f$, as checked at the end of the `do { ... } while` loop that
1198 * starts here. Because we don't have a reasonable value to initialize
1199 * the variable, we just use the largest value that can be represented
1200 * as a `double`.
1201 *
1202 * @code
1203 * double last_residual_norm = std::numeric_limits<double>::max();
1204 * unsigned int refinement_cycle = 0;
1205 * do
1206 * {
1207 * std::cout << "Mesh refinement step " << refinement_cycle << std::endl;
1208 *
1209 * if (refinement_cycle != 0)
1210 * refine_mesh();
1211 *
1212 * @endcode
1213 *
1214 * On every mesh we do exactly five Newton steps. We print the initial
1215 * residual here and then start the iterations on this mesh.
1216 *
1217
1218 *
1219 * In every Newton step the system matrix and the right hand side have
1220 * to be computed first, after which we store the norm of the right
1221 * hand side as the residual to check against when deciding whether to
1222 * stop the iterations. We then solve the linear system (the function
1223 * also updates @f$u^{n+1}=u^n+\alpha^n\;\delta u^n@f$) and output the
1224 * norm of the residual at the end of this Newton step.
1225 *
1226
1227 *
1228 * After the end of this loop, we then also output the solution on the
1229 * current mesh in graphical form and increment the counter for the
1230 * mesh refinement cycle.
1231 *
1232 * @code
1233 * std::cout << " Initial residual: " << compute_residual(0) << std::endl;
1234 *
1235 * for (unsigned int inner_iteration = 0; inner_iteration < 5;
1236 * ++inner_iteration)
1237 * {
1238 * assemble_system();
1239 * last_residual_norm = system_rhs.l2_norm();
1240 *
1241 * solve();
1242 *
1243 * std::cout << " Residual: " << compute_residual(0) << std::endl;
1244 * }
1245 *
1246 * output_results(refinement_cycle);
1247 *
1248 * ++refinement_cycle;
1249 * std::cout << std::endl;
1250 * }
1251 * while (last_residual_norm > 1e-3);
1252 * }
1253 * } // namespace Step15
1254 *
1255 * @endcode
1256 *
1257 *
1258 * <a name="Themainfunction"></a>
1259 * <h4>The main function</h4>
1260 *
1261
1262 *
1263 * Finally the main function. This follows the scheme of all other main
1264 * functions:
1265 *
1266 * @code
1267 * int main()
1268 * {
1269 * try
1270 * {
1271 * using namespace Step15;
1272 *
1273 * MinimalSurfaceProblem<2> laplace_problem_2d;
1274 * laplace_problem_2d.run();
1275 * }
1276 * catch (std::exception &exc)
1277 * {
1278 * std::cerr << std::endl
1279 * << std::endl
1280 * << "----------------------------------------------------"
1281 * << std::endl;
1282 * std::cerr << "Exception on processing: " << std::endl
1283 * << exc.what() << std::endl
1284 * << "Aborting!" << std::endl
1285 * << "----------------------------------------------------"
1286 * << std::endl;
1287 *
1288 * return 1;
1289 * }
1290 * catch (...)
1291 * {
1292 * std::cerr << std::endl
1293 * << std::endl
1294 * << "----------------------------------------------------"
1295 * << std::endl;
1296 * std::cerr << "Unknown exception!" << std::endl
1297 * << "Aborting!" << std::endl
1298 * << "----------------------------------------------------"
1299 * << std::endl;
1300 * return 1;
1301 * }
1302 * return 0;
1303 * }
1304 * @endcode
1305<a name="Results"></a><h1>Results</h1>
1306
1307
1308
1309The output of the program looks as follows:
1310@code
1311Mesh refinement step 0
1312 Initial residual: 1.53143
1313 Residual: 1.08746
1314 Residual: 0.966748
1315 Residual: 0.859602
1316 Residual: 0.766462
1317 Residual: 0.685475
1318
1319Mesh refinement step 1
1320 Initial residual: 0.868959
1321 Residual: 0.762125
1322 Residual: 0.677792
1323 Residual: 0.605762
1324 Residual: 0.542748
1325 Residual: 0.48704
1326
1327Mesh refinement step 2
1328 Initial residual: 0.426445
1329 Residual: 0.382731
1330 Residual: 0.343865
1331 Residual: 0.30918
1332 Residual: 0.278147
1333 Residual: 0.250327
1334
1335Mesh refinement step 3
1336 Initial residual: 0.282026
1337 Residual: 0.253146
1338 Residual: 0.227414
1339 Residual: 0.20441
1340 Residual: 0.183803
1341 Residual: 0.165319
1342
1343Mesh refinement step 4
1344 Initial residual: 0.154404
1345 Residual: 0.138723
1346 Residual: 0.124694
1347 Residual: 0.112124
1348 Residual: 0.100847
1349 Residual: 0.0907222
1350
1351....
1352@endcode
1353
1354Obviously, the scheme converges, if not very fast. We will come back to
1355strategies for accelerating the method below.
1356
1357One can visualize the solution after each set of five Newton
1358iterations, i.e., on each of the meshes on which we approximate the
1359solution. This yields the following set of images:
1360
1361<div class="twocolumn" style="width: 80%">
1362 <div>
1363 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_1.png"
1364 alt="Solution after zero cycles with contour lines." width="230" height="273">
1365 </div>
1366 <div>
1367 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_2.png"
1368 alt="Solution after one cycle with contour lines." width="230" height="273">
1369 </div>
1370 <div>
1371 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_3.png"
1372 alt="Solution after two cycles with contour lines." width="230" height="273">
1373 </div>
1374 <div>
1375 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_4.png"
1376 alt="Solution after three cycles with contour lines." width="230" height="273">
1377 </div>
1378</div>
1379
1380It is clearly visible that the solution minimizes the surface
1381after each refinement. The solution converges to the shape one
1382would imagine for a soap bubble located inside a wire loop
1383that is bent like
1384the boundary. It is also visible how the boundary
1385is smoothed out with each refinement. On the coarse mesh,
1386the boundary doesn't yet look like a sine curve, whereas it
1387resembles one ever more closely as the mesh gets finer.
1388
1389The mesh is mostly refined near the boundary, where the solution
1390increases or decreases strongly, whereas it is coarsened on
1391the inside of the domain, where nothing interesting happens,
1392because there isn't much change in the solution. The ninth
1393solution and mesh are shown here:
1394
1395<div class="onecolumn" style="width: 60%">
1396 <div>
1397 <img src="https://www.dealii.org/images/steps/developer/step_15_solution_9.png"
1398 alt="Grid and solution of the ninth cycle with contour lines." width="507" height="507">
1399 </div>
1400</div>
1401
1402
1403
1404<a name="extensions"></a>
1405<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1406
1407
1408The program shows the basic structure of a solver for a nonlinear, stationary
1409problem. However, it does not converge particularly fast, for good reasons:
1410
1411- The program always takes a step size of 0.1. This precludes the rapid,
1412 quadratic convergence for which Newton's method is typically chosen.
1413- It does not connect the nonlinear iteration with the mesh refinement
1414 iteration.
1415
1416Obviously, a better program would have to address these two points.
1417We will discuss them in the following.
1418
1419
1420<a name="Steplengthcontrol"></a><h4> Step length control </h4>
1421
1422
1423Newton's method has two well known properties:
1424- It may not converge from arbitrarily chosen starting points. Rather, a
1425 starting point has to be close enough to the solution to guarantee
1426 convergence. However, we can enlarge the area from which Newton's method
1427 converges by damping the iteration using a <i>step length</i>
1428 @f$0<\alpha^n\le 1@f$.
1429- It exhibits rapid convergence of quadratic order if (i) the step length is
1430 chosen as @f$\alpha^n=1@f$, and (ii) it does in fact converge with this choice
1431 of step length.
1432
1433A consequence of these two observations is that a successful strategy is to
1434choose @f$\alpha^n<1@f$ for the initial iterations until the iterate has come
1435close enough to allow for convergence with full step length, at which point we
1436want to switch to @f$\alpha^n=1@f$. The question is how to choose @f$\alpha^n@f$ in an
1437automatic fashion that satisfies these criteria.
1438
1439We do not want to review the literature on this topic here, but only briefly
1440mention that there are two fundamental approaches to the problem: backtracking
1441line search and trust region methods. The former is more widely used for
1442partial differential equations and essentially does the following:
1443- Compute a search direction
1444- See if the resulting residual of @f$u^n + \alpha^n\;\delta u^n@f$ with
1445 @f$\alpha^n=1@f$ is "substantially smaller" than that of @f$u^n@f$ alone.
1446- If so, then take @f$\alpha^n=1@f$.
1447- If not, try whether the residual is "substantially smaller" with
1448 @f$\alpha^n=2/3@f$.
1449- If so, then take @f$\alpha^n=2/3@f$.
1450- If not, try whether the residual is "substantially smaller" with
1451 @f$\alpha^n=(2/3)^2@f$.
1452- Etc.
1453One can of course choose other factors @f$r, r^2, \ldots@f$ than the @f$2/3,
1454(2/3)^2, \ldots@f$ chosen above, for @f$0<r<1@f$. It is obvious where the term
1455"backtracking" comes from: we try a long step, but if that doesn't work we try
1456a shorter step, and ever shorter steps, etc. The function
1457<code>determine_step_length()</code> is written the way it is to support
1458exactly this kind of use case.
1459
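As a concrete illustration of this strategy, here is a minimal sketch of what
a backtracking variant of <code>determine_step_length()</code> could look
like, built on the <code>compute_residual()</code> function of this program
(which takes the candidate step length as its argument). The acceptance test
and all constants below (the reduction factor, the sufficient-decrease
constant, the cap on the number of trials) are illustrative assumptions, not
tuned or recommended values:

@code
template <int dim>
double MinimalSurfaceProblem<dim>::determine_step_length() const
{
  // Residual norm at the current iterate, i.e., for step length zero:
  const double old_residual = compute_residual(0);

  // Try the full step first, then backtrack by a factor of 2/3 until the
  // residual is "substantially smaller" than before:
  double alpha = 1.;
  for (unsigned int trial = 0; trial < 10; ++trial)
    {
      if (compute_residual(alpha) < (1. - 0.05 * alpha) * old_residual)
        return alpha;
      alpha *= 2. / 3.;
    }

  // If no trial satisfied the test, fall back to the last, very short step:
  return alpha;
}
@endcode

Note that each trial costs one residual evaluation, i.e., one sweep over all
cells, so the number of trials should be kept small.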
1460Whether we accept a particular step length @f$\alpha^n@f$ depends on how we define
1461"substantially smaller". There are a number of ways to do so, but without
1462going into detail let us just mention that the most common ones are to use the
1463Wolfe and Armijo-Goldstein conditions. For these, one can show the following:
1464- There is always a step length @f$\alpha^n@f$ for which the conditions are
1465 satisfied, i.e., the iteration never gets stuck as long as the problem is
1466 convex.
1467- If we are close enough to the solution, then the conditions allow for
1468 @f$\alpha^n=1@f$, thereby enabling quadratic convergence.
1469
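To make this concrete, a common textbook form of such an acceptance test for
residual-based Newton methods (an Armijo-type sufficient-decrease condition,
and the kind of test the sketch above mimics) is
 @f[
 \|F(u^n+\alpha^n\,\delta u^n)\| \le \left(1-c\,\alpha^n\right)\,\|F(u^n)\|
 @f]
for some small constant, e.g. @f$c=10^{-4}@f$.
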
1470We will not dwell on this here any further but leave the implementation of
1471such algorithms as an exercise. We note, however, that when implemented
1472correctly then it is a common observation that most reasonably nonlinear
1473problems can be solved in anywhere between 5 and 15 Newton iterations to
1474engineering accuracy &mdash; substantially fewer than we need with the current
1475version of the program.
1476
1477More details on globalization methods including backtracking can be found,
1478for example, in @cite GNS08 and @cite NW99.
1479
1480A separate point, very much worthwhile making, however, is that in practice
1481the implementation of efficient nonlinear solvers is about as complicated as
1482the implementation of efficient finite element methods. One should not
1483attempt to reinvent the wheel by implementing all of the necessary steps
1484oneself. Substantial pieces of the puzzle are already available in
1485the LineMinimization::line_search() function and could be used to this end.
1486But, instead, just like building finite element solvers on libraries
1487such as deal.II, one should be building nonlinear solvers on libraries such
1488as [SUNDIALS](https://computing.llnl.gov/projects/sundials). In fact,
1489deal.II has interfaces to SUNDIALS and in particular to its nonlinear solver
1490sub-package KINSOL through the SUNDIALS::KINSOL class. It would not be
1491very difficult to base the current problem on that interface --
1492indeed, that is what @ref step_77 "step-77" does.
1493
1494
1495
1496<a name="Integratingmeshrefinementandnonlinearandlinearsolvers"></a><h4> Integrating mesh refinement and nonlinear and linear solvers </h4>
1497
1498
1499We currently do exactly 5 iterations on each mesh. But is this optimal? One
1500could ask the following questions:
1501- Maybe it is worthwhile doing more iterations on the initial meshes since
1502 there, computations are cheap.
1503- On the other hand, we do not want to do too many iterations on every mesh:
1504 yes, we could drive the residual to zero on every mesh, but that would only
1505 mean that the nonlinear iteration error is far smaller than the
1506 discretization error.
1507- Should we solve the linear systems in each Newton step with higher or
1508 lower accuracy?
1509
1510Ultimately, what this boils down to is that we somehow need to couple the
1511discretization error on the current mesh with the nonlinear residual we want
1512to achieve with the Newton iterations on a given mesh, and with the
1513accuracy we want to achieve with the CG method within each Newton
1514iteration.
1515
1516How to do this is, again, not entirely trivial, and we again leave it as a
1517future exercise.
1518
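As one purely illustrative possibility (a heuristic with made-up constants,
not something this program implements), the inner loop of <code>run()</code>
could be rewritten to iterate on each mesh until the nonlinear residual has
dropped by a fixed factor relative to its initial value on that mesh:

@code
// Illustrative sketch: replace the fixed five Newton steps per mesh by
// iterating until the residual has been reduced by a factor of ten on
// the current mesh, with a safety cap on the number of steps.
const double initial_residual = compute_residual(0);
unsigned int inner_iteration  = 0;
do
  {
    assemble_system();
    last_residual_norm = system_rhs.l2_norm();
    solve();
    ++inner_iteration;
  }
while ((compute_residual(0) > 0.1 * initial_residual) &&
       (inner_iteration < 20));
@endcode

In the same spirit, one could pass a tolerance to <code>solve()</code> that is
proportional to the current nonlinear residual, so that early Newton steps do
not solve the linear system more accurately than necessary (the idea behind
inexact Newton methods).
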
1519
1520
1521<a name="UsingautomaticdifferentiationtocomputetheJacobianmatrix"></a><h4> Using automatic differentiation to compute the Jacobian matrix </h4>
1522
1523
1524As outlined in the introduction, when solving a nonlinear problem of
1525the form
1526 @f[
1527 F(u) \dealcoloneq
1528 -\nabla \cdot \left( \frac{1}{\sqrt{1+|\nabla u|^{2}}}\nabla u \right)
1529 = 0
1530 @f]
1531we use a Newton iteration that requires us to repeatedly solve the
1532linear partial differential equation
1533 @f{align*}
1534 F'(u^{n},\delta u^{n}) &=- F(u^{n})
1535 @f}
1536so that we can compute the update
1537 @f{align*}
1538 u^{n+1}&=u^{n}+\alpha^n \delta u^{n}
1539 @f}
1540with the solution @f$\delta u^{n}@f$ of the Newton step. For the problem
1541here, we could compute the derivative @f$F'(u,\delta u)@f$ by hand and
1542obtain
1543 @f[
1544 F'(u,\delta u)
1545 =
1546 - \nabla \cdot \left( \frac{1}{\left(1+|\nabla u|^{2}\right)^{\frac{1}{2}}}\nabla
1547 \delta u \right) +
1548 \nabla \cdot \left( \frac{\nabla u \cdot
1549 \nabla \delta u}{\left(1+|\nabla u|^{2}\right)^{\frac{3}{2}}} \nabla u
1550 \right).
1551 @f]
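For completeness, here is the derivation step that the text does not spell
out: the expression above is the Gateaux (directional) derivative
 @f[
 F'(u,\delta u)
 =
 \left.\frac{d}{d\varepsilon} F(u+\varepsilon\,\delta u)\right|_{\varepsilon=0},
 @f]
where applying the chain rule to the coefficient gives
 @f[
 \left.\frac{d}{d\varepsilon}
 \left(1+|\nabla u+\varepsilon\nabla\delta u|^{2}\right)^{-\frac{1}{2}}
 \right|_{\varepsilon=0}
 =
 -\frac{\nabla u \cdot \nabla \delta u}{\left(1+|\nabla u|^{2}\right)^{\frac{3}{2}}},
 @f]
and these two contributions are exactly the two terms shown.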
1552But this is already a sizable expression that is cumbersome both to
1553derive and to implement. It is also, in some sense, duplicative: If we
1554implement what @f$F(u)@f$ is somewhere in the code, then @f$F'(u,\delta u)@f$
1555is not an independent piece of information but is something that, at
1556least in principle, a computer should be able to infer itself.
1557Wouldn't it be nice if that could actually happen? That is, if we
1558really only had to implement @f$F(u)@f$, and @f$F'(u,\delta u)@f$ was then somehow
1559done implicitly? That is in fact possible, and runs under the name
1560"automatic differentiation". @ref step_71 "step-71" discusses this very
1561concept in general terms, and @ref step_72 "step-72" illustrates how this can be
1562applied in practice for the very problem we are considering here.
1563 *
1564 *
1565<a name="PlainProg"></a>
1566<h1> The plain program</h1>
1567@include "step-15.cc"
1568*/