step-65.h
589 = 0) const override
590 *   {
591 *   if (p.norm_square() < 0.25)
592 *   return p.norm_square();
593 *   else
594 *   return 0.1 * p.norm_square() + (0.25 - 0.025);
595 *   }
596 *  
597 *   virtual Tensor<1, dim>
598 *   gradient(const Point<dim> &p,
599 *   const unsigned int /*component*/ = 0) const override
600 *   {
601 *   if (p.norm_square() < 0.25)
602 *   return 2. * p;
603 *   else
604 *   return 0.2 * p;
605 *   }
606 *   };
607 *  
608 *  
609 *   template <int dim>
610 *   double coefficient(const Point<dim> &p)
611 *   {
612 *   if (p.norm_square() < 0.25)
613 *   return 0.5;
614 *   else
615 *   return 5.0;
616 *   }
617 *  
618 *  
619 *  
620 * @endcode
621 *
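* (A short consistency check, not part of the program, shows how the two
* pieces above fit together: inside the ball we have @f$\nabla u = 2\mathbf{x}@f$
* and @f$a = 0.5@f$, outside @f$\nabla u = 0.2\,\mathbf{x}@f$ and @f$a = 5@f$, so the
* flux @f$a \nabla u = \mathbf{x}@f$ is continuous across the interface at
* @f$r = 0.5@f$, and in both regions @f$-\nabla \cdot (a \nabla u) = -d@f$. This is
* exactly the constant right hand side @f$f = -d@f$ that will appear in the
* assembly routine below.)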
622 *
623 * <a name="step_65-ThePoissonProblemclass"></a>
624 * <h3>The PoissonProblem class</h3>
625 *
626
627 *
628 * The implementation of the Poisson problem is very similar to what
629 * we used in the @ref step_5 "step-5" tutorial program. The two main differences
630 * are that we pass a mapping object to the various steps in the
631 * program in order to switch between two mapping representations as
632 * explained in the introduction, and the `timer` object (of type
633 * TimerOutput) that will be used for measuring the run times in the
634 * various cases. (The concept of mapping objects was first
635 * introduced in @ref step_10 "step-10" and @ref step_11 "step-11", in case you want to look up
636 * the use of these classes.)
637 *
638 * @code
639 *   template <int dim>
640 *   class PoissonProblem
641 *   {
642 *   public:
643 *   PoissonProblem();
644 *   void run();
645 *  
646 *   private:
647 *   void create_grid();
648 *   void setup_system(const Mapping<dim> &mapping);
649 *   void assemble_system(const Mapping<dim> &mapping);
650 *   void solve();
651 *   void postprocess(const Mapping<dim> &mapping);
652 *  
653 *   Triangulation<dim> triangulation;
654 *   const FE_Q<dim> fe;
655 *   DoFHandler<dim> dof_handler;
656 *  
657 *   AffineConstraints<double> constraints;
658 *   SparsityPattern sparsity_pattern;
659 *   SparseMatrix<double> system_matrix;
660 *   Vector<double> solution;
661 *   Vector<double> system_rhs;
662 *  
663 *   TimerOutput timer;
664 *   };
665 *  
666 *  
667 *  
668 * @endcode
669 *
670 * In the constructor, we set up the timer object to record wall times but
671 * be quiet during the normal execution. We will query it for timing details
672 * in the `PoissonProblem::run()` function. Furthermore, we select a
673 * relatively high polynomial degree of three for the finite element in use.
674 *
675 * @code
676 *   template <int dim>
677 *   PoissonProblem<dim>::PoissonProblem()
678 *   : fe(3)
679 *   , dof_handler(triangulation)
680 *   , timer(std::cout, TimerOutput::never, TimerOutput::wall_times)
681 *   {}
682 *  
683 *  
684 *  
685 * @endcode
686 *
687 *
688 * <a name="step_65-Gridcreationandinitializationofthemanifolds"></a>
689 * <h3>Grid creation and initialization of the manifolds</h3>
690 *
691
692 *
693 * The next function presents the typical usage of
694 * TransfiniteInterpolationManifold. The first step is to create the desired
695 * grid, which can be done by composition of two grids from
696 * GridGenerator. The inner ball mesh is simple enough: We run
697 * GridGenerator::hyper_ball() centered at the origin with radius 0.5 (third
698 * function argument). The second mesh is more interesting and constructed
699 * as follows: We want to have a mesh that is spherical in the interior but
700 * flat on the outer surface. Furthermore, the mesh topology of the inner
701 * ball should be compatible with the outer grid in the sense that their
702 * vertices coincide so as to allow the two grids to be merged. The grid coming
703 * out of GridGenerator::hyper_shell fulfills the requirements on the inner
704 * side in case it is created with @f$2d@f$ coarse cells (6 coarse cells in 3d
705 * which we are going to use) &ndash; this is the same number of cells as
706 * there are boundary faces for the ball. For the outer surface, we use the
707 * fact that the 6 faces on the surface of the shell without a manifold
708 * attached would degenerate to the surface of a cube. What we are still
709 * missing is the radius of the outer shell boundary. Since we desire a cube
710 * of extent
711 * @f$[-1, 1]@f$ and the 6-cell shell puts its 8 outer vertices at the 8
712 * opposing diagonals, we must translate the points @f$(\pm 1, \pm 1, \pm 1)@f$
713 * into a radius: Clearly, the radius must be @f$\sqrt{d}@f$ in @f$d@f$ dimensions,
714 * i.e., @f$\sqrt{3}@f$ for the three-dimensional case we want to consider.
715 *
716
717 *
718 * Thus, we have a plan: After creating the inner triangulation for
719 * the ball and the one for the outer shell, we merge those two
720 * grids but remove all manifolds that the functions in
721 * GridGenerator may have set from the resulting triangulation, to
722 * ensure that we have full control over manifolds. In particular,
723 * we want additional points added on the boundary during refinement
724 * to follow a flat manifold description. To start the process of
725 * adding more appropriate manifold ids, we assign the manifold id 0
726 * to all mesh entities (cells, faces, lines), which will later be
727 * associated with the TransfiniteInterpolationManifold. Then, we
728 * must identify the faces and lines that are along the sphere of
729 * radius 0.5 and mark them with a different manifold id, so as to then
730 * assign a SphericalManifold to those. We will choose the manifold
731 * id of 1. Since we have thrown away all manifolds that pre-existed
732 * after calling GridGenerator::hyper_ball(), we manually go through
733 * the cells of the mesh and all their faces. We have found a face
734 * on the sphere if all four vertices have a radius of 0.5, or, as
735 * we write in the program, have @f$r^2-0.25 \approx 0@f$. Note that we call
736 * `cell->face(f)->set_all_manifold_ids(1)` to set the manifold id
737 * both on the faces and the surrounding lines. Furthermore, we want
738 * to distinguish the cells inside the ball and outside the ball by
739 * a material id for visualization, corresponding to the picture in the
740 * introduction.
741 *
742 * @code
743 *   template <int dim>
744 *   void PoissonProblem<dim>::create_grid()
745 *   {
746 *   Triangulation<dim> tria_inner;
747 *   GridGenerator::hyper_ball(tria_inner, Point<dim>(), 0.5);
748 *  
749 *   Triangulation<dim> tria_outer;
750 *   GridGenerator::hyper_shell(
751 *   tria_outer, Point<dim>(), 0.5, std::sqrt(dim), 2 * dim);
752 *  
753 *   GridGenerator::merge_triangulations(tria_inner, tria_outer, triangulation);
754 *  
755 *   triangulation.reset_all_manifolds();
756 *   triangulation.set_all_manifold_ids(0);
757 *  
758 *   for (const auto &cell : triangulation.cell_iterators())
759 *   {
760 *   for (const auto &face : cell->face_iterators())
761 *   {
762 *   bool face_at_sphere_boundary = true;
763 *   for (const auto v : face->vertex_indices())
764 *   {
765 *   if (std::abs(face->vertex(v).norm_square() - 0.25) > 1e-12)
766 *   face_at_sphere_boundary = false;
767 *   }
768 *   if (face_at_sphere_boundary)
769 *   face->set_all_manifold_ids(1);
770 *   }
771 *   if (cell->center().norm_square() < 0.25)
772 *   cell->set_material_id(1);
773 *   else
774 *   cell->set_material_id(0);
775 *   }
776 *  
777 * @endcode
778 *
779 * With all cells, faces and lines marked appropriately, we can
780 * attach the Manifold objects to those numbers. The entities with
781 * manifold id 1 will get a spherical manifold, whereas the other
782 * entities, which have the manifold id 0, will be assigned the
783 * TransfiniteInterpolationManifold. As mentioned in the
784 * introduction, we must explicitly initialize the manifold with
785 * the current mesh using a call to
786 * TransfiniteInterpolationManifold::initialize() that picks
787 * up the coarse mesh cells and the manifolds attached to the
788 * boundaries of those cells. We also note that the manifold
789 * objects we create locally in this function are allowed to go
790 * out of scope (as they do at the end of the function scope),
791 * because the Triangulation object internally copies them.
792 *
793
794 *
795 * With all manifolds attached, we will finally go about and refine the
796 * mesh a few times to create a sufficiently large test case.
797 *
798 * @code
799 *   triangulation.set_manifold(1, SphericalManifold<dim>());
800 *  
801 *   TransfiniteInterpolationManifold<dim> transfinite_manifold;
802 *   transfinite_manifold.initialize(triangulation);
803 *   triangulation.set_manifold(0, transfinite_manifold);
804 *  
805 *   triangulation.refine_global(9 - 2 * dim);
806 *   }
807 *  
808 *  
809 *  
810 * @endcode
811 *
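* As a small aside (a sketch, not part of step-65): one way to convince
* oneself that the manifold setup behaves as intended is to check, after the
* global refinement, that every vertex of a face carrying manifold id 1 still
* lies on the sphere of radius 0.5, since the SphericalManifold is responsible
* for placing all new points on these faces:
*
* @code
*   // Sketch: verify that faces with manifold id 1 stay on the sphere of
*   // radius 0.5 after refinement (Assert is only active in debug mode).
*   for (const auto &cell : triangulation.active_cell_iterators())
*     for (const auto &face : cell->face_iterators())
*       if (face->manifold_id() == 1)
*         for (const auto v : face->vertex_indices())
*           Assert(std::abs(face->vertex(v).norm_square() - 0.25) < 1e-10,
*                  ExcInternalError());
* @endcode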
812 *
813 * <a name="step_65-Setupofdatastructures"></a>
814 * <h3>Setup of data structures</h3>
815 *
816
817 *
818 * The following function is well-known from other tutorials in that
819 * it enumerates the degrees of freedom, creates a constraint object
820 * and sets up a sparse matrix for the linear system. The only thing
821 * worth mentioning is the fact that the function receives a
822 * reference to a mapping object that we then pass to the
823 * VectorTools::interpolate_boundary_values() function to ensure
824 * that our boundary values are evaluated on the high-order mesh
825 * used for assembly. In the present example, it does not really
826 * matter because the outer surfaces are flat, but for curved outer
827 * cells this leads to more accurate approximation of the boundary
828 * values.
829 *
830 * @code
831 *   template <int dim>
832 *   void PoissonProblem<dim>::setup_system(const Mapping<dim> &mapping)
833 *   {
834 *   dof_handler.distribute_dofs(fe);
835 *   std::cout << " Number of active cells: "
836 *   << triangulation.n_global_active_cells() << std::endl;
837 *   std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
838 *   << std::endl;
839 *  
840 *   {
841 *   TimerOutput::Scope scope(timer, "Compute constraints");
842 *  
843 *   constraints.clear();
844 *  
845 *   DoFTools::make_hanging_node_constraints(dof_handler, constraints);
846 *   VectorTools::interpolate_boundary_values(
847 *   mapping, dof_handler, 0, ExactSolution<dim>(), constraints);
848 *  
849 *   constraints.close();
850 *   }
851 *  
852 *   DynamicSparsityPattern dsp(dof_handler.n_dofs());
853 *   DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
854 *  
855 *   sparsity_pattern.copy_from(dsp);
856 *   system_matrix.reinit(sparsity_pattern);
857 *  
858 *   solution.reinit(dof_handler.n_dofs());
859 *   system_rhs.reinit(dof_handler.n_dofs());
860 *   }
861 *  
862 *  
863 * @endcode
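* As a side note (a sketch, not code from this program): had we called the
* overload of VectorTools::interpolate_boundary_values() that does not take a
* mapping argument, the boundary values would have been interpolated using a
* standard (bi-/tri-)linear description of the cells. For the present geometry
* with its flat outer boundary the result would be the same, but for curved
* boundaries the two calls differ:
*
* @code
*   // Sketch: mapping-free overload, which implicitly uses a linear mapping.
*   VectorTools::interpolate_boundary_values(
*     dof_handler, 0, ExactSolution<dim>(), constraints);
* @endcode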
864 *
865 *
866 * <a name="step_65-Assemblyofthesystemmatrixandrighthandside"></a>
867 * <h3>Assembly of the system matrix and right hand side</h3>
868 *
869
870 *
871 * The function that assembles the linear system is also well known
872 * from the previous tutorial programs. One thing to note is that we
873 * set the number of quadrature points to the polynomial degree plus
874 * two, not the degree plus one as in most other tutorials. This is
875 * because we expect some extra accuracy as the mapping also
876 * involves a degree one more than the polynomials for the solution.
877 *
878
879 *
880 * The only somewhat unusual code in the assembly is the way we compute the
881 * cell matrix. Rather than using three nested loops over the quadrature
882 * point index, the row, and the column of the matrix, we first collect the
883 * derivatives of the shape function, multiplied by the square root of the
884 * product of the coefficient and the integration factor `JxW` in a separate
885 * matrix `partial_matrix`. To compute the cell matrix, we then execute
886 * `cell_matrix = partial_matrix * transpose(partial_matrix)` in the line
887 * `partial_matrix.mTmult(cell_matrix, partial_matrix);`. To understand why
888 * this works, we realize that the matrix-matrix multiplication performs a
889 * summation over the columns of `partial_matrix`. If we denote the
890 * coefficient by @f$a(\mathbf{x}_q)@f$, the entries in the temporary matrix are
891 * @f$\sqrt{\text{det}(J) w_q a(x)} \frac{\partial \varphi_i(\boldsymbol
892 * \xi_q)}{\partial x_k}@f$. If we take the product of the <i>i</i>th row with
893 * the <i>j</i>th column of that matrix, we compute a nested sum involving
894 * @f$\sum_q \sum_{k=1}^d \sqrt{\text{det}(J) w_q a(x)} \frac{\partial
895 * \varphi_i(\boldsymbol \xi_q)}{\partial x_k} \sqrt{\text{det}(J) w_q a(x)}
896 * \frac{\partial \varphi_j(\boldsymbol \xi_q)}{\partial x_k} = \sum_q
897 * \sum_{k=1}^d\text{det}(J) w_q a(x)\frac{\partial \varphi_i(\boldsymbol
898 * \xi_q)}{\partial x_k} \frac{\partial \varphi_j(\boldsymbol
899 * \xi_q)}{\partial x_k}@f$, which is exactly the terms needed for the
900 * bilinear form of the Laplace equation.
901 *
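* To see this equivalence in isolation, here is a small standalone sketch (not
* part of the tutorial program) that compares FullMatrix::mTmult() against the
* explicit triple loop for arbitrary data:
*
* @code
*   #include <deal.II/base/exceptions.h>
*   #include <deal.II/lac/full_matrix.h>
*
*   #include <cmath>
*
*   void check_mTmult()
*   {
*     // 2 "shape functions", 3 quadrature points times 2 space dimensions:
*     dealii::FullMatrix<double> partial(2, 6);
*     for (unsigned int i = 0; i < partial.m(); ++i)
*       for (unsigned int k = 0; k < partial.n(); ++k)
*         partial(i, k) = 1.0 / (1.0 + i + k); // arbitrary test data
*
*     // product = partial * transpose(partial)
*     dealii::FullMatrix<double> product(2, 2);
*     partial.mTmult(product, partial);
*
*     // Compare against the explicit sum over quadrature points and dimensions.
*     for (unsigned int i = 0; i < product.m(); ++i)
*       for (unsigned int j = 0; j < product.n(); ++j)
*         {
*           double sum = 0.;
*           for (unsigned int k = 0; k < partial.n(); ++k)
*             sum += partial(i, k) * partial(j, k);
*           AssertThrow(std::abs(sum - product(i, j)) < 1e-14,
*                       dealii::ExcInternalError());
*         }
*   }
* @endcode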
902
903 *
904 * The reason for choosing this somewhat unusual scheme is due to the heavy
905 * work involved in computing the cell matrix for a relatively high
906 * polynomial degree in 3d. As we want to highlight the cost of the mapping
907 * in this tutorial program, it is better to do the assembly in an optimized
908 * way so as not to chase bottlenecks that the community has already solved.
909 * Matrix-matrix multiplication is one of the best optimized
910 * kernels in the HPC context, and the FullMatrix::mTmult() function will
911 * call into those optimized BLAS functions. If the user has provided a good
912 * BLAS library when configuring deal.II (like OpenBLAS or Intel's MKL), the
913 * computation of the cell matrix will execute close to the processor's peak
914 * arithmetic performance. As a side note, we mention that despite an
915 * optimized matrix-matrix multiplication, the current strategy is
916 * sub-optimal in terms of complexity as the work to be done is proportional
917 * to @f$(p+1)^9@f$ operations for degree @f$p@f$ (this also applies to the usual
918 * evaluation with FEValues). One could compute the cell matrix with
919 * @f$\mathcal O((p+1)^7)@f$ operations by utilizing the tensor product
920 * structure of the shape functions, as is done by the matrix-free framework
921 * in deal.II. We refer to @ref step_37 "step-37" and the documentation of the
922 * tensor-product-aware evaluators FEEvaluation for details on how an even
923 * more efficient cell matrix computation could be realized.
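* (For the cubic elements used below, @f$p=3@f$, this is the difference between
* @f$(p+1)^9 = 262144@f$ and on the order of @f$(p+1)^7 = 16384@f$ operations per
* cell, i.e., a factor of @f$(p+1)^2 = 16@f$.)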
924 *
925 * @code
926 *   template <int dim>
927 *   void PoissonProblem<dim>::assemble_system(const Mapping<dim> &mapping)
928 *   {
929 *   TimerOutput::Scope scope(timer, "Assemble linear system");
930 *  
931 *   const QGauss<dim> quadrature_formula(fe.degree + 2);
932 *   FEValues<dim> fe_values(mapping,
933 *   fe,
934 *   quadrature_formula,
935 *   update_values | update_gradients |
936 *   update_quadrature_points | update_JxW_values);
937 *  
938 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
939 *   const unsigned int n_q_points = quadrature_formula.size();
940 *  
941 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
942 *   Vector<double> cell_rhs(dofs_per_cell);
943 *   FullMatrix<double> partial_matrix(dofs_per_cell, dim * n_q_points);
944 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
945 *  
946 *   for (const auto &cell : dof_handler.active_cell_iterators())
947 *   {
948 *   cell_rhs = 0.;
949 *   fe_values.reinit(cell);
950 *  
951 *   for (unsigned int q_index = 0; q_index < n_q_points; ++q_index)
952 *   {
953 *   const double current_coefficient =
954 *   coefficient(fe_values.quadrature_point(q_index));
955 *   for (unsigned int i = 0; i < dofs_per_cell; ++i)
956 *   {
957 *   for (unsigned int d = 0; d < dim; ++d)
958 *   partial_matrix(i, q_index * dim + d) =
959 *   std::sqrt(fe_values.JxW(q_index) * current_coefficient) *
960 *   fe_values.shape_grad(i, q_index)[d];
961 *   cell_rhs(i) +=
962 *   (fe_values.shape_value(i, q_index) * // phi_i(x_q)
963 *   (-dim) * // f(x_q)
964 *   fe_values.JxW(q_index)); // dx
965 *   }
966 *   }
967 *  
968 *   partial_matrix.mTmult(cell_matrix, partial_matrix);
969 *  
970 *   cell->get_dof_indices(local_dof_indices);
971 *   constraints.distribute_local_to_global(
972 *   cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
973 *   }
974 *   }
975 *  
976 *  
977 *  
978 * @endcode
979 *
980 *
981 * <a name="step_65-Solutionofthelinearsystem"></a>
982 * <h3>Solution of the linear system</h3>
983 *
984
985 *
986 * For solving the linear system, we pick a simple Jacobi-preconditioned
987 * conjugate gradient solver, similar to the settings in the early tutorials.
988 *
989 * @code
990 *   template <int dim>
991 *   void PoissonProblem<dim>::solve()
992 *   {
993 *   TimerOutput::Scope scope(timer, "Solve linear system");
994 *  
995 *   SolverControl solver_control(1000, 1e-12);
996 *   SolverCG<Vector<double>> solver(solver_control);
997 *  
998 *   PreconditionJacobi<SparseMatrix<double>> preconditioner;
999 *   preconditioner.initialize(system_matrix);
1000 *  
1001 *   solver.solve(system_matrix, solution, system_rhs, preconditioner);
1002 *   constraints.distribute(solution);
1003 *  
1004 *   std::cout << " Number of solver iterations: "
1005 *   << solver_control.last_step() << std::endl;
1006 *   }
1007 *  
1008 *  
1009 *  
1010 * @endcode
1011 *
1012 *
1013 * <a name="step_65-Outputofthesolutionandcomputationoferrors"></a>
1014 * <h3>Output of the solution and computation of errors</h3>
1015 *
1016
1017 *
1018 * In the next function we do various post-processing steps with the
1019 * solution, all of which involve the mapping in one way or the other.
1020 *
1021
1022 *
1023 * The first operation we do is to write the solution as well as the
1024 * material ids to a VTU file. This is similar to what was done in many
1025 * other tutorial programs. The new ingredient presented in this tutorial
1026 * program is that we want to ensure that the data written to the file
1027 * used for visualization is actually a faithful representation of what
1028 * is used internally by deal.II. That is because most of the visualization
1029 * data formats only represent cells by their vertex coordinates, but
1030 * have no way of representing the curved boundaries that are used
1031 * in deal.II when using higher order mappings -- in other words, what
1032 * you see in the visualization tool is not actually what you are computing
1033 * on. (The same, incidentally, is true when using higher order shape
1034 * functions: Most visualization tools only render bilinear/trilinear
1035 * representations. This is discussed in detail in DataOut::build_patches().)
1036 *
1037
1038 *
1039 * So we need to ensure that a high-order representation is written
1040 * to the file. We need to consider three particular topics. Firstly, we tell
1041 * the DataOut object via the DataOutBase::VtkFlags that we intend to
1042 * interpret the subdivisions of the elements as a high-order Lagrange
1043 * polynomial rather than a collection of bilinear patches.
1044 * Recent visualization programs, like ParaView version 5.5
1045 * or newer, can then render a high-order solution (see a <a
1046 * href="https://github.com/dealii/dealii/wiki/Notes-on-visualizing-high-order-output">wiki
1047 * page</a> for more details).
1048 * Secondly, we need to make sure that the mapping is passed to the
1049 * DataOut::build_patches() method. Finally, the DataOut class only prints
1050 * curved faces for <i>boundary</i> cells by default, so we need to ensure
1051 * that also inner cells are printed in a curved representation via the
1052 * mapping.
1053 *
1054
1055 *
1056 * @note As of 2023, VisIt 3.3.3 still cannot deal with higher-order cells.
1057 * Rather, it simply reports that there is no data to show. To view the
1058 * results of this program with VisIt, you will want to comment out the
1059 * line that sets `flags.write_higher_order_cells = true;`. On the other
1060 * hand, ParaView is able to understand VTU files with higher order cells
1061 * just fine.
1062 *
1063 * @code
1064 *   template <int dim>
1065 *   void PoissonProblem<dim>::postprocess(const Mapping<dim> &mapping)
1066 *   {
1067 *   {
1068 *   TimerOutput::Scope scope(timer, "Write output");
1069 *  
1070 *   DataOut<dim> data_out;
1071 *  
1072 *   DataOutBase::VtkFlags flags;
1073 *   flags.write_higher_order_cells = true;
1074 *   data_out.set_flags(flags);
1075 *  
1076 *   data_out.attach_dof_handler(dof_handler);
1077 *   data_out.add_data_vector(solution, "solution");
1078 *  
1079 *   Vector<double> material_ids(triangulation.n_active_cells());
1080 *   for (const auto &cell : triangulation.active_cell_iterators())
1081 *   material_ids[cell->active_cell_index()] = cell->material_id();
1082 *   data_out.add_data_vector(material_ids, "material_ids");
1083 *  
1084 *   data_out.build_patches(mapping,
1085 *   fe.degree,
1086 *   DataOut<dim>::curved_inner_cells);
1087 *  
1088 *   std::ofstream file(
1089 *   ("solution-" +
1090 *   std::to_string(triangulation.n_global_levels() - 10 + 2 * dim) +
1091 *   ".vtu")
1092 *   .c_str());
1093 *  
1094 *   data_out.write_vtu(file);
1095 *   }
1096 *  
1097 * @endcode
1098 *
1099 * The next operation in the postprocessing function is to compute the @f$L_2@f$
1100 * and @f$H^1@f$ errors against the analytical solution. As the analytical
1101 * solution is a quadratic polynomial, we expect a very accurate result at
1102 * this point. If we were solving on a simple mesh with planar faces and a
1103 * coefficient whose jumps are aligned with the faces between cells, then
1104 * we would expect the numerical result to coincide with the
1105 * analytical solution up to roundoff accuracy. However, since we are using
1106 * deformed cells following a sphere, which are only tracked by
1107 * polynomials of degree 4 (one more than the degree for the finite
1108 * elements), we will see that there is an error around @f$10^{-7}@f$. We could
1109 * get more accuracy by increasing the polynomial degree or refining the
1110 * mesh.
1111 *
1112 * @code
1113 *   {
1114 *   TimerOutput::Scope scope(timer, "Compute error norms");
1115 *  
1116 *   Vector<double> norm_per_cell_p(triangulation.n_active_cells());
1117 *  
1118 *   VectorTools::integrate_difference(mapping,
1119 *   dof_handler,
1120 *   solution,
1121 *   ExactSolution<dim>(),
1122 *   norm_per_cell_p,
1123 *   QGauss<dim>(fe.degree + 2),
1124 *   VectorTools::L2_norm);
1125 *   std::cout << " L2 error vs exact solution: "
1126 *   << norm_per_cell_p.l2_norm() << std::endl;
1127 *  
1128 *   VectorTools::integrate_difference(mapping,
1129 *   dof_handler,
1130 *   solution,
1131 *   ExactSolution<dim>(),
1132 *   norm_per_cell_p,
1133 *   QGauss<dim>(fe.degree + 2),
1134 *   VectorTools::H1_seminorm);
1135 *   std::cout << " H1 error vs exact solution: "
1136 *   << norm_per_cell_p.l2_norm() << std::endl;
1137 *   }
1138 *  
1139 * @endcode
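* A remark on how these numbers are aggregated: calling `l2_norm()` on the
* vector of cell-wise norms combines them into the corresponding global norm.
* The same can be written more explicitly (a sketch, not used in this
* program) as
*
* @code
*   const double L2_error =
*     VectorTools::compute_global_error(triangulation,
*                                       norm_per_cell_p,
*                                       VectorTools::L2_norm);
* @endcode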
1140 *
1141 * The final post-processing operation we do here is to compute an error
1142 * estimate with the KellyErrorEstimator. We use the exact same settings
1143 * as in the @ref step_6 "step-6" tutorial program, except for the fact that we also
1144 * hand in the mapping to ensure that errors are evaluated along the
1145 * curved element, consistent with the remainder of the program. However,
1146 * we do not really use the result here to drive a mesh adaptation step
1147 * (that would refine the mesh around the material interface along the
1148 * sphere), as the focus here is on the cost of this operation.
1149 *
1150 * @code
1151 *   {
1152 *   TimerOutput::Scope scope(timer, "Compute error estimator");
1153 *  
1154 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1155 *   KellyErrorEstimator<dim>::estimate(
1156 *   mapping,
1157 *   dof_handler,
1158 *   QGauss<dim - 1>(fe.degree + 1),
1159 *   std::map<types::boundary_id, const Function<dim> *>(),
1160 *   solution,
1161 *   estimated_error_per_cell);
1162 *   std::cout << " Max cell-wise error estimate: "
1163 *   << estimated_error_per_cell.linfty_norm() << std::endl;
1164 *   }
1165 *   }
1166 *  
1167 *  
1168 *  
1169 * @endcode
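* If one did want to use the estimate to adapt the mesh, the usual approach
* from @ref step_6 "step-6" would look like the following sketch (not part of
* this program; the solution would have to be transferred or recomputed after
* the mesh has changed):
*
* @code
*   // Sketch: refine the 30% of cells with the largest estimated error and
*   // coarsen the 3% with the smallest, then adapt the mesh.
*   // (Requires #include <deal.II/grid/grid_refinement.h>.)
*   GridRefinement::refine_and_coarsen_fixed_number(triangulation,
*                                                   estimated_error_per_cell,
*                                                   0.3,
*                                                   0.03);
*   triangulation.execute_coarsening_and_refinement();
* @endcode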
1170 *
1171 *
1172 * <a name="step_65-ThePoissonProblemrunfunction"></a>
1173 * <h3>The PoissonProblem::run() function</h3>
1174 *
1175
1176 *
1177 * Finally, we define the `run()` function that controls how we want to
1178 * execute this program (which is called by the main() function in the usual
1179 * way). We start by calling the `create_grid()` function that sets up our
1180 * geometry with the appropriate manifolds. We then run two instances of a
1181 * solver chain, starting from the setup of the equations, the assembly of
1182 * the linear system, its solution with a simple iterative solver, and the
1183 * postprocessing discussed above. The two instances differ in the way they
1184 * use the mapping. The first uses a conventional MappingQ mapping
1185 * object which we initialize to a degree one more than we use for the
1186 * finite element &ndash; after all, we expect the geometry representation
1187 * to be the bottleneck as the analytic solution is only a quadratic
1188 * polynomial. (In reality, things are interlinked to quite some extent
1189 * because the evaluation of the polynomials in real coordinates involves
1190 * the mapping by higher-degree polynomials, which represent some smooth
1191 * rational functions. As a consequence, higher-degree polynomials still pay
1192 * off, so it does not make sense to increase the degree of the mapping
1193 * further.) Once the first pass is completed, we let the timer print a
1194 * summary of the compute times of the individual stages.
1195 *
1196 * @code
1197 *   template <int dim>
1198 *   void PoissonProblem<dim>::run()
1199 *   {
1200 *   create_grid();
1201 *  
1202 *   {
1203 *   std::cout << std::endl
1204 *   << "====== Running with the basic MappingQ class ====== "
1205 *   << std::endl
1206 *   << std::endl;
1207 *  
1208 *   MappingQ<dim> mapping(fe.degree + 1);
1209 *   setup_system(mapping);
1210 *   assemble_system(mapping);
1211 *   solve();
1212 *   postprocess(mapping);
1213 *  
1214 *   timer.print_summary();
1215 *   timer.reset();
1216 *   }
1217 *  
1218 * @endcode
1219 *
1220 * For the second instance, we instead set up the MappingQCache class. Its
1221 * use is very simple: After constructing it (with the degree, given that
1222 * we want it to show the correct degree functionality in other contexts),
1223 * we fill the cache via the MappingQCache::initialize() function. At this
1224 * stage, we specify which mapping we want to use (obviously, the same
1225 * MappingQ as previously in order to repeat the same computations)
1226 * for the cache, and then run through the same functions again, now
1227 * handing in the modified mapping. In the end, we again print the
1228 * accumulated wall times since the reset to see how the times compare to
1229 * the original setting.
1230 *
1231 * @code
1232 *   {
1233 *   std::cout
1234 *   << "====== Running with the optimized MappingQCache class ====== "
1235 *   << std::endl
1236 *   << std::endl;
1237 *  
1238 *   MappingQCache<dim> mapping(fe.degree + 1);
1239 *   {
1240 *   TimerOutput::Scope scope(timer, "Initialize mapping cache");
1241 *   mapping.initialize(MappingQ<dim>(fe.degree + 1), triangulation);
1242 *   }
1243 *   std::cout << " Memory consumption cache: "
1244 *   << 1e-6 * mapping.memory_consumption() << " MB" << std::endl;
1245 *  
1246 *   setup_system(mapping);
1247 *   assemble_system(mapping);
1248 *   solve();
1249 *   postprocess(mapping);
1250 *  
1251 *   timer.print_summary();
1252 *   }
1253 *   }
1254 *   } // namespace Step65
1255 *  
1256 *  
1257 *  
1258 *   int main()
1259 *   {
1260 *   Step65::PoissonProblem<3> test_program;
1261 *   test_program.run();
1262 *   return 0;
1263 *   }
1264 * @endcode

<a name="step_65-Results"></a><h1>Results</h1>


<a name="step_65-Programoutput"></a><h3>Program output</h3>


If we run the three-dimensional version of this program with polynomials of
degree three, we get the following program output:

@code
> make run
Scanning dependencies of target step-65
[ 33%] Building CXX object CMakeFiles/step-65.dir/step-65.cc.o
[ 66%] Linking CXX executable step-65
[ 66%] Built target step-65
[100%] Run step-65 with Release configuration

====== Running with the basic MappingQ class ======

 Number of active cells: 6656
 Number of degrees of freedom: 181609
 Number of solver iterations: 285
 L2 error vs exact solution: 8.99339e-08
 H1 error vs exact solution: 6.45341e-06
 Max cell-wise error estimate: 0.00743406


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start | 49.4s | |
| | | |
| Section | no. calls | wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble linear system | 1 | 5.8s | 12% |
| Compute constraints | 1 | 0.109s | 0.22% |
| Compute error estimator | 1 | 16.5s | 33% |
| Compute error norms | 1 | 9.11s | 18% |
| Solve linear system | 1 | 9.92s | 20% |
| Write output | 1 | 4.85s | 9.8% |
+---------------------------------+-----------+------------+------------+

====== Running with the optimized MappingQCache class ======

 Memory consumption cache: 22.9981 MB
 Number of active cells: 6656
 Number of degrees of freedom: 181609
 Number of solver iterations: 285
 L2 error vs exact solution: 8.99339e-08
 H1 error vs exact solution: 6.45341e-06
 Max cell-wise error estimate: 0.00743406


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start | 18.4s | |
| | | |
| Section | no. calls | wall time | % of total |
+---------------------------------+-----------+------------+------------+
| Assemble linear system | 1 | 1.44s | 7.8% |
| Compute constraints | 1 | 0.00336s | 0% |
| Compute error estimator | 1 | 0.476s | 2.6% |
| Compute error norms | 1 | 0.505s | 2.7% |
| Initialize mapping cache | 1 | 4.96s | 27% |
| Solve linear system | 1 | 9.95s | 54% |
| Write output | 1 | 0.875s | 4.8% |
+---------------------------------+-----------+------------+------------+

[100%] Built target run
@endcode

Before discussing the timings, we look at the memory consumption for the
MappingQCache object: Our program prints that it utilizes 23 MB of
memory. If we relate this number to the memory consumption of a single
(solution or right hand side) vector, which is 1.5 MB (namely, 181,609
elements times 8 bytes per entry in double precision), or to the memory
consumed by the system matrix and the sparsity pattern (which is 274 MB),
we realize that it is not an overly heavy data structure, given its benefits.

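As a rough back-of-the-envelope check (an estimate, assuming the cache
essentially stores the mapping support points of every cell on every level):
a degree-4 mapping in 3d has @f$(4+1)^3 = 125@f$ support points per cell, i.e.,
@f$125 \times 3 \times 8 = 3000@f$ bytes. The merged coarse mesh of 13 cells
refined globally three times has @f$13 + 104 + 832 + 6656 = 7605@f$ cells in
total, so the cache holds roughly @f$7605 \times 3000@f$ bytes @f$\approx 23@f$ MB,
consistent with the number printed above.
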
With respect to the timers, we see a clear improvement in the overall run time
of the program by a factor of 2.7. If we disregard the iterative solver, which
is the same in both cases (and not optimal, given the simple preconditioner we
use, and the fact that sparse matrix-vector products waste operations for
cubic polynomials), the advantage is a factor of almost 5. This is pretty
impressive for a linear stationary problem, and cost savings would indeed be
much more prominent for time-dependent and nonlinear problems where assembly
is called several times. If we look into the individual components, we get a
clearer picture of what is going on and why the cache is so efficient: In the
MappingQ case, essentially every operation that involves a mapping takes
5 seconds or more to run. The norm computation runs two
VectorTools::integrate_difference() functions, which each take almost 5
seconds. (The computation of constraints is cheaper because it only evaluates
the mapping in cells at the boundary for the interpolation of boundary
conditions.) If we compare these 5 seconds to the time it takes to fill the
MappingQCache, which is about 5 seconds (for all cells, not just the active
ones), it becomes obvious that the computation of the mapping support points
dominates over everything else in the MappingQ case. Perhaps the most
striking result is the time for the error estimator, labeled "Compute error
estimator", where the MappingQ implementation takes about 16.5 seconds and
the MappingQCache variant less than 0.5 seconds. The reason why the former is
so expensive (three times more expensive than the assembly, for instance) is
that the error estimation involves evaluation of quantities over faces, where
each face in the mesh requests additional points of the mapping that in turn
go through the very expensive TransfiniteInterpolationManifold class. As there
are six faces per cell, this happens much more often than in assembly. Again,
MappingQCache nicely eliminates the repeated evaluation, aggregating all the
expensive steps involving the manifold in a single initialization call that
gets repeatedly used.


<a name="step_65-PlainProg"></a>
<h1> The plain program</h1>
@include "step-65.cc"
*/