step-4.h
@code
  template <int dim>
  double RightHandSide<dim>::value(const Point<dim> &p,
                                   const unsigned int /*component*/) const
  {
    double return_value = 0.0;
    for (unsigned int i = 0; i < dim; ++i)
      return_value += 4.0 * std::pow(p[i], 4.0);

    return return_value;
  }
@endcode

As boundary values, we choose @f$x^2+y^2@f$ in 2d, and @f$x^2+y^2+z^2@f$ in 3d. This
happens to be equal to the square of the vector from the origin to the
point at which we would like to evaluate the function, irrespective of the
dimension. So that is what we return:

@code
  template <int dim>
  double BoundaryValues<dim>::value(const Point<dim> &p,
                                    const unsigned int /*component*/) const
  {
    return p.square();
  }
@endcode

<a name="step_4-ImplementationofthecodeStep4codeclass"></a>
<h3>Implementation of the <code>Step4</code> class</h3>

Next for the implementation of the class template that makes use of the
functions above. As before, we will write everything as templates that have
a formal parameter <code>dim</code> that we assume unknown at the time we
define the template functions. Only later, the compiler will find a
declaration of <code>Step4@<2@></code> (in the <code>main</code> function,
actually) and compile the entire class with <code>dim</code> replaced by 2,
a process referred to as "instantiation of a template". When doing so, it
will also replace instances of <code>RightHandSide@<dim@></code> by
<code>RightHandSide@<2@></code> and instantiate the latter class from the
class template.

In fact, the compiler will also find a declaration <code>Step4@<3@></code>
in <code>main()</code>. This will cause it to again go back to the general
<code>Step4@<dim@></code> template, replace all occurrences of
<code>dim</code>, this time by 3, and compile the class a second time. Note
that the two instantiations <code>Step4@<2@></code> and
<code>Step4@<3@></code> are completely independent classes; their only
common feature is that they are both instantiated from the same general
template, but they are not convertible into each other, for example, and
share no code (both instantiations are compiled completely independently).
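
To make this mechanism concrete, here is a minimal, self-contained sketch
(the names are hypothetical and not part of step-4): the compiler only
generates code for a class template once it sees a concrete instantiation,
and distinct instantiations are unrelated types.

@code
  #include <type_traits>

  template <int dim>
  struct Example
  {
    int dimension() const { return dim; }
  };

  int main()
  {
    Example<2> e2; // forces the compiler to instantiate Example<2>
    Example<3> e3; // instantiates Example<3>, an entirely separate class

    // The two instantiations share no code and are different types:
    static_assert(!std::is_same_v<Example<2>, Example<3>>);

    return e2.dimension() + e3.dimension();
  }
@endcode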

<a name="step_4-Step4Step4"></a>
<h4>Step4::Step4</h4>

After this introduction, here is the constructor of the <code>Step4</code>
class. It specifies the desired polynomial degree of the finite elements
and associates the DoFHandler to the triangulation just as in the previous
example program, @ref step_3 "step-3":
@code
  template <int dim>
  Step4<dim>::Step4()
    : fe(/* polynomial degree = */ 1)
    , dof_handler(triangulation)
  {}
@endcode

<a name="step_4-Step4make_grid"></a>
<h4>Step4::make_grid</h4>

Grid creation is something inherently dimension dependent. However, as long
as the domains are sufficiently similar in 2d or 3d, the library can
abstract for you. In our case, we would like to again solve on the square
@f$[-1,1]\times [-1,1]@f$ in 2d, or on the cube @f$[-1,1] \times [-1,1] \times
[-1,1]@f$ in 3d; both can be termed GridGenerator::hyper_cube(), so we may
use the same function in whatever dimension we are. Of course, the
functions that create a hypercube in two and three dimensions are very much
different, but that is something you need not care about. Let the library
handle the difficult things.
@code
  template <int dim>
  void Step4<dim>::make_grid()
  {
    GridGenerator::hyper_cube(triangulation, -1, 1);
    triangulation.refine_global(4);

    std::cout << " Number of active cells: " << triangulation.n_active_cells()
              << std::endl
              << " Total number of cells: " << triangulation.n_cells()
              << std::endl;
  }
@endcode

<a name="step_4-Step4setup_system"></a>
<h4>Step4::setup_system</h4>

This function looks exactly like in the previous example, although it
performs actions that in their details are quite different if
<code>dim</code> happens to be 3. The only significant difference from a
user's perspective is the number of cells resulting, which is much higher
in three than in two space dimensions!

@code
  template <int dim>
  void Step4<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);

    std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
              << std::endl;

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp);
    sparsity_pattern.copy_from(dsp);

    system_matrix.reinit(sparsity_pattern);

    solution.reinit(dof_handler.n_dofs());
    system_rhs.reinit(dof_handler.n_dofs());
  }
@endcode

<a name="step_4-Step4assemble_system"></a>
<h4>Step4::assemble_system</h4>

Unlike in the previous example, we would now like to use a non-constant
right hand side function and non-zero boundary values. Both are tasks that
are easily achieved with only a few new lines of code in the assembly of
the matrix and right hand side.

More interesting, though, is the way we assemble matrix and right hand side
vector dimension independently: there is simply no difference to the
two-dimensional case. Since the important objects used in this function
(quadrature formula, FEValues) depend on the dimension by way of a template
parameter as well, they can take care of setting up properly everything for
the dimension for which this function is compiled. By declaring all classes
which might depend on the dimension using a template parameter, the library
can do nearly all the work for you and you don't have to care about most
things.

@code
  template <int dim>
  void Step4<dim>::assemble_system()
  {
    const QGauss<dim> quadrature_formula(fe.degree + 1);
@endcode

We wanted to have a non-constant right hand side, so we use an object of
the class declared above to generate the necessary data. Since this right
hand side object is only used locally in the present function, we declare
it here as a local variable:

@code
    RightHandSide<dim> right_hand_side;
@endcode

Compared to the previous example, in order to evaluate the non-constant
right hand side function we now also need the quadrature points on the
cell we are presently on (previously, we only required the values and
gradients of the shape functions from the FEValues object, as well as the
quadrature weights, FEValues::JxW()). We can tell the FEValues object to
do this for us by also giving it the #update_quadrature_points flag:

@code
    FEValues<dim> fe_values(fe,
                            quadrature_formula,
                            update_values | update_gradients |
                              update_quadrature_points | update_JxW_values);
@endcode

We then again define the same abbreviation as in the previous program.
The value of this variable of course depends on the dimension which we
are presently using, but the FiniteElement class does all the necessary
work for you and you don't have to care about the dimension dependent
parts:

@code
    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
@endcode

Next, we again have to loop over all cells and assemble the local
contributions. Note that a cell is a quadrilateral in two space
dimensions, but a hexahedron in 3d. In fact, the
<code>active_cell_iterator</code> data type is something different,
depending on the dimension we are in, but to the outside world they look
alike and you will probably never see a difference. In any case, the real
type is hidden by using `auto`:

@code
    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);

        cell_matrix = 0;
        cell_rhs    = 0;
@endcode

Now we have to assemble the local matrix and right hand side. This is
done exactly like in the previous example, but now we revert the order
of the loops (which we can safely do since they are independent
of each other) and merge the loops for the local matrix and the local
vector as far as possible to make things a bit faster.

Assembling the right hand side presents the only significant
difference to how we did things in @ref step_3 "step-3": Instead of using a
constant right hand side with value 1, we use the object representing
the right hand side and evaluate it at the quadrature points:
@code
        for (const unsigned int q_index : fe_values.quadrature_point_indices())
          for (const unsigned int i : fe_values.dof_indices())
            {
              for (const unsigned int j : fe_values.dof_indices())
                cell_matrix(i, j) +=
                  (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
                   fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
                   fe_values.JxW(q_index));           // dx

              const auto &x_q = fe_values.quadrature_point(q_index);
              cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
                              right_hand_side.value(x_q) *        // f(x_q)
                              fe_values.JxW(q_index));            // dx
            }
@endcode

As a final remark to these loops: when we assemble the local
contributions to <code>cell_matrix(i,j)</code>, we have to multiply
the gradients of shape functions @f$i@f$ and @f$j@f$ at point number
q_index and multiply the result with the scalar weight JxW. This is what
actually happens: <code>fe_values.shape_grad(i,q_index)</code> returns a
<code>dim</code> dimensional vector, represented by a
<code>Tensor@<1,dim@></code> object, and the operator* that
multiplies it with the result of
<code>fe_values.shape_grad(j,q_index)</code> makes sure that the
<code>dim</code> components of the two vectors are properly
contracted, and the result is a scalar floating point number that
then is multiplied with the weights. Internally, this operator* makes
sure that this happens correctly for all <code>dim</code> components
of the vectors, whether <code>dim</code> be 2, 3, or any other space
dimension; from a user's perspective, this is not something worth
bothering with, however, making things a lot simpler if one wants to
write code dimension independently.

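To see this contraction in isolation, here is a small sketch using only the
Tensor class (the variable names are made up for illustration and do not
appear in step-4):

@code
  #include <deal.II/base/tensor.h>

  using namespace dealii;

  int main()
  {
    // Two 'dim'-dimensional vectors, here with dim = 2:
    Tensor<1, 2> grad_phi_i;
    grad_phi_i[0] = 1.;
    grad_phi_i[1] = 2.;

    Tensor<1, 2> grad_phi_j;
    grad_phi_j[0] = 3.;
    grad_phi_j[1] = 4.;

    // operator* contracts the two vectors into a scalar: 1*3 + 2*4 = 11
    const double contraction = grad_phi_i * grad_phi_j;
    return contraction > 0 ? 0 : 1;
  }
@endcode
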

With the local systems assembled, the transfer into the global matrix
and right hand side is done exactly as before, but here we have again
merged some loops for efficiency:

@code
        cell->get_dof_indices(local_dof_indices);
        for (const unsigned int i : fe_values.dof_indices())
          {
            for (const unsigned int j : fe_values.dof_indices())
              system_matrix.add(local_dof_indices[i],
                                local_dof_indices[j],
                                cell_matrix(i, j));

            system_rhs(local_dof_indices[i]) += cell_rhs(i);
          }
      }
@endcode

As the final step in this function, we wanted to have non-homogeneous
boundary values in this example, unlike the one before. This is a simple
task: we only have to replace the Functions::ZeroFunction used there by an
object of the class which describes the boundary values we would like to
use (i.e., the <code>BoundaryValues</code> class declared above).

The function VectorTools::interpolate_boundary_values() will only work
on faces that have been marked with boundary indicator 0 (because that's
what we say the function should work on with the second argument below).
If there are faces with boundary id other than 0, then the function
interpolate_boundary_values() will do nothing on these faces. For
the Laplace equation, doing nothing is equivalent to assuming that
on those parts of the boundary a zero Neumann boundary condition holds.
@code
    std::map<types::global_dof_index, double> boundary_values;
    VectorTools::interpolate_boundary_values(dof_handler,
                                             types::boundary_id(0),
                                             BoundaryValues<dim>(),
                                             boundary_values);
    MatrixTools::apply_boundary_values(boundary_values,
                                       system_matrix,
                                       solution,
                                       system_rhs);
  }
@endcode
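
As an aside, and purely as a hypothetical sketch not used in step-4: if
parts of the boundary carried a different boundary id, say 1, one could
impose a different Dirichlet value there by calling the same function a
second time with that id, accumulating into the same map (faces whose id
appears in no such call keep the zero-Neumann behavior described above):

@code
    // Hypothetical: prescribe the constant value 1 on boundary id 1.
    VectorTools::interpolate_boundary_values(dof_handler,
                                             types::boundary_id(1),
                                             Functions::ConstantFunction<dim>(1.),
                                             boundary_values);
@endcode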

<a name="step_4-Step4solve"></a>
<h4>Step4::solve</h4>

Solving the linear system of equations is something that looks almost
identical in most programs. In particular, it is dimension independent, so
this function is copied verbatim from the previous example.

@code
  template <int dim>
  void Step4<dim>::solve()
  {
    SolverControl solver_control(1000, 1e-6 * system_rhs.l2_norm());
    SolverCG<Vector<double>> solver(solver_control);
    solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());

    std::cout << " " << solver_control.last_step()
              << " CG iterations needed to obtain convergence." << std::endl;
  }
@endcode

<a name="step_4-Step4output_results"></a>
<h4>Step4::output_results</h4>

This function also does what the respective one did in @ref step_3 "step-3". No changes
here for dimension independence either.

Since the program will run both 2d and 3d versions of the Laplace solver,
we use the dimension in the filename to generate distinct file names for
each run (in a better program, one would check whether <code>dim</code> can
have other values than 2 or 3, but we neglect this here for the sake of
brevity).

@code
  template <int dim>
  void Step4<dim>::output_results() const
  {
    DataOut<dim> data_out;

    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "solution");

    data_out.build_patches();

    std::ofstream output(dim == 2 ? "solution-2d.vtk" : "solution-3d.vtk");
    data_out.write_vtk(output);
  }
@endcode

<a name="step_4-Step4run"></a>
<h4>Step4::run</h4>

This is the function which has the top-level control over everything. Apart
from one line of additional output, it is the same as for the previous
example.

@code
  template <int dim>
  void Step4<dim>::run()
  {
    std::cout << "Solving problem in " << dim << " space dimensions."
              << std::endl;

    make_grid();
    setup_system();
    assemble_system();
    solve();
    output_results();
  }
@endcode

<a name="step_4-Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>

And this is the main function. It also looks mostly like in @ref step_3 "step-3", but if
you look at the code below, note how we first create a variable of type
<code>Step4@<2@></code> (forcing the compiler to compile the class template
with <code>dim</code> replaced by <code>2</code>) and run a 2d simulation,
and then we do the whole thing over in 3d.

In practice, this is probably not what you would do very frequently (you
probably either want to solve a 2d problem, or one in 3d, but not both at
the same time). However, it demonstrates the mechanism by which we can
change which dimension we want in a single place, and thereby force the
compiler to recompile the dimension independent class templates for the
dimension we request. The emphasis here lies on the fact that we only need
to change a single place. This makes it rather trivial to debug the program
in 2d where computations are fast, and then switch a single place to a 3 to
run the much more computing intensive program in 3d for 'real'
computations.

Each of the two blocks is enclosed in braces to make sure that the
<code>laplace_problem_2d</code> variable goes out of scope (and releases
the memory it holds) before we move on to allocate memory for the 3d
case. Without the additional braces, the <code>laplace_problem_2d</code>
variable would only be destroyed at the end of the function, i.e. after
running the 3d problem, and would needlessly hog memory while the 3d run
could actually use it.

@code
  int main()
  {
    {
      Step4<2> laplace_problem_2d;
      laplace_problem_2d.run();
    }

    {
      Step4<3> laplace_problem_3d;
      laplace_problem_3d.run();
    }

    return 0;
  }
@endcode

<a name="step_4-Results"></a><h1>Results</h1>

The output of the program looks as follows (the number of iterations
may vary by one or two, depending on your computer, since this is
often dependent on the round-off accuracy of floating point
operations, which differs between processors):
@code
Solving problem in 2 space dimensions.
 Number of active cells: 256
 Total number of cells: 341
 Number of degrees of freedom: 289
 19 CG iterations needed to obtain convergence.
Solving problem in 3 space dimensions.
 Number of active cells: 4096
 Total number of cells: 4681
 Number of degrees of freedom: 4913
 20 CG iterations needed to obtain convergence.
@endcode
It is obvious that in three spatial dimensions the number of cells and
therefore also the number of degrees of freedom is
much higher. What cannot be seen here is that besides this higher
number of rows and columns in the matrix, there are also significantly
more entries per row of the matrix in three space
dimensions. Together, this leads to a much higher numerical effort for
solving the system of equations, which you can feel in the run time of the two
solution steps when you actually run the program.

The program produces two files: <code>solution-2d.vtk</code> and
<code>solution-3d.vtk</code>, which can be viewed using the programs
VisIt or Paraview (in case you do not have these programs, you can easily
change the output format in the program to something which you can view more
easily). Visualizing solutions is a bit of an art, but it can also be fun, so
you should play around with your favorite visualization tool to get familiar
with its functionality. Here's what I have come up with for the 2d solution:

<p align="center">
  <img src="https://www.dealii.org/images/steps/developer/step-4.solution-2d.png" alt="">
</p>

(See also <a href="https://www.math.colostate.edu/~bangerth/videos.676.11.html">video lecture 11</a>, <a href="https://www.math.colostate.edu/~bangerth/videos.676.32.html">video lecture 32</a>.)
The picture shows the solution of the problem under consideration as
a 3D plot. As can be seen, the solution is almost flat in the interior
of the domain and has a higher curvature near the boundary. This, of
course, is due to the fact that for Laplace's equation the curvature
of the solution is equal to the right hand side and that was chosen as
a quartic polynomial which is nearly zero in the interior and is only
rising sharply when approaching the boundaries of the domain; the
maximal values of the right hand side function are at the corners of
the domain, where the solution also moves most rapidly. It is also
instructive to verify a computed solution against an analytical
solution. For an explanation of this technique, see @ref step_7 "step-7".

On the other hand, even though the picture does not show the mesh lines
explicitly, you can see them in the solution as small kinks. This clearly
indicates that the solution hasn't been computed to very high accuracy and
that to get a better solution, we may have to compute on a finer mesh.

In three spatial dimensions, visualization is a bit more difficult. The left
picture shows the solution and the mesh it was computed on on the surface of
the domain. This is nice, but it has the disadvantage that it completely hides
what is happening on the inside. The picture on the right is an attempt at
visualizing the interior as well, by showing surfaces where the solution has
constant values (as indicated by the legend at the top left). Isosurface
pictures look best if one makes the individual surfaces slightly transparent
so that it is possible to see through them and see what's behind.

<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-4.solution-3d.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-4.contours-3d.png" alt="">
    </td>
  </tr>
</table>

@note A final remark on visualization: the idea of visualization is to give
insight, which is not the same as displaying information. In particular, it
is easy to overload a picture with information, but while it shows more
information it also makes it more difficult to glean insight. As an example,
the program I used to generate these pictures, VisIt, by default puts tick
marks on every axis, puts a big fat label "X Axis" on the @f$x@f$ axis and
similar for the other
axes, shows the file name from which the data was taken in the top left and
the name of the user doing so and the time and date on the bottom right. None
of this is important here: the axes are equally easy to make out because the
tripod at the bottom left of the picture already shows the orientation; and
we know from the program that the domain is
@f$[-1,1]^3@f$, so there is no need for tick marks. As a consequence, I have
switched off all of this extraneous clutter in the pictures: the art of
visualization consists of reducing the picture to those components that are
important to see what one wants to show, but no more.

<a name="step-4-postprocessing"></a>
<a name="step_4-PostprocessingWhattodowiththesolution"></a><h3> Postprocessing: What to do with the solution? </h3>

This tutorial -- like most of the other programs -- principally only shows how
to numerically approximate the solution of a partial differential equation, and
then how to visualize this solution graphically. But solving a PDE is of course
not the goal in most practical applications (unless the goal is to produce a
pretty picture): We generally
want to solve a PDE because we want to *extract information* from it. Examples
for what people are interested in from solutions include the following:
- Let's say you solve the equations of elasticity (which we will do in @ref step_8 "step-8"),
  then that's presumably because you want to know about the deformation of an
  elastic object under a given load. From an engineering perspective, what you
  then might want to know is the displacement of the object,
  say at a specific point; or you may want to know the maximum
  [stress](https://en.wikipedia.org/wiki/Stress_(mechanics)) in order to
  determine whether the applied load exceeds the safe maximal stress the
  material can withstand.
- If you are solving fluid flow problems (such as in @ref step_22 "step-22", @ref step_57 "step-57", @ref step_67 "step-67",
  and @ref step_69 "step-69"), then you might be interested in the fluid velocity at specific
  points, and oftentimes the forces the fluid exerts on the boundary of the
  fluid domain (say, the drag and lift on the wings of an airplane).
- Or you may be solving wave propagation problems, in which case you might
  want to know how strongly an object reflects incoming waves (this is, for
  example, how one computes the
  [radar cross section](https://en.wikipedia.org/wiki/Radar_cross_section)
  of planes or ships).

In all of these examples, the point is that solving the
PDE is only the first step. The second step is to *evaluate* the computed
solution in order to extract relevant numbers that allow us to
either *optimize a design*, or to *make decisions*. This second step is often
called "postprocessing the solution".

Let us pick one such quantity for the program at hand. If one interprets the
solution of the equation we solve here as the vertical deflection of a
membrane subject to a vertical force, then in this
kind of situation, it makes sense to evaluate the *average vertical
displacement*,
@f[
  \bar u_h = \frac{\int_\Omega u_h(\mathbf x) \, dx}{|\Omega|},
@f]
where @f$|\Omega| = \int_\Omega 1 \, dx@f$ is the area of the domain. To compute
such an integral, we first split it into a sum of integrals
over cells,
@f[
  \int_\Omega u_h(\mathbf x) \, dx
  =
  \sum_K \int_K u_h(\mathbf x) \, dx,
@f]
and then integrals over cells are approximated by quadrature:
@f{align*}{
  \int_\Omega u_h(\mathbf x) \, dx
  &=
  \sum_K \int_K u_h(\mathbf x) \, dx
  \\
  &\approx
  \sum_K \sum_q u_h(\mathbf x_q^K) w_q^K,
@f}
where @f$\mathbf x_q^K@f$ is the location of the @f$q@f$th quadrature point on
cell @f$K@f$ and @f$w_q^K@f$ its weight. All of this is as always provided by
the FEValues class -- the entry point for all integrals in deal.II.

The only ingredient we have not used in previous programs is a way
to get the values of the solution @f$u_h@f$ at the quadrature points of a cell.
This functionality is provided by FEValues::get_function_values(), a
function that takes a global vector of nodal values as input and returns
a vector of function values at the quadrature points of the current cell.
Using this function, to see how it all works together you can
place the following code snippet anywhere in the program after the
solution has been computed (the `output_results()` function seems like a good
place to also do postprocessing, for example):
@code
  QGauss<dim>   quadrature_formula(fe.degree + 1);
  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_JxW_values);

  std::vector<double> solution_values(quadrature_formula.size());
  double              integral_of_u   = 0;
  double              volume_of_omega = 0;

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values.get_function_values(solution, solution_values);

      for (const unsigned int q_point : fe_values.quadrature_point_indices())
        {
          integral_of_u += solution_values[q_point] * fe_values.JxW(q_point);
          volume_of_omega += 1 * fe_values.JxW(q_point);
        }
    }
  std::cout << " Mean value of u=" << integral_of_u / volume_of_omega
            << std::endl;
@endcode
This snippet computes both the integral of the solution and
the integral @f$|\Omega| = \int_\Omega 1 \, dx@f$ in exactly the same way, via
quadrature. (We could avoid having to compute @f$|\Omega|@f$ by hand here, using the
fact that deal.II has a function for this: GridTools::volume(). That said,
it is cheap to compute the two integrals
concurrently in the same loop, and so that's what we do.)
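
For reference, the library shortcut just mentioned is a one-liner (a small
sketch; it requires including <deal.II/grid/grid_tools.h>):

@code
  const double measured_volume = GridTools::volume(triangulation);
@endcode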

This program of course also solves the same Poisson equation in three space
dimensions. In this situation, the Poisson equation is often used as a model
for diffusion of either a physical species (say, of ink in a tank of water,
or a pollutant in the air) or of energy (specifically, of thermal energy in
a solid body). In that context, the quantity
@f[
  \Phi_h = \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
@f]
is the *flux* of this species or energy across the boundary. (In actual
physical models, one would also have to multiply the right hand side by
a diffusivity or conductivity constant, but let us ignore this here.) In
much the same way as before, we compute such integrals by splitting
them into integrals over *faces* of cells, and then applying quadrature:
@f{align*}{
  \Phi_h
  &=
  \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
  \\
  &=
  \sum_K
  \sum_{f \in \text{faces of @f$K@f$}, f\subset\partial\Omega}
  \int_f \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
  \\
  &\approx
  \sum_K
  \sum_{f \in \text{faces of @f$K@f$}, f\subset\partial\Omega}
  \sum_q \nabla u_h(\mathbf x_q^f) \cdot \mathbf n(\mathbf x_q^f) w_q^f,
@f}
where now @f$\mathbf x_q^f@f$ are the quadrature points located on face @f$f@f$,
and @f$w_q^f@f$ are the weights associated with these faces. The second
of the sum symbols loops over all faces of cell @f$K@f$, but restricted to
those that are actually at the boundary.

This all is easily implemented by the following code that replaces the use of the
FEValues class (which is used for integrating over cells -- i.e., domain integrals)
by the FEFaceValues class (which is used for integrating over faces -- i.e.,
boundary integrals):
@code
  QGauss<dim - 1>   face_quadrature_formula(fe.degree + 1);
  FEFaceValues<dim> fe_face_values(fe,
                                   face_quadrature_formula,
                                   update_gradients | update_normal_vectors |
                                     update_JxW_values);

  std::vector<Tensor<1, dim>> solution_gradients(face_quadrature_formula.size());
  double                      flux = 0;

  for (const auto &cell : dof_handler.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      if (face->at_boundary())
        {
          fe_face_values.reinit(cell, face);
          fe_face_values.get_function_gradients(solution, solution_gradients);

          for (const unsigned int q_point :
               fe_face_values.quadrature_point_indices())
            {
              flux += solution_gradients[q_point] *
                      fe_face_values.normal_vector(q_point) *
                      fe_face_values.JxW(q_point);
            }
        }
  std::cout << " Flux=" << flux << std::endl;
@endcode

If you add these two code snippets to the code, you will get output like the
following when you run the program:
@code
Solving problem in 2 space dimensions.
 Number of active cells: 256
 Total number of cells: 341
 Number of degrees of freedom: 289
 26 CG iterations needed to obtain convergence.
 Mean value of u=1.33303
 Flux=-3.68956
Solving problem in 3 space dimensions.
 Number of active cells: 4096
 Total number of cells: 4681
 Number of degrees of freedom: 4913
 30 CG iterations needed to obtain convergence.
 Mean value of u=1.58058
 Flux=-8.29435
@endcode

This makes some sense: If you look, for example, at the 2d output above,
the solution varies between values of 1 and 2, but with a larger part of the
solution closer to one than two; so an average value of 1.33 for the mean value
is reasonable. For the flux, recall that @f$\nabla u \cdot \mathbf n@f$ is the
directional derivative in the normal direction -- in other words, how the
solution changes as we move from the interior of the domain towards the
boundary. If you look at the 2d solution, you will realize that for most parts
of the boundary, the solution *decreases* as we approach the boundary, so the
normal derivative is negative -- so if we integrate along the boundary, we
should expect (and obtain!) a negative value.

<a name="step-4-extensions"></a>
<a name="step_4-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

There are many ways with which one can play with this program. The simpler
ones include essentially all the possibilities already discussed in the
<a href="step_3.html#extensions" target="body">Possibilities for extensions in the documentation of step-3</a>,
except that you will have to think about whether something now also applies
to the 3d case discussed in the current program.

It is also worthwhile considering the postprocessing options discussed
above. The documentation states two numbers (the mean value and the
normal flux) for both the 2d and 3d cases. Can we trust these
numbers? We have convinced ourselves that at least the mean value
is reasonable, and that the sign of the flux is probably correct.
But are these numbers accurate?

A general rule is that we should never trust a number unless we have
verified it in some way. From the theory of finite element methods,
we know that as we make the mesh finer and finer, the numerical
solution @f$u_h@f$ we compute here must converge to the exact solution
@f$u@f$. As a consequence, we also expect that @f$\bar u_h \rightarrow \bar u@f$
and @f$\Phi_h \rightarrow \Phi@f$, but that does not mean that for any
given mesh @f$\bar u_h@f$ or @f$\Phi_h@f$ are particularly accurate approximations.

To test this kind of thing, we have already considered the convergence of
a point value in @ref step_3 "step-3". We can do the same here by selecting how many
times the mesh is globally refined in the `make_grid()` function of this
program (a sketch of how to parameterize this follows below). For the mean
value of the solution, we then get the following numbers:
<table align="center" class="doxtable">
  <tr> <th># of refinements</th>
       <th>@f$\bar u_h@f$ in 2d</th>
       <th>@f$\bar u_h@f$ in 3d</th>
  </tr>
  <tr> <td>4</td> <td>1.33303</td> <td>1.58058</td> </tr>
  <tr> <td>5</td> <td>1.33276</td> <td>1.57947</td> </tr>
  <tr> <td>6</td> <td>1.3327</td> <td>1.5792</td> </tr>
  <tr> <td>7</td> <td>1.33269</td> <td>1.57914</td> </tr>
  <tr> <td>8</td> <td>1.33268</td> <td></td> </tr>
  <tr> <td>9</td> <td>1.33268</td> <td></td> </tr>
</table>
I did not have the patience to run the last two values for the 3d case --
one needs quite a fine mesh for this, with correspondingly long run times.
But we can be reasonably assured that values around 1.33 (for the 2d case)
and 1.58 (for the 3d case) are about right -- and at least for engineering
applications, three digits of accuracy are good enough.
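
For reference, one way to run this experiment is to give `make_grid()` a
refinement-count argument instead of the hard-coded 4 (a hypothetical
variation; the declaration in the class definition would have to change
accordingly):

@code
  template <int dim>
  void Step4<dim>::make_grid(const unsigned int n_refinements)
  {
    GridGenerator::hyper_cube(triangulation, -1, 1);
    // Refine as often as requested instead of a fixed four times:
    triangulation.refine_global(n_refinements);
  }
@endcode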

The situation looks very different for the flux. Here, we get results
such as the following:
<table align="center" class="doxtable">
  <tr> <th># of refinements</th>
       <th>@f$\Phi_h@f$ in 2d</th>
       <th>@f$\Phi_h@f$ in 3d</th>
  </tr>
  <tr> <td>4</td> <td>-3.68956</td> <td>-8.29435</td> </tr>
  <tr> <td>5</td> <td>-4.90147</td> <td>-13.0691</td> </tr>
  <tr> <td>6</td> <td>-5.60745</td> <td>-15.9171</td> </tr>
  <tr> <td>7</td> <td>-5.99111</td> <td>-17.4918</td> </tr>
  <tr> <td>8</td> <td>-6.19196</td> <td></td> </tr>
  <tr> <td>9</td> <td>-6.29497</td> <td></td> </tr>
  <tr> <td>10</td> <td>-6.34721</td> <td></td> </tr>
  <tr> <td>11</td> <td>-6.37353</td> <td></td> </tr>
</table>
So this is not great. For the 2d case, we might infer that perhaps
a value around -6.4 might be right if we just refine the mesh enough --
though 11 refinements already leads to some 4,194,304 cells. In any
case, the first number (the one shown in the beginning where we
discussed postprocessing) was off by almost a factor of 2!

For the 3d case, the last number shown was on a mesh with 2,097,152
cells; the next one would have had 8 times as many cells. In any case, the
numbers mean that we can't even be sure
that the first digit of the converged value is correct! In other words,
it was worth checking, or we would have just believed all of these
numbers. In fact, that last column isn't even doing a particularly
good job convincing us that the code might be correctly implemented.

If you keep reading through the other tutorial programs, you will find many ways
to make these sorts of computations more accurate and to come to
believe that the flux actually does converge to its correct value.
For example, we can dramatically increase the accuracy of the computation
by using adaptive mesh refinement (@ref step_6 "step-6") near the boundary, and
in particular by using higher polynomial degree finite elements (also
@ref step_6 "step-6", but also @ref step_7 "step-7"). Using the latter with cubic elements
(polynomial degree 3), we can actually compute the flux pretty
accurately even in 3d: @f$\Phi_h=-19.0148@f$ with 4 global refinement steps,
and @f$\Phi_h=-19.1533@f$ with 5 refinement steps. These numbers are already
pretty close together and give us a reasonable idea of the first
two correct digits of the "true" answer.

@note We would be remiss to not also comment on the fact that there
  are good theoretical reasons why computing the flux accurately
  appears to be so much more difficult than the average value.
  This has to do with the fact that finite element theory
  provides us with the estimate
  @f$\|u-u_h\|_{L_2(\Omega)} \le C h^2 \|\nabla^2u\|_{L_2(\Omega)}@f$
  when using the linear elements this program uses -- that is, for
  every global mesh refinement, @f$h@f$ is reduced by a factor of two
  and the error goes down by a factor of 4. Now, the @f$L_2@f$ error is
  not equivalent to the error in the mean value, but the two are
  related: They are both integrals over the domain, using the *value*
  of the solution. We expect the mean value to converge no worse than
  the @f$L_2@f$ norm of the error. At the same time, theory also provides
  us with this estimate:
  @f$\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
  C h^{1/2} \|\nabla^2u\|_{L_2(\Omega)}@f$. The move from values to
  gradients reduces the convergence rates by one order, and the move
  from domain to boundary by another half order. Here, then, each
  refinement step reduces the error not by a factor of 4 any more,
  but only by a factor of @f$\sqrt{2} \approx 1.4@f$. It takes a lot
  of global refinement steps to reduce the error by, say, a factor of
  ten or a hundred, and this is reflected in the very slow convergence
  evidenced by the table. On the other hand, for cubic elements (i.e.,
  polynomial degree 3), we would get
  @f$\|u-u_h\|_{L_2(\Omega)} \le C h^4 \|\nabla^4u\|_{L_2(\Omega)}@f$
  and after reduction by 1.5 orders, we would still have
  @f$\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
  C h^{2+1/2} \|\nabla^4u\|_{L_2(\Omega)}@f$. This rate,
  @f${\cal O}(h^{2.5})@f$, is still quite rapid, and it is perhaps not
  surprising that we get much better answers with these higher
  order elements. This also illustrates that when trying to
  approximate anything that relates to a gradient of the solution,
  using linear elements (polynomial degree one) is really not a
  good choice at all.

@note In this very specific case, it turns out that we can actually
  compute the exact value of @f$\Phi@f$. This is because for the Poisson
  equation we compute the solution of here, @f$-\Delta u = f@f$, we can
  integrate over the domain, @f$-\int_\Omega \Delta u = \int_\Omega f@f$,
  and then use that @f$\Delta = \text{div}\;\text{grad}@f$; this allows
  us to use the
  divergence theorem followed by multiplying by minus one to find
  @f$\int_{\partial\Omega} \nabla u \cdot n = -\int_\Omega f@f$. The
  left hand side happens to be @f$\Phi@f$. For the specific right
  hand side @f$f(x_1,x_2)=4(x_1^4+x_2^4)@f$ we use in 2d, we then
  get @f$-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4) \; dx_2\; dx_1
  = -16 \left[\int_{-1}^{1} x^4 \; dx\right] = -16\times\frac 25@f$,
  which has a numerical value of exactly -6.4 -- right on with our
  guess above. In 3d, we can do the same and get that the exact
  value is
  @f$-\int_\Omega f =
  -\int_{-1}^{1} \int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4+x_3^4) \; dx_3 \; dx_2\; dx_1
  = -48\times\frac 25=-19.2@f$. What we found with cubic elements
  is then quite close to this exact value. Of course, in practice
  we almost never have exact values to compare with: If we could
  compute something on a piece of paper, we wouldn't have to solve
  the PDE numerically in the first place!

<a name="step_4-PlainProg"></a>
<h1> The plain program</h1>
@include "step-4.cc"