step-4.h

@code
template <int dim>
double RightHandSide<dim>::value(const Point<dim> &p,
                                 const unsigned int /*component*/) const
{
  double return_value = 0.0;
  for (unsigned int i = 0; i < dim; ++i)
    return_value += 4.0 * std::pow(p[i], 4.0);

  return return_value;
}
@endcode

As boundary values, we choose @f$x^2+y^2@f$ in 2d, and @f$x^2+y^2+z^2@f$ in 3d.
This happens to be equal to the square of the vector from the origin to the
point at which we would like to evaluate the function, irrespective of the
dimension. So that is what we return:

@code
template <int dim>
double BoundaryValues<dim>::value(const Point<dim> &p,
                                  const unsigned int /*component*/) const
{
  return p.square();
}
@endcode


<a name="step_4-ImplementationofthecodeStep4codeclass"></a>
<h3>Implementation of the <code>Step4</code> class</h3>

Next comes the implementation of the class template that makes use of the
functions above. As before, we will write everything as templates that have
a formal parameter <code>dim</code> that we assume unknown at the time we
define the template functions. Only later will the compiler find a
declaration of <code>Step4@<2@></code> (in the <code>main</code> function,
actually) and compile the entire class with <code>dim</code> replaced by 2,
a process referred to as `instantiation of a template'. When doing so, it
will also replace instances of <code>RightHandSide@<dim@></code> by
<code>RightHandSide@<2@></code> and instantiate the latter class from the
class template.

In fact, the compiler will also find a declaration of <code>Step4@<3@></code>
in <code>main()</code>. This will cause it to go back to the general
<code>Step4@<dim@></code> template once more, replace all occurrences of
<code>dim</code>, this time by 3, and compile the class a second time. Note
that the two instantiations <code>Step4@<2@></code> and
<code>Step4@<3@></code> are completely independent classes; their only
common feature is that they are both instantiated from the same general
template, but they are not convertible into each other, for example, and
they share no code (both instantiations are compiled completely
independently). A small standalone illustration of this follows below.
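
To make the notion of instantiation concrete, here is a minimal sketch that
is independent of deal.II (the name <code>Widget</code> is made up for this
illustration and is not part of the program):

@code
// A class template: no code is generated until it is instantiated.
template <int dim>
struct Widget
{
  int size() const
  {
    return 10 * dim;
  }
};

int main()
{
  Widget<2> a; // instantiates Widget<2>
  Widget<3> b; // instantiates Widget<3>: an unrelated type that merely
               // happens to come from the same template
  return a.size() + b.size();
}
@endcode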

<a name="step_4-Step4Step4"></a>
<h4>Step4::Step4</h4>

After this introduction, here is the constructor of the <code>Step4</code>
class. It specifies the desired polynomial degree of the finite elements
and associates the DoFHandler with the triangulation, just as in the
previous example program, @ref step_3 "step-3":

@code
template <int dim>
Step4<dim>::Step4()
  : fe(/* polynomial degree = */ 1)
  , dof_handler(triangulation)
{}
@endcode

<a name="step_4-Step4make_grid"></a>
<h4>Step4::make_grid</h4>

Grid creation is something inherently dimension dependent. However, as long
as the domains are sufficiently similar in 2d and 3d, the library can
abstract the differences away for you. In our case, we would like to again
solve on the square @f$[-1,1]\times [-1,1]@f$ in 2d, or on the cube
@f$[-1,1] \times [-1,1] \times [-1,1]@f$ in 3d; both are hypercubes and can
be created by GridGenerator::hyper_cube(), so we may use the same function
in whatever dimension we are. Of course, the functions that create a
hypercube in two and three dimensions are very much different, but that is
something you need not care about. Let the library handle the difficult
things.

@code
template <int dim>
void Step4<dim>::make_grid()
{
  GridGenerator::hyper_cube(triangulation, -1, 1);
  triangulation.refine_global(4);

  std::cout << " Number of active cells: " << triangulation.n_active_cells()
            << std::endl
            << " Total number of cells: " << triangulation.n_cells()
            << std::endl;
}
@endcode

<a name="step_4-Step4setup_system"></a>
<h4>Step4::setup_system</h4>

This function looks exactly like the one in the previous example, although
it performs actions that differ in their details if <code>dim</code>
happens to be 3. The only significant difference from a user's perspective
is the number of resulting cells, which is much higher in three than in two
space dimensions!

@code
template <int dim>
void Step4<dim>::setup_system()
{
  dof_handler.distribute_dofs(fe);

  std::cout << " Number of degrees of freedom: " << dof_handler.n_dofs()
            << std::endl;

  DynamicSparsityPattern dsp(dof_handler.n_dofs());
  DoFTools::make_sparsity_pattern(dof_handler, dsp);
  sparsity_pattern.copy_from(dsp);

  system_matrix.reinit(sparsity_pattern);

  solution.reinit(dof_handler.n_dofs());
  system_rhs.reinit(dof_handler.n_dofs());
}
@endcode

<a name="step_4-Step4assemble_system"></a>
<h4>Step4::assemble_system</h4>

Unlike in the previous example, we would now like to use a non-constant
right hand side function and non-zero boundary values. Both are tasks that
are readily achieved with only a few new lines of code in the assembly of
the matrix and right hand side.

More interesting, though, is the way we assemble the matrix and right hand
side vector dimension independently: there is simply no difference to the
two-dimensional case. Since the important objects used in this function
(quadrature formula, FEValues) depend on the dimension by way of a template
parameter as well, they can take care of setting everything up properly for
the dimension for which this function is compiled. By declaring all classes
that might depend on the dimension using a template parameter, the library
can do nearly all the work for you and you don't have to care about most
things.

@code
template <int dim>
void Step4<dim>::assemble_system()
{
  const QGauss<dim> quadrature_formula(fe.degree + 1);
@endcode

We wanted to have a non-constant right hand side, so we use an object of
the class declared above to generate the necessary data. Since this right
hand side object is only used locally in the present function, we declare
it here as a local variable:

@code
  RightHandSide<dim> right_hand_side;
@endcode

Compared to the previous example, in order to evaluate the non-constant
right hand side function we now also need the quadrature points on the
cell we are presently on (previously, we only required the values and
gradients of the shape functions from the FEValues object, as well as
the quadrature weights, FEValues::JxW()). We can tell the FEValues object
to compute these for us by also giving it the #update_quadrature_points
flag:

@code
  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_gradients |
                            update_quadrature_points | update_JxW_values);
@endcode

We then again define the same abbreviation as in the previous program.
The value of this variable of course depends on the dimension we are
presently using, but the FiniteElement class does all the necessary
work for you and you don't have to care about the dimension dependent
parts:

@code
  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();

  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
  Vector<double>     cell_rhs(dofs_per_cell);

  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
@endcode

Next, we again have to loop over all cells and assemble local
contributions. Note that a cell is a quadrilateral in two space
dimensions, but a hexahedron in 3d. In fact, the
<code>active_cell_iterator</code> data type is something different,
depending on the dimension we are in, but to the outside world they look
alike and you will probably never see a difference. In any case, the real
type is hidden by using `auto`:

@code
  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);

      cell_matrix = 0;
      cell_rhs    = 0;
@endcode

Now we have to assemble the local matrix and right hand side. This is
done exactly like in the previous example, but now we reverse the
order of the loops (which we can safely do since they are independent
of each other) and merge the loops for the local matrix and the local
vector as far as possible to make things a bit faster.

Assembling the right hand side presents the only significant
difference to how we did things in @ref step_3 "step-3": Instead of using a
constant right hand side with value 1, we use the object representing
the right hand side and evaluate it at the quadrature points:

@code
      for (const unsigned int q_index : fe_values.quadrature_point_indices())
        for (const unsigned int i : fe_values.dof_indices())
          {
            for (const unsigned int j : fe_values.dof_indices())
              cell_matrix(i, j) +=
                (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
                 fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
                 fe_values.JxW(q_index));           // dx

            const auto &x_q = fe_values.quadrature_point(q_index);
            cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
                            right_hand_side.value(x_q) *        // f(x_q)
                            fe_values.JxW(q_index));            // dx
          }
@endcode

As a final remark to these loops: when we assemble the local
contributions into <code>cell_matrix(i,j)</code>, we have to contract
the gradients of shape functions @f$i@f$ and @f$j@f$ at point number
q_index and multiply the result with the scalar weight JxW. This is
what actually happens: <code>fe_values.shape_grad(i,q_index)</code>
returns a <code>dim</code> dimensional vector, represented by a
<code>Tensor@<1,dim@></code> object, and the operator* that
multiplies it with the result of
<code>fe_values.shape_grad(j,q_index)</code> makes sure that the
<code>dim</code> components of the two vectors are properly
contracted, and the result is a scalar floating point number that
then is multiplied with the weight. Internally, this operator* makes
sure that this happens correctly for all <code>dim</code> components
of the vectors, whether <code>dim</code> be 2, 3, or any other space
dimension; from a user's perspective, this is not something worth
bothering with, however, and it makes things a lot simpler if one
wants to write dimension-independent code.
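
If you want to see this contraction in isolation, here is a minimal,
self-contained sketch (not part of the program; it only assumes the
deal.II header <code>deal.II/base/tensor.h</code>):

@code
#include <deal.II/base/tensor.h>

#include <iostream>

int main()
{
  dealii::Tensor<1, 2> g1, g2;
  g1[0] = 1.;
  g1[1] = 2.;
  g2[0] = 3.;
  g2[1] = 4.;

  // operator* contracts the two rank-1 tensors: 1*3 + 2*4 = 11
  const double dot = g1 * g2;
  std::cout << dot << std::endl;
}
@endcode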

With the local systems assembled, the transfer into the global matrix
and right hand side is done exactly as before, but here we have again
merged some loops for efficiency:

@code
      cell->get_dof_indices(local_dof_indices);
      for (const unsigned int i : fe_values.dof_indices())
        {
          for (const unsigned int j : fe_values.dof_indices())
            system_matrix.add(local_dof_indices[i],
                              local_dof_indices[j],
                              cell_matrix(i, j));

          system_rhs(local_dof_indices[i]) += cell_rhs(i);
        }
    }
@endcode

As the final step in this function, we wanted to have non-homogeneous
boundary values in this example, unlike the one before. This is a simple
task: we only have to replace the Functions::ZeroFunction used there by an
object of the class which describes the boundary values we would like to
use (i.e., the <code>BoundaryValues</code> class declared above).

The function VectorTools::interpolate_boundary_values() will only work
on faces that have been marked with boundary indicator 0 (because that's
what we say the function should work on with the second argument below).
If there are faces with a boundary id other than 0, then the function
interpolate_boundary_values() will do nothing on these faces. For
the Laplace equation, doing nothing is equivalent to assuming that
on those parts of the boundary a zero Neumann boundary condition holds.
(A sketch of how faces could be given a different boundary id follows
after the code.)

@code
  std::map<types::global_dof_index, double> boundary_values;
  VectorTools::interpolate_boundary_values(dof_handler,
                                           types::boundary_id(0),
                                           BoundaryValues<dim>(),
                                           boundary_values);
  MatrixTools::apply_boundary_values(boundary_values,
                                     system_matrix,
                                     solution,
                                     system_rhs);
}
@endcode
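
As an aside, and only as a sketch of what the remark above implies rather
than something this program does: one could mark, say, all faces on the
plane @f$x=1@f$ with a different boundary indicator before the call above,
so that VectorTools::interpolate_boundary_values() would leave them alone
and that part of the boundary would retain the implied zero Neumann
condition:

@code
  // Sketch only: give faces on the plane x=1 the boundary id 1 so that
  // the interpolate_boundary_values() call above (which acts on id 0)
  // skips them. This would be done when the mesh is created.
  // (Needs <cmath> for std::fabs.)
  for (const auto &cell : triangulation.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      if (face->at_boundary() && (std::fabs(face->center()[0] - 1.0) < 1e-12))
        face->set_boundary_id(1);
@endcode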

<a name="step_4-Step4solve"></a>
<h4>Step4::solve</h4>

Solving the linear system of equations is something that looks almost
identical in most programs. In particular, it is dimension independent, so
this function is copied verbatim from the previous example.

@code
template <int dim>
void Step4<dim>::solve()
{
  SolverControl solver_control(1000, 1e-6 * system_rhs.l2_norm());
  SolverCG<Vector<double>> solver(solver_control);
  solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());

  std::cout << " " << solver_control.last_step()
            << " CG iterations needed to obtain convergence." << std::endl;
}
@endcode

<a name="step_4-Step4output_results"></a>
<h4>Step4::output_results</h4>

This function also does what the respective one did in @ref step_3 "step-3". No
changes here for dimension independence either.

Since the program will run both 2d and 3d versions of the Laplace solver,
we use the dimension in the filename to generate distinct filenames for
each run (in a better program, one would check whether <code>dim</code> can
have values other than 2 or 3, but we neglect this here for the sake of
brevity).

@code
template <int dim>
void Step4<dim>::output_results() const
{
  DataOut<dim> data_out;

  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(solution, "solution");

  data_out.build_patches();

  std::ofstream output(dim == 2 ? "solution-2d.vtk" : "solution-3d.vtk");
  data_out.write_vtk(output);
}
@endcode

<a name="step_4-Step4run"></a>
<h4>Step4::run</h4>

This is the function which has top-level control over everything. Apart
from one line of additional output, it is the same as for the previous
example.

@code
template <int dim>
void Step4<dim>::run()
{
  std::cout << "Solving problem in " << dim << " space dimensions."
            << std::endl;

  make_grid();
  setup_system();
  assemble_system();
  solve();
  output_results();
}
@endcode

<a name="step_4-Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>

And this is the main function. It also looks mostly like in @ref step_3 "step-3",
but if you look at the code below, note how we first create a variable of
type <code>Step4@<2@></code> (forcing the compiler to compile the class
template with <code>dim</code> replaced by <code>2</code>) and run a 2d
simulation, and then we do the whole thing over in 3d.

In practice, this is probably not what you would do very frequently (you
probably either want to solve a 2d problem, or one in 3d, but not both at
the same time). However, it demonstrates the mechanism by which we can
simply change which dimension we want in a single place, and thereby force
the compiler to recompile the dimension independent class templates for the
dimension we request. The emphasis here lies on the fact that we only need
to change a single place. This makes it rather trivial to debug the program
in 2d where computations are fast, and then change that single place to 3
to run the much more computationally intensive program in 3d for `real'
computations. (A variant of <code>main()</code> that makes the "single
place" even more literal is sketched after the code.)

Each of the two blocks is enclosed in braces to make sure that the
<code>laplace_problem_2d</code> variable goes out of scope (and releases
the memory it holds) before we move on to allocate memory for the 3d
case. Without the additional braces, the <code>laplace_problem_2d</code>
variable would only be destroyed at the end of the function, i.e., after
running the 3d problem, and would needlessly hog memory while the 3d run
could actually use it.

@code
int main()
{
  {
    Step4<2> laplace_problem_2d;
    laplace_problem_2d.run();
  }

  {
    Step4<3> laplace_problem_3d;
    laplace_problem_3d.run();
  }

  return 0;
}
@endcode
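
As promised above, here is an alternative to the actual <code>main()</code>
shown -- a sketch only, not what the program does -- that routes the
dimension through a single constant, so that switching between 2d and 3d
really is a one-character change:

@code
int main()
{
  constexpr int dim = 2; // change this 2 to a 3 for the 3d run

  Step4<dim> laplace_problem;
  laplace_problem.run();

  return 0;
}
@endcode
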
<a name="step_4-Results"></a><h1>Results</h1>

The output of the program looks as follows (the number of iterations
may vary by one or two, depending on your computer, since this is
often dependent on the round-off accuracy of floating point
operations, which differs between processors):
@code
Solving problem in 2 space dimensions.
 Number of active cells: 256
 Total number of cells: 341
 Number of degrees of freedom: 289
 19 CG iterations needed to obtain convergence.
Solving problem in 3 space dimensions.
 Number of active cells: 4096
 Total number of cells: 4681
 Number of degrees of freedom: 4913
 20 CG iterations needed to obtain convergence.
@endcode
It is obvious that in three spatial dimensions the number of cells and
therefore also the number of degrees of freedom is much higher. What cannot
be seen here is that, besides this higher number of rows and columns in the
matrix, there are also significantly more entries per row of the matrix in
three space dimensions. Together, this leads to a much higher numerical
effort for solving the system of equations, which you can feel in the run
time of the two solution steps when you actually run the program.
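
If you want to see the "more entries per row" effect in numbers, a small
addition to <code>setup_system()</code> -- a sketch, not part of the
program above -- would print the maximal number of nonzero entries per
matrix row; expect a number around 9 for the bilinear elements in 2d, but
around 27 in 3d:

@code
  // Sketch: add after sparsity_pattern.copy_from(dsp) in setup_system().
  std::cout << " Max. entries per row: "
            << sparsity_pattern.max_entries_per_row() << std::endl;
@endcode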

The program produces two files: <code>solution-2d.vtk</code> and
<code>solution-3d.vtk</code>, which can be viewed using the programs
VisIt or ParaView (in case you do not have these programs, you can easily
change the output format in the program to something which you can view
more easily). Visualizing solutions is a bit of an art, but it can also be
fun, so you should play around with your favorite visualization tool to get
familiar with its functionality. Here's what I have come up with for the 2d
solution:

<p align="center">
  <img src="https://www.dealii.org/images/steps/developer/step-4.solution-2d.png" alt="">
</p>

(See also <a href="https://www.math.colostate.edu/~bangerth/videos.676.11.html">video lecture 11</a>, <a href="https://www.math.colostate.edu/~bangerth/videos.676.32.html">video lecture 32</a>.)
The picture shows the solution of the problem under consideration as
a 3D plot. As can be seen, the solution is almost flat in the interior
of the domain and has a higher curvature near the boundary. This, of
course, is due to the fact that for Laplace's equation the curvature
of the solution is equal to the right hand side, and that was chosen as
a quartic polynomial which is nearly zero in the interior and rises
sharply only when approaching the boundaries of the domain; the
maximal values of the right hand side function are at the corners of
the domain, where the solution also varies most rapidly.
It is also nice to see that the solution follows the desired quadratic
boundary values along the boundaries of the domain.
It can also be useful to verify a computed solution against an analytical
solution. For an explanation of this technique, see @ref step_7 "step-7".

On the other hand, even though the picture does not show the mesh lines
explicitly, you can see them as little kinks in the solution. This clearly
indicates that the solution hasn't been computed to very high accuracy and
that to get a better solution, we may have to compute on a finer mesh.

In three spatial dimensions, visualization is a bit more difficult. The left
picture shows the solution and the mesh on which it was computed, on the
surface of the domain. This is nice, but it has the drawback that it
completely hides what is happening on the inside. The picture on the right
is an attempt at visualizing the interior as well, by showing surfaces where
the solution has constant values (as indicated by the legend at the top
left). Isosurface pictures look best if one makes the individual surfaces
slightly transparent so that it is possible to see through them and see
what's behind.

<table width="60%" align="center">
  <tr>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-4.solution-3d.png" alt="">
    </td>
    <td align="center">
      <img src="https://www.dealii.org/images/steps/developer/step-4.contours-3d.png" alt="">
    </td>
  </tr>
</table>

@note
A final remark on visualization: the idea of visualization is to give
insight, which is not the same as displaying information. In particular, it
is easy to overload a picture with information, but while it shows more
information it also makes it more difficult to glean insight. As an
example, the program I used to generate these pictures, VisIt, by default
puts tick marks on every axis, puts a big fat label "X Axis" on the
@f$x@f$ axis and similar for the other axes, shows the file name from which
the data was taken in the top left and the name of the user doing so and
the time and date on the bottom right. None of this is important here: the
axes are equally easy to make out because the tripod at the bottom left is
still visible, and we know from the program that the domain is
@f$[-1,1]^3@f$, so there is no need for tick marks. As a consequence, I
have switched off all the extraneous stuff in the picture: the art of
visualization is to reduce the picture to those parts that are important to
see what one wants to see, but no more.

<a name="step-4-postprocessing"></a>
<a name="step_4-PostprocessingWhattodowiththesolution"></a><h3> Postprocessing: What to do with the solution? </h3>

This tutorial -- like most of the other programs -- principally only shows how
to numerically approximate the solution of a partial differential equation, and
then how to visualize this solution graphically. But
solving a PDE is of course not the goal in most practical applications (unless
you are a numerical methods developer and the *method* is the goal): We generally
want to solve a PDE because we want to *extract information* from it. Examples
for what people are interested in from solutions include the following:
- Let's say you solve the equations of elasticity (which we will do in @ref step_8 "step-8"),
  then that's presumably because you want to know about the deformation of an
  elastic object under a given load. From an engineering perspective, what you
  then presumably want to learn is the degree of deformation of the object,
  say at a specific point; or you may want to know the maximum
  [stress](https://en.wikipedia.org/wiki/Stress_(mechanics)) in order to
  determine whether the applied load exceeds the safe maximal stress the
  material can withstand.
- If you are solving fluid flow problems (such as in @ref step_22 "step-22", @ref step_57 "step-57", @ref step_67 "step-67",
  and @ref step_69 "step-69"), then you might be interested in the fluid velocity at specific
  points, and oftentimes the forces the fluid exerts on the boundary of the
  fluid domain. The latter is important in many applications: If the fluid
  in question is the air flowing around an airplane, then we are typically
  interested in the drag and lift forces on the fuselage and wings. If the
  fluid is water flowing around a ship, then we typically care about
  the drag force on the ship.
- If you are solving the Maxwell equations of electromagnetics, you are
  typically interested in how much energy is radiated in certain directions
  (say, in order to know the range of information transmission via an
  antenna, or to determine the
  [radar cross section](https://en.wikipedia.org/wiki/Radar_cross_section)
  of planes or ships).

The point here is that from an engineering perspective, *solving* the
PDE is only the first step. The second step is to *evaluate* the computed
solution in order to extract relevant numbers that allow us to
either *optimize a design*, or to *make decisions*. This second step is often
called "postprocessing the solution".

This program does not solve a solid or fluid mechanics problem, so we
should try to illustrate postprocessing with something that makes sense
in the context of the equation we solve here. The Poisson equation in two
space dimensions is a model for the vertical deformation of a membrane
that is clamped at the boundary and is subject to a vertical force. For this
kind of situation, it makes sense to evaluate the *average vertical
displacement*,
@f[
  \bar u_h = \frac{\int_\Omega u_h(\mathbf x) \, dx}{|\Omega|},
@f]
where @f$|\Omega| = \int_\Omega 1 \, dx@f$ is the area of the domain. To compute
@f$\bar u_h@f$, as usual we replace integrals over the domain by a sum of integrals
over cells,
@f[
  \int_\Omega u_h(\mathbf x) \, dx
  =
  \sum_K \int_K u_h(\mathbf x) \, dx,
@f]
and then integrals over cells are approximated by quadrature:
@f{align*}{
  \int_\Omega u_h(\mathbf x) \, dx
  &=
  \sum_K \int_K u_h(\mathbf x) \, dx
  \\
  &=
  \sum_K \sum_q u_h(\mathbf x_q^K) w_q^K,
@f}
where @f$w_q^K@f$ is the weight of the @f$q@f$th quadrature point evaluated on
cell @f$K@f$. All of this is as always provided by the FEValues class -- the
entry point for all integrals in deal.II.

The actual implementation of this is straightforward once you know how
to get the values of the solution @f$u@f$ at the quadrature points of a cell.
This functionality is provided by FEValues::get_function_values(), a
function that takes a global vector of nodal values as input and returns
a vector of function values at the quadrature points of the current cell.
To see how it all works together, you can place the following code snippet
anywhere in the program after the solution has been computed (the
`output_results()` function seems like a good place to also do
postprocessing, for example):
@code
  QGauss<dim>   quadrature_formula(fe.degree + 1);
  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_JxW_values);

  std::vector<double> solution_values(quadrature_formula.size());
  double              integral_of_u   = 0;
  double              volume_of_omega = 0;

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values.get_function_values(solution, solution_values);

      for (const unsigned int q_point : fe_values.quadrature_point_indices())
        {
          integral_of_u += solution_values[q_point] * fe_values.JxW(q_point);
          volume_of_omega += 1 * fe_values.JxW(q_point);
        }
    }
  std::cout << " Mean value of u=" << integral_of_u / volume_of_omega
            << std::endl;
@endcode
In this code snippet, we also compute the volume (or, since we are currently
thinking about a two-dimensional situation: the area) @f$|\Omega|@f$ by computing
the integral @f$|\Omega| = \int_\Omega 1 \, dx@f$ in exactly the same way, via
quadrature. (We could avoid having to compute @f$|\Omega|@f$ by hand here, using the
fact that deal.II has a function for this: GridTools::volume(). That said,
it is efficient to compute the two integrals concurrently in the same loop,
and so that's what we do.)
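
For reference, using the library function instead would be a one-liner --
a sketch, assuming the header <code>deal.II/grid/grid_tools.h</code> is
included:

@code
  // Sketch: |Omega| computed by the library rather than by quadrature.
  const double measure = GridTools::volume(triangulation);
  std::cout << " |Omega| = " << measure << std::endl;
@endcode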

This program of course also solves the same Poisson equation in three space
dimensions. In this situation, the Poisson equation is often used as a model
for diffusion of either a physical species (say, of ink in a tank of water,
or a pollutant in the air) or of energy (specifically, of thermal energy in
a solid body). In that context, the quantity
@f[
  \Phi_h = \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
@f]
is the *flux* of this species or energy across the boundary. (In actual
physical models, one would also have to multiply the right hand side by
a diffusivity or conductivity constant, but let us ignore this here.) In
much the same way as before, we compute such integrals by splitting
them into integrals over *faces* of cells, and then applying quadrature:
@f{align*}{
  \Phi_h
  &=
  \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
  \\
  &=
  \sum_K
  \sum_{f \in \text{faces of $K$}, f\subset\partial\Omega}
  \int_f \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
  \\
  &=
  \sum_K
  \sum_{f \in \text{faces of $K$}, f\subset\partial\Omega}
  \sum_q \nabla u_h(\mathbf x_q^f) \cdot \mathbf n(\mathbf x_q^f) w_q^f,
@f}
where now @f$\mathbf x_q^f@f$ are the quadrature points located on face @f$f@f$,
and @f$w_q^f@f$ are the weights associated with these faces. The second
of the sum symbols loops over all faces of cell @f$K@f$, but is restricted to
those that are actually at the boundary.

This all is easily implemented by the following code that replaces the use of the
FEValues class (which is used for integrating over cells -- i.e., domain integrals)
by the FEFaceValues class (which is used for integrating over faces -- i.e.,
boundary integrals):
@code
  QGauss<dim - 1>   face_quadrature_formula(fe.degree + 1);
  FEFaceValues<dim> fe_face_values(fe,
                                   face_quadrature_formula,
                                   update_gradients | update_normal_vectors |
                                     update_JxW_values);

  std::vector<Tensor<1, dim>> solution_gradients(face_quadrature_formula.size());
  double                      flux = 0;

  for (const auto &cell : dof_handler.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      if (face->at_boundary())
        {
          fe_face_values.reinit(cell, face);
          fe_face_values.get_function_gradients(solution, solution_gradients);

          for (const unsigned int q_point :
               fe_face_values.quadrature_point_indices())
            {
              flux += solution_gradients[q_point] *
                      fe_face_values.normal_vector(q_point) *
                      fe_face_values.JxW(q_point);
            }
        }
  std::cout << " Flux=" << flux << std::endl;
@endcode

If you add these two code snippets to the code, you will get output like the
following when you run the program:
@code
Solving problem in 2 space dimensions.
 Number of active cells: 256
 Total number of cells: 341
 Number of degrees of freedom: 289
 26 CG iterations needed to obtain convergence.
 Mean value of u=1.33303
 Flux=-3.68956
Solving problem in 3 space dimensions.
 Number of active cells: 4096
 Total number of cells: 4681
 Number of degrees of freedom: 4913
 30 CG iterations needed to obtain convergence.
 Mean value of u=1.58058
 Flux=-8.29435
@endcode

This makes some sense: If you look, for example, at the 2d output above,
the solution varies between values of 1 and 2, but with a larger part of the
solution closer to one than two; so an average value of 1.33 for the mean value
is reasonable. For the flux, recall that @f$\nabla u \cdot \mathbf n@f$ is the
directional derivative in the normal direction -- in other words, how the
solution changes as we move from the interior of the domain towards the
boundary. If you look at the 2d solution, you will realize that for most parts
of the boundary, the solution *decreases* as we approach the boundary, so the
normal derivative is negative -- so if we integrate along the boundary, we
should expect (and obtain!) a negative value.

<a name="step-4-extensions"></a>
<a name="step_4-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

There are many ways in which one can play with this program. The simpler
ones include essentially all the possibilities already discussed in the
<a href="step_3.html#extensions" target="body">Possibilities for extensions in the documentation of step 3</a>,
except that you will have to think about whether something now also applies
to the 3d case discussed in the current program.

It is also worthwhile considering the postprocessing options discussed
above. The documentation states two numbers (the mean value and the
normal flux) for both the 2d and 3d cases. Can we trust these
numbers? We have convinced ourselves that at least the mean value
is reasonable, and that the sign of the flux is probably correct.
But are these numbers accurate?

A general rule is that we should never trust a number unless we have
verified it in some way. From the theory of finite element methods,
we know that as we make the mesh finer and finer, the numerical
solution @f$u_h@f$ we compute here must converge to the exact solution
@f$u@f$. As a consequence, we also expect that @f$\bar u_h \rightarrow \bar u@f$
and @f$\Phi_h \rightarrow \Phi@f$, but that does not mean that for any
given mesh @f$\bar u_h@f$ or @f$\Phi_h@f$ are particularly accurate approximations.

To test this kind of thing, we have already considered the convergence of
a point value in @ref step_3 "step-3". We can do the same here by selecting how many
times the mesh is globally refined in the `make_grid()` function of this
program (a sketch of this follows after the table below). For the mean
value of the solution, we then get the following numbers:
  <table align="center" class="doxtable">
    <tr> <th># of refinements</th>
         <th>@f$\bar u_h@f$ in 2d</th>
         <th>@f$\bar u_h@f$ in 3d</th>
    </tr>
    <tr> <td>4</td> <td>1.33303</td> <td>1.58058</td> </tr>
    <tr> <td>5</td> <td>1.33276</td> <td>1.57947</td> </tr>
    <tr> <td>6</td> <td>1.3327</td>  <td>1.5792</td>  </tr>
    <tr> <td>7</td> <td>1.33269</td> <td>1.57914</td> </tr>
    <tr> <td>8</td> <td>1.33268</td> <td></td> </tr>
    <tr> <td>9</td> <td>1.33268</td> <td></td> </tr>
  </table>
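
The rows of this table were produced by nothing more than varying the fixed
number of refinement steps in `make_grid()`. As a sketch (the parameter
name `n_refinements` is made up for this illustration and is not part of
the program above), one could write:

@code
template <int dim>
void Step4<dim>::make_grid(const unsigned int n_refinements)
{
  GridGenerator::hyper_cube(triangulation, -1, 1);
  triangulation.refine_global(n_refinements); // was refine_global(4)
}
@endcode
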
I did not have the patience to run the last two values for the 3d case --
one needs quite a fine mesh for this, with correspondingly long run times.
But we can be reasonably assured that values around 1.33 (for the 2d case)
and 1.58 (for the 3d case) are about right -- and at least for engineering
applications, three digits of accuracy are good enough.

The situation looks very different for the flux. Here, we get results
such as the following:
  <table align="center" class="doxtable">
    <tr> <th># of refinements</th>
         <th>@f$\Phi_h@f$ in 2d</th>
         <th>@f$\Phi_h@f$ in 3d</th>
    </tr>
    <tr> <td>4</td>  <td>-3.68956</td> <td>-8.29435</td> </tr>
    <tr> <td>5</td>  <td>-4.90147</td> <td>-13.0691</td> </tr>
    <tr> <td>6</td>  <td>-5.60745</td> <td>-15.9171</td> </tr>
    <tr> <td>7</td>  <td>-5.99111</td> <td>-17.4918</td> </tr>
    <tr> <td>8</td>  <td>-6.19196</td> <td></td> </tr>
    <tr> <td>9</td>  <td>-6.29497</td> <td></td> </tr>
    <tr> <td>10</td> <td>-6.34721</td> <td></td> </tr>
    <tr> <td>11</td> <td>-6.37353</td> <td></td> </tr>
  </table>
So this is not great. For the 2d case, we might infer that a value around
-6.4 could be right if we just refine the mesh enough --
though 11 refinements already leads to some 4,194,304 cells. In any
case, the first number (the one shown in the beginning where we
discussed postprocessing) was off by almost a factor of 2!

For the 3d case, the last number shown was on a mesh with 2,097,152
cells; the next one would have had 8 times as many cells. In any case, the
numbers mean that we can't even be sure
that the first digit of that last number is correct! In other words,
it was worth checking, or we would have just believed all of these
numbers. In fact, that last column isn't even doing a particularly
good job convincing us that the code might be correctly implemented.

If you keep reading through the other tutorial programs, you will find many ways
to make these sorts of computations more accurate and to come to
believe that the flux actually does converge to its correct value.
For example, we can dramatically increase the accuracy of the computation
by using adaptive mesh refinement (@ref step_6 "step-6") near the boundary, and
in particular by using higher polynomial degree finite elements (also
@ref step_6 "step-6", but also @ref step_7 "step-7"). Using the latter, with cubic elements
(polynomial degree 3), we can actually compute the flux pretty
accurately even in 3d: @f$\Phi_h=-19.0148@f$ with 4 global refinement steps,
and @f$\Phi_h=-19.1533@f$ with 5 refinement steps. These numbers are already
pretty close together and give us a reasonable idea of the first
two correct digits of the "true" answer.
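
Switching this program to the cubic elements used for these last numbers
requires changing only the polynomial degree handed to the finite element
in the constructor -- a sketch:

@code
template <int dim>
Step4<dim>::Step4()
  : fe(/* polynomial degree = */ 3) // was 1; now cubic elements
  , dof_handler(triangulation)
{}
@endcode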

@note We would be remiss to not also comment on the fact that there
  are good theoretical reasons why computing the flux accurately
  appears to be so much more difficult than the average value.
  This has to do with the fact that finite element theory
  provides us with the estimate
  @f$\|u-u_h\|_{L_2(\Omega)} \le C h^2 \|\nabla^2u\|_{L_2(\Omega)}@f$
  when using the linear elements this program uses -- that is, for
  every global mesh refinement, @f$h@f$ is reduced by a factor of two
  and the error goes down by a factor of 4. Now, the @f$L_2@f$ error is
  not equivalent to the error in the mean value, but the two are
  related: They are both integrals over the domain, using the *value*
  of the solution. We expect the mean value to converge no worse than
  the @f$L_2@f$ norm of the error. At the same time, theory also provides
  us with this estimate:
  @f$\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
  C h^{1/2} \|\nabla^2u\|_{L_2(\Omega)}@f$. The move from values to
  gradients reduces the convergence rates by one order, and the move
  from domain to boundary by another half order. Here, then, each
  refinement step reduces the error not by a factor of 4 any more,
  but only by a factor of @f$\sqrt{2} \approx 1.4@f$. It takes a lot
  of global refinement steps to reduce the error by, say, a factor
  of ten or a hundred, and this is reflected in the very slow convergence
  evidenced by the table. On the other hand, for cubic elements (i.e.,
  polynomial degree 3), we would get
  @f$\|u-u_h\|_{L_2(\Omega)} \le C h^4 \|\nabla^4u\|_{L_2(\Omega)}@f$
  and after reduction by 1.5 orders, we would still have
  @f$\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le
  C h^{2+1/2} \|\nabla^4u\|_{L_2(\Omega)}@f$. This rate,
  @f${\cal O}(h^{2.5})@f$, is still quite rapid, and it is perhaps not
  surprising that we get much better answers with these higher
  order elements. This also illustrates that when trying to
  approximate anything that relates to a gradient of the solution,
  using linear elements (polynomial degree one) is really not a
  good choice at all.
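
To put these rates into concrete numbers (an illustrative calculation, not
part of the note above): at the @f${\cal O}(h^{1/2})@f$ rate, each global
refinement step divides the error only by @f$\sqrt{2}@f$, so reducing the
flux error by a factor of 100 takes about
@f[
  \frac{\ln 100}{\ln \sqrt{2}} \approx 13.3
@f]
refinement steps -- in 3d, an utterly infeasible number of cells -- whereas
at the @f${\cal O}(h^{2.5})@f$ rate of cubic elements the same reduction
takes only @f$\ln 100 / \ln 2^{2.5} \approx 2.7@f$ steps.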

@note In this very specific case, it turns out that we can actually
  compute the exact value of @f$\Phi@f$. This is because for the Poisson
  equation whose solution we compute here, @f$-\Delta u = f@f$, we can
  integrate over the domain, @f$-\int_\Omega \Delta u = \int_\Omega f@f$,
  and then use that @f$\Delta = \text{div}\;\text{grad}@f$; this allows
  us to use the
  divergence theorem followed by multiplying by minus one to find
  @f$\int_{\partial\Omega} \nabla u \cdot n = -\int_\Omega f@f$. The
  left hand side happens to be @f$\Phi@f$. For the specific right
  hand side @f$f(x_1,x_2)=4(x_1^4+x_2^4)@f$ we use in 2d, we then
  get @f$-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4) \; dx_2\; dx_1
  = -16 \left[\int_{-1}^{1} x^4 \; dx\right] = -16\times\frac 25@f$,
  which has a numerical value of exactly -6.4 -- right on with our
  guess above. In 3d, we can do the same and get that the exact
  value is
  @f$-\int_\Omega f =
  -\int_{-1}^{1} \int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4+x_3^4) \; dx_3 \; dx_2\; dx_1
  = -48\times\frac 25=-19.2@f$. What we found with cubic elements
  is then quite close to this exact value. Of course, in practice
  we almost never have exact values to compare with: If we could
  compute something on a piece of paper, we wouldn't have to solve
  the PDE numerically. But these sorts of situations make for excellent
  test cases that help us verify that our numerical solver works
  correctly. In many other cases, the literature contains
  numbers where others have already computed an answer accurately
  using their own software, and these are also often useful to
  compare against in verifying the correctness of our codes.

<a name="step_4-PlainProg"></a>
<h1> The plain program</h1>
@include "step-4.cc"