  double return_value = 0.0;
  for (unsigned int i = 0; i < dim; ++i)
    return_value += 4.0 * std::pow(p(i), 4.0);

  return return_value;
As boundary values, we choose @f$x^2+y^2@f$ in 2d, and @f$x^2+y^2+z^2@f$ in 3d. This happens to be equal to the square of the vector from the origin to the point at which we would like to evaluate the function, irrespective of the dimension. So that is what we return:
  template <int dim>
  double BoundaryValues<dim>::value(const Point<dim> &p,
                                    const unsigned int /*component*/) const
  {
    return p.square();
  }
<a name="ImplementationofthecodeStep4codeclass"></a>
<h3>Implementation of the <code>Step4</code> class</h3>
Next for the implementation of the class template that makes use of the functions above. As before, we will write everything as templates that have a formal parameter <code>dim</code> that we assume unknown at the time we define the template functions. Only later will the compiler find a declaration of <code>Step4@<2@></code> (in the <code>main</code> function, actually) and compile the entire class with <code>dim</code> replaced by 2, a process referred to as `instantiation of a template'. When doing so, it will also replace instances of <code>RightHandSide@<dim@></code> by <code>RightHandSide@<2@></code> and instantiate the latter class from the class template.
In fact, the compiler will also find a declaration <code>Step4@<3@></code> in <code>main()</code>. This will cause it to again go back to the general <code>Step4@<dim@></code> template, replace all occurrences of <code>dim</code>, this time by 3, and compile the class a second time. Note that the two instantiations <code>Step4@<2@></code> and <code>Step4@<3@></code> are completely independent classes; their only common feature is that they are both instantiated from the same general template, but they are not convertible into each other, for example, and share no code (both instantiations are compiled completely independently).
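As a small aside not found in the tutorial itself, the independence of instantiations is easy to demonstrate in isolation. The following standalone sketch (with a made-up <code>Widget</code> class) compiles precisely because the two instantiations really are distinct types:
@code
#include <type_traits>

template <int dim>
struct Widget
{
  double data[dim]; // the members' layout differs between instantiations
};

// Widget<2> and Widget<3> merely share a template; they are unrelated types:
static_assert(!std::is_same_v<Widget<2>, Widget<3>>,
              "instantiations are independent classes");
@endcode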
<a name="Step4Step4"></a>
<h4>Step4::Step4</h4>
After this introduction, here is the constructor of the <code>Step4</code> class. It specifies the desired polynomial degree of the finite elements and associates the DoFHandler to the triangulation just as in the previous example program, @ref step_3 "step-3":
  template <int dim>
  Step4<dim>::Step4()
    : fe(/* polynomial degree = */ 1)
    , dof_handler(triangulation)
  {}
<a name="Step4make_grid"></a>
<h4>Step4::make_grid</h4>
Grid creation is something inherently dimension dependent. However, as long as the domains are sufficiently similar in 2d or 3d, the library can abstract for you. In our case, we would like to again solve on the square @f$[-1,1]\times [-1,1]@f$ in 2d, or on the cube @f$[-1,1] \times [-1,1] \times [-1,1]@f$ in 3d; both can be termed GridGenerator::hyper_cube(), so we may use the same function in whatever dimension we are. Of course, the functions that create a hypercube in two and three dimensions are very much different, but that is something you need not care about. Let the library handle the difficult things.
  template <int dim>
  void Step4<dim>::make_grid()
  {
    GridGenerator::hyper_cube(triangulation, -1, 1);
    triangulation.refine_global(4);

    std::cout << "   Number of active cells: " << triangulation.n_active_cells()
              << std::endl
              << "   Total number of cells: " << triangulation.n_cells()
              << std::endl;
  }
<a name="Step4setup_system"></a>
<h4>Step4::setup_system</h4>
This function looks exactly like in the previous example, although it performs actions that in their details are quite different if <code>dim</code> happens to be 3. The only significant difference from a user's perspective is the number of resulting cells, which is much higher in three than in two space dimensions!
  template <int dim>
  void Step4<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);

    std::cout << "   Number of degrees of freedom: " << dof_handler.n_dofs()
              << std::endl;

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler, dsp);
    sparsity_pattern.copy_from(dsp);

    system_matrix.reinit(sparsity_pattern);

    solution.reinit(dof_handler.n_dofs());
    system_rhs.reinit(dof_handler.n_dofs());
  }
<a name="Step4assemble_system"></a>
<h4>Step4::assemble_system</h4>
Unlike in the previous example, we would now like to use a non-constant right hand side function and non-zero boundary values. Both are tasks that are readily achieved with only a few new lines of code in the assemblage of the matrix and right hand side.
More interesting, though, is the way we assemble matrix and right hand side vector dimension independently: there is simply no difference to the two-dimensional case. Since the important objects used in this function (quadrature formula, FEValues) depend on the dimension by way of a template parameter as well, they can take care of setting up properly everything for the dimension for which this function is compiled. By declaring all classes which might depend on the dimension using a template parameter, the library can make nearly all work for you and you don't have to care about most things.
  template <int dim>
  void Step4<dim>::assemble_system()
  {
    QGauss<dim> quadrature_formula(fe.degree + 1);
We wanted to have a non-constant right hand side, so we use an object of the class declared above to generate the necessary data. Since this right hand side object is only used locally in the present function, we declare it here as a local variable:

    RightHandSide<dim> right_hand_side;
Compared to the previous example, in order to evaluate the non-constant right hand side function we now also need the quadrature points on the cell we are presently on (previously, we only required values and gradients of the shape functions from the FEValues object, as well as the quadrature weights, FEValues::JxW()). We can tell the FEValues object to do this for us by also giving it the #update_quadrature_points flag:
    FEValues<dim> fe_values(fe,
                            quadrature_formula,
                            update_values | update_gradients |
                              update_quadrature_points | update_JxW_values);
We then again define the same abbreviation as in the previous program. The value of this variable of course depends on the dimension which we are presently using, but the FiniteElement class does all the necessary work for you and you don't have to care about the dimension dependent parts:
    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
Next, we again have to loop over all cells and assemble local contributions. Note that a cell is a quadrilateral in two space dimensions, but a hexahedron in 3d. In fact, the <code>active_cell_iterator</code> data type is something different, depending on the dimension we are in, but to the outside world they look alike and you will probably never see a difference. In any case, the real type is hidden by using `auto`:

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);

        cell_matrix = 0;
        cell_rhs    = 0;
Now we have to assemble the local matrix and right hand side. This is done exactly like in the previous example, but now we revert the order of the loops (which we can safely do since they are independent of each other) and merge the loops for the local matrix and the local vector as far as possible to make things a bit faster.
Assembling the right hand side presents the only significant difference to how we did things in @ref step_3 "step-3": Instead of using a constant right hand side with value 1, we use the object representing the right hand side and evaluate it at the quadrature points:
        for (const unsigned int q_index : fe_values.quadrature_point_indices())
          for (const unsigned int i : fe_values.dof_indices())
            {
              for (const unsigned int j : fe_values.dof_indices())
                cell_matrix(i, j) +=
                  (fe_values.shape_grad(i, q_index) * // grad phi_i(x_q)
                   fe_values.shape_grad(j, q_index) * // grad phi_j(x_q)
                   fe_values.JxW(q_index));           // dx

              const auto &x_q = fe_values.quadrature_point(q_index);
              cell_rhs(i) += (fe_values.shape_value(i, q_index) * // phi_i(x_q)
                              right_hand_side.value(x_q) *        // f(x_q)
                              fe_values.JxW(q_index));            // dx
            }
As a final remark to these loops: when we assemble the local contributions into <code>cell_matrix(i,j)</code>, we have to multiply the gradients of shape functions @f$i@f$ and @f$j@f$ at point number q_index and multiply it with the scalar weights JxW. This is what actually happens: <code>fe_values.shape_grad(i,q_index)</code> returns a <code>dim</code> dimensional vector, represented by a <code>Tensor@<1,dim@></code> object, and the operator* that multiplies it with the result of <code>fe_values.shape_grad(j,q_index)</code> makes sure that the <code>dim</code> components of the two vectors are properly contracted, and the result is a scalar floating point number that is then multiplied with the weights. Internally, this operator* makes sure that this happens correctly for all <code>dim</code> components of the vectors, whether <code>dim</code> be 2, 3, or any other space dimension; from a user's perspective, this is not something worth bothering with, however, making things a lot simpler if one wants to write code dimension independently.
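To make the contraction concrete, here is a tiny standalone sketch (not part of the tutorial program) of what operator* computes for two <code>Tensor@<1,dim@></code> objects:
@code
#include <deal.II/base/tensor.h>
#include <iostream>

int main()
{
  dealii::Tensor<1, 2> a, b;
  a[0] = 1.0; a[1] = 2.0;
  b[0] = 3.0; b[1] = 4.0;

  // operator* contracts the two components: 1*3 + 2*4 = 11
  const double dot = a * b;
  std::cout << dot << std::endl; // prints 11
}
@endcode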
With the local systems assembled, the transfer into the global matrix and right hand side is done exactly as before, but here we have again merged some loops for efficiency:
        cell->get_dof_indices(local_dof_indices);
        for (const unsigned int i : fe_values.dof_indices())
          {
            for (const unsigned int j : fe_values.dof_indices())
              system_matrix.add(local_dof_indices[i],
                                local_dof_indices[j],
                                cell_matrix(i, j));

            system_rhs(local_dof_indices[i]) += cell_rhs(i);
          }
As the final step in this function, we wanted to have non-homogeneous boundary values in this example, unlike the one before. This is a simple task: we only have to replace the Functions::ZeroFunction used there by an object of the class which describes the boundary values we would like to use (i.e., the <code>BoundaryValues</code> class declared above):

The function VectorTools::interpolate_boundary_values() will only work on faces that have been marked with boundary indicator 0 (because that's what we say the function should work on with the second argument below). If there are faces with boundary id other than 0, then the function interpolate_boundary_values() will do nothing on these faces. For the Laplace equation, doing nothing is equivalent to assuming that on those parts of the boundary a zero Neumann boundary condition holds.
    std::map<types::global_dof_index, double> boundary_values;
    VectorTools::interpolate_boundary_values(dof_handler,
                                             0,
                                             BoundaryValues<dim>(),
                                             boundary_values);
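As in step-3, the program then hands this map to MatrixTools::apply_boundary_values(), which modifies the linear system so that the solution attains exactly these values at the boundary degrees of freedom:
@code
    MatrixTools::apply_boundary_values(boundary_values,
                                       system_matrix,
                                       solution,
                                       system_rhs);
@endcode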
<a name="Step4solve"></a>
<h4>Step4::solve</h4>
Solving the linear system of equations is something that looks almost identical in most programs. In particular, it is dimension independent, so this function is copied verbatim from the previous example.
  template <int dim>
  void Step4<dim>::solve()
  {
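    // The body is the same unpreconditioned CG solve as in step-3. The
    // following is a sketch; the exact solver tolerance used by the
    // tutorial may differ:
    SolverControl solver_control(1000, 1e-12);
    SolverCG<Vector<double>> solver(solver_control);
    solver.solve(system_matrix, solution, system_rhs, PreconditionIdentity());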
We have made one addition, though: since we suppress output from the linear solvers, we have to print the number of iterations by hand.

    std::cout << "   " << solver_control.last_step()
              << " CG iterations needed to obtain convergence." << std::endl;
  }
<a name="Step4output_results"></a>
<h4>Step4::output_results</h4>
This function also does what the respective one did in @ref step_3 "step-3". No changes here for dimension independence either.

Since the program will run both 2d and 3d versions of the Laplace solver, we use the dimension in the filename to generate distinct filenames for each run (in a better program, one would check whether <code>dim</code> can have other values than 2 or 3, but we neglect this here for the sake of brevity).
  template <int dim>
  void Step4<dim>::output_results() const
  {
    DataOut<dim> data_out;

    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "solution");

    data_out.build_patches();

    std::ofstream output(dim == 2 ? "solution-2d.vtk" : "solution-3d.vtk");
    data_out.write_vtk(output);
  }
<a name="Step4run"></a>
<h4>Step4::run</h4>
This is the function which has the top-level control over everything. Apart from one line of additional output, it is the same as for the previous example.
  template <int dim>
  void Step4<dim>::run()
  {
    std::cout << "Solving problem in " << dim << " space dimensions."
              << std::endl;

    make_grid();
    setup_system();
    assemble_system();
    solve();
    output_results();
  }
<a name="Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>
And this is the main function. It also looks mostly like in @ref step_3 "step-3", but if you look at the code below, note how we first create a variable of type <code>Step4@<2@></code> (forcing the compiler to compile the class template with <code>dim</code> replaced by <code>2</code>) and run a 2d simulation, and then we do the whole thing over in 3d.
In practice, this is probably not what you would do very frequently (you probably either want to solve a 2d problem, or one in 3d, but not both at the same time). However, it demonstrates the mechanism by which we can simply change which dimension we want in a single place, and thereby force the compiler to recompile the dimension independent class templates for the dimension we request. The emphasis here lies on the fact that we only need to change a single place. This makes it rather trivial to debug the program in 2d where computations are fast, and then switch a single place to a 3 to run the much more computing intensive program in 3d for `real' computations.
Each of the two blocks is enclosed in braces to make sure that the <code>laplace_problem_2d</code> variable goes out of scope (and releases the memory it holds) before we move on to allocate memory for the 3d case. Without the additional braces, the <code>laplace_problem_2d</code> variable would only be destroyed at the end of the function, i.e. after running the 3d problem, and would needlessly hog memory while the 3d run could actually use it.
  {
    Step4<2> laplace_problem_2d;
    laplace_problem_2d.run();
  }

  {
    Step4<3> laplace_problem_3d;
    laplace_problem_3d.run();
  }
<a name="Results"></a><h1>Results</h1>
The output of the program looks as follows (the number of iterations may vary by one or two, depending on your computer, since this is often dependent on the round-off accuracy of floating point operations, which differs between processors):
@code
Solving problem in 2 space dimensions.
   Number of active cells: 256
   Total number of cells: 341
   Number of degrees of freedom: 289
   26 CG iterations needed to obtain convergence.

Solving problem in 3 space dimensions.
   Number of active cells: 4096
   Total number of cells: 4681
   Number of degrees of freedom: 4913
   30 CG iterations needed to obtain convergence.
@endcode
It is obvious that in three spatial dimensions the number of cells and therefore also the number of degrees of freedom is much higher. What cannot be seen here is that besides this higher number of rows and columns in the matrix, there are also significantly more entries per row of the matrix in three space dimensions. Together, this leads to a much higher numerical effort for solving the system of equations, which you can feel in the run time of the two solution steps when you actually run the program.
The program produces two files: <code>solution-2d.vtk</code> and <code>solution-3d.vtk</code>, which can be viewed using the programs VisIt or Paraview (in case you do not have these programs, you can easily change the output format in the program to something which you can view more easily). Visualizing solutions is a bit of an art, but it can also be fun, so you should play around with your favorite visualization tool to get familiar with its functionality. Here's what I have come up with for the 2d solution:
<img src="https://www.dealii.org/images/steps/developer/step-4.solution-2d.png" alt="">
(See also <a href="http://www.math.colostate.edu/~bangerth/videos.676.11.html">video lecture 11</a> and <a href="http://www.math.colostate.edu/~bangerth/videos.676.32.html">video lecture 32</a>.)
The picture shows the solution of the problem under consideration as a 3D plot. As can be seen, the solution is almost flat in the interior of the domain and has a higher curvature near the boundary. This, of course, is due to the fact that for Laplace's equation the curvature of the solution is equal to the right hand side, and that was chosen as a quartic polynomial which is nearly zero in the interior and rises sharply only when approaching the boundaries of the domain; the maximal values of the right hand side function are at the corners of the domain, where the solution also changes most rapidly. It is also nice to see that the solution follows the desired quadratic boundary values along the boundaries of the domain. It can also be useful to verify a computed solution against an analytical solution. For an explanation of this technique, see @ref step_7 "step-7".
On the other hand, even though the picture does not show the mesh lines explicitly, you can see them as little kinks in the solution. This clearly indicates that the solution hasn't been computed to very high accuracy and that to get a better solution, we may have to compute on a finer mesh.
In three spatial dimensions, visualization is a bit more difficult. The left picture shows the solution and the mesh it was computed on, on the surface of the domain. This is nice, but it has the drawback that it completely hides what is happening on the inside. The picture on the right is an attempt at visualizing the interior as well, by showing surfaces where the solution has constant values (as indicated by the legend at the top left). Isosurface pictures look best if one makes the individual surfaces slightly transparent so that it is possible to see through them and see what's behind.
<table width="60%" align="center">
  <!-- two pictures: the 3d solution and mesh on the surface of the domain (left), and isosurfaces of the solution (right) -->
</table>
A final remark on visualization: the idea of visualization is to give insight, which is not the same as displaying information. In particular, it is easy to overload a picture with information, but while it shows more information it also makes it more difficult to glean insight. As an example, the program I used to generate these pictures, VisIt, by default puts tick marks on every axis, puts a big fat label "X Axis" on the @f$x@f$ axis and similar for the other axes, shows the file name from which the data was taken in the top left and the name of the user doing so and the time and date on the bottom right. None of this is important here: the axes are equally easy to make out because the tripod at the bottom left is still visible, and we know from the program that the domain is @f$[-1,1]^3@f$, so there is no need for tick marks. As a consequence, I have switched off all the extraneous stuff in the picture: the art of visualization is to reduce the picture to those parts that are important to see what one wants to see, but no more.
<a name="postprocessing"></a>
<a name="PostprocessingWhattodowiththesolution"></a><h3> Postprocessing: What to do with the solution? </h3>
This tutorial -- like most of the other programs -- principally only shows how to numerically approximate the solution of a partial differential equation, and then how to visualize this solution graphically. But solving a PDE is of course not the goal in most practical applications (unless you are a numerical methods developer and the *method* is the goal): We generally want to solve a PDE because we want to *extract information* from it. Examples for what people are interested in from solutions include the following:
- Let's say you solve the equations of elasticity (which we will do in @ref step_8 "step-8"), then that's presumably because you want to know about the deformation of an elastic object under a given load. From an engineering perspective, what you then presumably want to learn is the degree of deformation of the object, say at a specific point; or you may want to know the maximum stress in order to determine whether the applied load exceeds the safe maximal stress the material can withstand.
- If you are solving fluid flow problems (such as in @ref step_22 "step-22", @ref step_57 "step-57", @ref step_67 "step-67", and @ref step_69 "step-69"), then you might be interested in the fluid velocity at specific points, and oftentimes the forces the fluid exerts on the boundary of the fluid domain. The latter is important in many applications: If the fluid in question is the air flowing around an airplane, then we are typically interested in the drag and lift forces on the fuselage and wings. If the fluid is water flowing around a ship, then we typically care about the drag force on the ship.
- If you are solving the Maxwell equations of electromagnetics, you are typically interested in how much energy is radiated in certain directions (say, in order to know the range of information transmission via an antenna, or to determine the radar cross section of planes or ships).
The point here is that from an engineering perspective, *solving* the PDE is only the first step. The second step is to *evaluate* the computed solution in order to extract relevant numbers that allow us to either *optimize a design*, or to *make decisions*. This second step is often called "postprocessing the solution".
This program does not solve a solid or fluid mechanics problem, so we should try to illustrate postprocessing with something that makes sense in the context of the equation we solve here. The Poisson equation in two space dimensions is a model for the vertical deformation of a membrane that is clamped at the boundary and is subject to a vertical force. For this kind of situation, it makes sense to evaluate the *average vertical displacement*,
@f[
  \bar u_h = \frac{\int_\Omega u_h(\mathbf x) \, dx}{|\Omega|},
@f]
where @f$|\Omega| = \int_\Omega 1 \, dx@f$ is the area of the domain. To compute @f$\bar u_h@f$, as usual we replace integrals over the domain by a sum of integrals over cells,
@f[
  \int_\Omega u_h(\mathbf x) \, dx
  = \sum_K \int_K u_h(\mathbf x) \, dx,
@f]
and then integrals over cells are approximated by quadrature:
@f[
  \int_\Omega u_h(\mathbf x) \, dx
  = \sum_K \int_K u_h(\mathbf x) \, dx
  \approx \sum_K \sum_q u_h(\mathbf x_q^K) w_q^K,
@f]
where @f$w_q^K@f$ is the weight of the @f$q@f$th quadrature point evaluated on cell @f$K@f$. All of this is as always provided by the FEValues class -- the entry point for all integrals in deal.II.
The actual implementation of this is straightforward once you know how to get the values of the solution @f$u@f$ at the quadrature points of a cell. This functionality is provided by FEValues::get_function_values(), a function that takes a global vector of nodal values as input and returns a vector of function values at the quadrature points of the current cell. To see how it all works together, you can place the following code snippet anywhere in the program after the solution has been computed (the `output_results()` function seems like a good place to also do postprocessing, for example):
  QGauss<dim> quadrature_formula(fe.degree + 1);
  FEValues<dim> fe_values(fe,
                          quadrature_formula,
                          update_values | update_JxW_values);

  std::vector<double> solution_values(quadrature_formula.size());
  double integral_of_u   = 0;
  double volume_of_omega = 0;

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      fe_values.reinit(cell);
      fe_values.get_function_values(solution, solution_values);

      for (const unsigned int q_point : fe_values.quadrature_point_indices())
        {
          integral_of_u += solution_values[q_point] * fe_values.JxW(q_point);
          volume_of_omega += 1 * fe_values.JxW(q_point);
        }
    }

  std::cout << "   Mean value of u=" << integral_of_u / volume_of_omega
            << std::endl;
In this code snippet, we also compute the volume (or, since we are currently thinking about a two-dimensional situation: the area) @f$|\Omega|@f$ by computing the integral @f$|\Omega| = \int_\Omega 1 \, dx@f$ in exactly the same way, via quadrature. (We could avoid having to compute @f$|\Omega|@f$ by hand here, using the fact that deal.II has a function for this: GridTools::volume(). That said, it is efficient to compute the two integrals concurrently in the same loop, and so that's what we do.)
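For reference, using that function instead would be a one-liner:
@code
  // Computes the measure (area in 2d, volume in 3d) of the triangulation:
  const double measure_of_omega = GridTools::volume(triangulation);
@endcode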
This program of course also solves the same Poisson equation in three space dimensions. In this situation, the Poisson equation is often used as a model for diffusion of either a physical species (say, of ink in a tank of water, or a pollutant in the air) or of energy (specifically, of thermal energy in a solid body). In that context, the quantity
@f[
  \Phi_h = \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
@f]
is the *flux* of this species or energy across the boundary. (In actual physical models, one would also have to multiply the right hand side by a diffusivity or conductivity constant, but let us ignore this here.) In much the same way as before, we compute such integrals by splitting them into integrals over *faces* of cells, and then applying quadrature:
@f[
  \int_{\partial\Omega} \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
  =
  \sum_K
  \sum_{f \in \text{faces of } K, f\subset\partial\Omega}
  \int_f \nabla u_h(\mathbf x) \cdot \mathbf n(\mathbf x) \; dx
  \approx
  \sum_K
  \sum_{f \in \text{faces of } K, f\subset\partial\Omega}
  \sum_q \nabla u_h(\mathbf x_q^f) \cdot \mathbf n(\mathbf x_q^f) w_q^f,
@f]
where now @f$\mathbf x_q^f@f$ are the quadrature points located on face @f$f@f$, and @f$w_q^f@f$ are the weights associated with these faces. The second of the sum symbols loops over all faces of cell @f$K@f$, but restricted to those that are actually at the boundary.
This all is easily implemented by the following code that replaces the use of the FEValues class (which is used for integrating over cells -- i.e., domain integrals) by the FEFaceValues class (which is used for integrating over faces -- i.e., boundary integrals):
  QGauss<dim - 1> face_quadrature_formula(fe.degree + 1);
  FEFaceValues<dim> fe_face_values(fe,
                                   face_quadrature_formula,
                                   update_gradients | update_normal_vectors |
                                     update_JxW_values);

  std::vector<Tensor<1, dim>> solution_gradients(
    face_quadrature_formula.size());

  double flux = 0;
  for (const auto &cell : dof_handler.active_cell_iterators())
    for (const auto &face : cell->face_iterators())
      if (face->at_boundary())
        {
          fe_face_values.reinit(cell, face);
          fe_face_values.get_function_gradients(solution, solution_gradients);

          for (const unsigned int q_point :
               fe_face_values.quadrature_point_indices())
            flux += solution_gradients[q_point] *
                    fe_face_values.normal_vector(q_point) *
                    fe_face_values.JxW(q_point);
        }

  std::cout << "   Flux=" << flux << std::endl;
If you add these two code snippets to the code, you will get output like the following when you run the program:
@code
Solving problem in 2 space dimensions.
   Number of active cells: 256
   Total number of cells: 341
   Number of degrees of freedom: 289
   26 CG iterations needed to obtain convergence.
   Mean value of u=1.33303
   Flux=-3.68956

Solving problem in 3 space dimensions.
   Number of active cells: 4096
   Total number of cells: 4681
   Number of degrees of freedom: 4913
   30 CG iterations needed to obtain convergence.
   Mean value of u=1.58058
   Flux=-8.29435
@endcode
This makes some sense: If you look, for example, at the 2d output above, the solution varies between values of 1 and 2, but with a larger part of the solution closer to one than two; so an average value of 1.33 for the mean value is reasonable. For the flux, recall that @f$\nabla u \cdot \mathbf n@f$ is the directional derivative in the normal direction -- in other words, how the solution changes as we move from the interior of the domain towards the boundary. If you look at the 2d solution, you will realize that for most parts of the boundary, the solution *decreases* as we approach the boundary, so the normal derivative is negative -- so if we integrate along the boundary, we should expect (and obtain!) a negative value.
<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
There are many ways with which one can play with this program. The simpler ones include essentially all the possibilities already discussed in the <a href="step_3.html#extensions" target="body">Possibilities for extensions in the documentation of step 3</a>, except that you will have to think about whether something now also applies to the 3d case discussed in the current program.
It is also worthwhile considering the postprocessing options discussed above. The documentation states two numbers (the mean value and the normal flux) for both the 2d and 3d cases. Can we trust these numbers? We have convinced ourselves that at least the mean value is reasonable, and that the sign of the flux is probably correct. But are these numbers accurate?
A general rule is that we should never trust a number unless we have verified it in some way. From the theory of finite element methods, we know that as we make the mesh finer and finer, the numerical solution @f$u_h@f$ we compute here must converge to the exact solution @f$u@f$. As a consequence, we also expect that @f$\bar u_h \rightarrow \bar u@f$ and @f$\Phi_h \rightarrow \Phi@f$, but that does not mean that for any given mesh @f$\bar u_h@f$ or @f$\Phi_h@f$ are particularly accurate approximations.
To test this kind of thing, we have already considered the convergence of a point value in @ref step_3 "step-3". We can do the same here by selecting how many times the mesh is globally refined in the `make_grid()` function of this program (as sketched below the table). For the mean value of the solution, we then get the following numbers:
  <table align="center" class="doxtable">
    <tr> <th># of refinements</th>
         <th>@f$\bar u_h@f$ in 2d</th>
         <th>@f$\bar u_h@f$ in 3d</th>
    </tr>
    <tr> <td>4</td> <td>1.33303</td> <td>1.58058</td> </tr>
    <tr> <td>5</td> <td>1.33276</td> <td>1.57947</td> </tr>
    <tr> <td>6</td> <td>1.3327</td> <td>1.5792</td> </tr>
    <tr> <td>7</td> <td>1.33269</td> <td>1.57914</td> </tr>
    <tr> <td>8</td> <td>1.33268</td> <td></td> </tr>
    <tr> <td>9</td> <td>1.33268</td> <td></td> </tr>
  </table>
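The only change needed to reproduce each row is the number of global refinement steps in `make_grid()`, where `n` below is a placeholder for the value in the first column:
@code
    // In make_grid(), vary the number of global refinement steps
    // (the tutorial uses 4; the table above uses n = 4,...,9):
    triangulation.refine_global(n);
@endcode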
I did not have the patience to run the last two values for the 3d case -- one needs quite a fine mesh for this, with correspondingly long run times. But we can be reasonably assured that values around 1.33 (for the 2d case) and 1.58 (for the 3d case) are about right -- and at least for engineering applications, three digits of accuracy are good enough.
The situation looks very different for the flux. Here, we get results such as the following:
  <table align="center" class="doxtable">
    <tr> <th># of refinements</th>
         <th>@f$\Phi_h@f$ in 2d</th>
         <th>@f$\Phi_h@f$ in 3d</th>
    </tr>
    <tr> <td>4</td> <td>-3.68956</td> <td>-8.29435</td> </tr>
    <tr> <td>5</td> <td>-4.90147</td> <td>-13.0691</td> </tr>
    <tr> <td>6</td> <td>-5.60745</td> <td>-15.9171</td> </tr>
    <tr> <td>7</td> <td>-5.99111</td> <td>-17.4918</td> </tr>
    <tr> <td>8</td> <td>-6.19196</td> <td></td> </tr>
    <tr> <td>9</td> <td>-6.29497</td> <td></td> </tr>
    <tr> <td>10</td> <td>-6.34721</td> <td></td> </tr>
    <tr> <td>11</td> <td>-6.37353</td> <td></td> </tr>
  </table>
So this is not great. For the 2d case, we might infer that perhaps a value around -6.4 might be right if we just refine the mesh enough -- though 11 refinements already leads to some 4,194,304 cells. In any case, the first number (the one shown in the beginning where we discussed postprocessing) was off by almost a factor of 2!
For the 3d case, the last number shown was on a mesh with 2,097,152 cells; the next one would have had 8 times as many cells. In any case, the numbers mean that we can't even be sure that the first digit of that last number is correct! In other words, it was worth checking, or we would have just believed all of these numbers. In fact, that last column isn't even doing a particularly good job convincing us that the code might be correctly implemented.
If you keep reading through the other tutorial programs, you will find many ways to make these sorts of computations more accurate and to come to believe that the flux actually does converge to its correct value. For example, we can dramatically increase the accuracy of the computation by using adaptive mesh refinement (@ref step_6 "step-6") near the boundary, and in particular by using higher polynomial degree finite elements (also @ref step_6 "step-6", but also @ref step_7 "step-7"). Using the latter, with cubic elements (polynomial degree 3), we can actually compute the flux pretty accurately even in 3d: @f$\Phi_h=-19.0148@f$ with 4 global refinement steps, and @f$\Phi_h=-19.1533@f$ with 5 refinement steps. These numbers are already pretty close together and give us a reasonable idea of the first two correct digits of the "true" answer.
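To try this yourself, the only change needed is the polynomial degree passed to the finite element in the constructor shown earlier; as a sketch:
@code
  // Request cubic (degree 3) instead of linear (degree 1) elements;
  // everything else in the program stays the same:
  template <int dim>
  Step4<dim>::Step4()
    : fe(/* polynomial degree = */ 3)
    , dof_handler(triangulation)
  {}
@endcode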
@note We would be remiss to not also comment on the fact that there are good theoretical reasons why computing the flux accurately appears to be so much more difficult than the average value. This has to do with the fact that finite element theory provides us with the estimate @f$\|u-u_h\|_{L_2(\Omega)} \le C h^2 \|\nabla^2u\|_{L_2(\Omega)}@f$ when using the linear elements this program uses -- that is, for every global mesh refinement, @f$h@f$ is reduced by a factor of two and the error goes down by a factor of 4. Now, the @f$L_2@f$ error is not equivalent to the error in the mean value, but the two are related: They are both integrals over the domain, using the *value* of the solution. We expect the mean value to converge no worse than the @f$L_2@f$ norm of the error. At the same time, theory also provides us with this estimate: @f$\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le C h^{1/2} \|\nabla^2u\|_{L_2(\Omega)}@f$. The move from values to gradients reduces the convergence rates by one order, and the move from domain to boundary by another half order. Here, then, each refinement step reduces the error not by a factor of 4 any more, but only by a factor of @f$\sqrt{2} \approx 1.4@f$. It takes a lot of global refinement steps to reduce the error by, say, a factor of ten or a hundred, and this is reflected in the very slow convergence evidenced by the table. On the other hand, for cubic elements (i.e., polynomial degree 3), we would get @f$\|u-u_h\|_{L_2(\Omega)} \le C h^4 \|\nabla^4u\|_{L_2(\Omega)}@f$ and after reduction by 1.5 orders, we would still have @f$\|\nabla (u-u_h)\|_{L_2(\partial\Omega)} \le C h^{2+1/2} \|\nabla^4u\|_{L_2(\Omega)}@f$. This rate, @f${\cal O}(h^{2.5})@f$, is still quite rapid, and it is perhaps not surprising that we get much better answers with these higher order elements. This also illustrates that when trying to approximate anything that relates to a gradient of the solution, using linear elements (polynomial degree one) is really not a good choice at all.
@note In this very specific case, it turns out that we can actually compute the exact value of @f$\Phi@f$. This is because for the Poisson equation we compute the solution of here, @f$-\Delta u = f@f$, we can integrate over the domain, @f$-\int_\Omega \Delta u = \int_\Omega f@f$, and then use that @f$\Delta = \text{div}\;\text{grad}@f$; this allows us to apply the divergence theorem, followed by multiplying by minus one, to find @f$\int_{\partial\Omega} \nabla u \cdot n = -\int_\Omega f@f$. The left hand side happens to be @f$\Phi@f$. For the specific right hand side @f$f(x_1,x_2)=4(x_1^4+x_2^4)@f$ we use in 2d, we then get @f$-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4) \; dx_2\; dx_1 = -16 \left[\int_{-1}^{1} x^4 \; dx\right] = -16\times\frac 25@f$, which has a numerical value of exactly -6.4 -- right on with our guess above. In 3d, we can do the same and get that the exact value is @f$-\int_\Omega f = -\int_{-1}^{1} \int_{-1}^{1} \int_{-1}^{1} 4(x_1^4+x_2^4+x_3^4) \; dx_3 \; dx_2\; dx_1 = -48\times\frac 25=-19.2@f$. What we found with cubic elements is then quite close to this exact value. Of course, in practice we almost never have exact values to compare with: If we could compute something on a piece of paper, we wouldn't have to solve the PDE numerically. But these sorts of situations make for excellent test cases that help us verify that our numerical solver works correctly. In many other cases, the literature contains numbers where others have already computed an answer accurately using their own software, and these are also often useful to compare against in verifying the correctness of our codes.
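Just to double-check the arithmetic of this argument, here is a tiny standalone program (not part of the tutorial) that evaluates @f$-\int_\Omega f@f$ for both right hand sides:
@code
#include <iostream>

int main()
{
  // \int_{-1}^{1} x^4 dx = 2/5. Each of the dim terms of
  // f(x) = 4 * sum_i x_i^4 integrates to 4 * (2/5) * 2^(dim-1) over the
  // square/cube, so Phi = -dim * 4 * (2/5) * 2^(dim-1):
  const double one_dim_integral = 2.0 / 5.0;
  const double phi_2d = -(2 * 4 * one_dim_integral * 2);     // = -6.4
  const double phi_3d = -(3 * 4 * one_dim_integral * 2 * 2); // = -19.2
  std::cout << "Phi_2d = " << phi_2d << ", Phi_3d = " << phi_3d << std::endl;
}
@endcode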
<a name="PlainProg"></a>
<h1> The plain program</h1>