                                    const unsigned int component) const
  {
    (void)component;
    Assert(component == 0, ExcIndexRange(component, 0, 1));
    return 0;
  }
<a name="step_26-ThecodeHeatEquationcodeimplementation"></a>
<h3>The <code>HeatEquation</code> implementation</h3>
It is time now for the implementation of the main class. Let's start with the
constructor, which selects a linear element, a time step constant at 1/500
(remember that one period of the source on the right hand side was set to 0.2
above, so we resolve each period with 100 time steps), and chooses the
Crank-Nicolson method by setting @f$\theta=1/2@f$.
  template <int dim>
  HeatEquation<dim>::HeatEquation()
    : fe(1)
    , dof_handler(triangulation)
    , time_step(1. / 500)
    , theta(0.5)
  {}
<a name="step_26-codeHeatEquationsetup_systemcode"></a>
<h4><code>HeatEquation::setup_system</code></h4>
The next function is the one that sets up the DoFHandler object, computes
the constraints, and sets the linear algebra objects to their correct sizes.
We also compute the mass and Laplace matrix here by simply calling two
functions in the library.

Note that we do not take the hanging node constraints into account when
assembling the matrices (both functions have an AffineConstraints argument
that defaults to an empty object). This is because we are going to condense
the constraints in run() after combining the matrices for the current time
step.
  template <int dim>
  void HeatEquation<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);

    std::cout << std::endl
              << "===========================================" << std::endl
              << "Number of active cells: " << triangulation.n_active_cells()
              << std::endl
              << "Number of degrees of freedom: " << dof_handler.n_dofs()
              << std::endl
              << std::endl;

    constraints.clear();
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    constraints.close();

    DynamicSparsityPattern dsp(dof_handler.n_dofs());
    DoFTools::make_sparsity_pattern(dof_handler,
                                    dsp,
                                    constraints,
                                    /*keep_constrained_dofs = */ true);
    sparsity_pattern.copy_from(dsp);

    mass_matrix.reinit(sparsity_pattern);
    laplace_matrix.reinit(sparsity_pattern);
    system_matrix.reinit(sparsity_pattern);

    MatrixCreator::create_mass_matrix(dof_handler,
                                      QGauss<dim>(fe.degree + 1),
                                      mass_matrix);
    MatrixCreator::create_laplace_matrix(dof_handler,
                                         QGauss<dim>(fe.degree + 1),
                                         laplace_matrix);

    solution.reinit(dof_handler.n_dofs());
    old_solution.reinit(dof_handler.n_dofs());
    system_rhs.reinit(dof_handler.n_dofs());
  }
<a name="step_26-codeHeatEquationsolve_time_stepcode"></a>
<h4><code>HeatEquation::solve_time_step</code></h4>
The next function is the one that solves the actual linear system for a
single time step. There is nothing surprising here:
  template <int dim>
  void HeatEquation<dim>::solve_time_step()
  {
    SolverControl            solver_control(1000, 1e-8 * system_rhs.l2_norm());
    SolverCG<Vector<double>> cg(solver_control);

    PreconditionSSOR<SparseMatrix<double>> preconditioner;
    preconditioner.initialize(system_matrix, 1.0);

    cg.solve(system_matrix, solution, system_rhs, preconditioner);

    constraints.distribute(solution);

    std::cout << "     " << solver_control.last_step() << " CG iterations."
              << std::endl;
  }
<a name="step_26-codeHeatEquationoutput_resultscode"></a>
<h4><code>HeatEquation::output_results</code></h4>
Neither is there anything new in generating graphical output other than the
fact that we tell the DataOut object what the current time and time step
number is, so that this can be written into the output file:
  template <int dim>
  void HeatEquation<dim>::output_results() const
  {
    DataOut<dim> data_out;

    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(solution, "U");

    data_out.build_patches();

    data_out.set_flags(DataOutBase::VtkFlags(time, timestep_number));

    const std::string filename =
      "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtk";
    std::ofstream output(filename);
    data_out.write_vtk(output);
  }
<a name="step_26-codeHeatEquationrefine_meshcode"></a>
<h4><code>HeatEquation::refine_mesh</code></h4>
This function is the interesting part of the program. It takes care of the
adaptive mesh refinement. The three tasks this function performs are to
first find out which cells to refine/coarsen, then to actually do the
refinement, and eventually transfer the solution vectors between the two
different grids. The first task is simply achieved by using the
well-established Kelly error estimator on the solution. The second task is
to actually do the remeshing. That involves only basic functions as well,
such as the <code>refine_and_coarsen_fixed_fraction</code> that refines
those cells with the largest estimated error that together make up 60 per
cent of the error, and coarsens those cells with the smallest error that
make up for a combined 40 per cent of the error. Note that for problems
such as the current one where the areas where something is going on are
shifting around, we want to aggressively coarsen so that we can move cells
around to where it is necessary.

As already discussed in the introduction, too small a mesh leads to too
small a time step, whereas too large a mesh leads to too little resolution.
Consequently, after the first two steps, we have two loops that limit
refinement and coarsening to an allowable range of cells:
  template <int dim>
  void HeatEquation<dim>::refine_mesh(const unsigned int min_grid_level,
                                      const unsigned int max_grid_level)
  {
    Vector<float> estimated_error_per_cell(triangulation.n_active_cells());

    KellyErrorEstimator<dim>::estimate(
      dof_handler,
      QGauss<dim - 1>(fe.degree + 1),
      std::map<types::boundary_id, const Function<dim> *>(),
      solution,
      estimated_error_per_cell);

    GridRefinement::refine_and_coarsen_fixed_fraction(triangulation,
                                                      estimated_error_per_cell,
                                                      0.6,
                                                      0.4);

    if (triangulation.n_levels() > max_grid_level)
      for (const auto &cell :
           triangulation.active_cell_iterators_on_level(max_grid_level))
        cell->clear_refine_flag();
    for (const auto &cell :
         triangulation.active_cell_iterators_on_level(min_grid_level))
      cell->clear_coarsen_flag();
These two loops above are slightly different but this is easily explained.
In the first loop, instead of calling <code>triangulation.end()</code> we
may as well have called <code>triangulation.end_active(max_grid_level)</code>.
The two calls should yield the same iterator since iterators are sorted by
level and there should not be any cells on levels higher than level
<code>max_grid_level</code>. In fact, this very piece of code makes sure
that this is the case.
As part of mesh refinement we need to transfer the solution vectors from the
old mesh to the new one. To this end we use the SolutionTransfer class and
we have to prepare the solution vectors that should be transferred to the
new grid (we will lose the old grid once we have done the refinement, so the
transfer has to happen concurrently with refinement). At the point where we
call this function, we will have just computed the solution, so we no longer
need the old_solution variable (it will be overwritten by the solution just
after the mesh may have been refined, i.e., at the end of the time step; see
below). In other words, we only need the one solution vector, and we copy it
to a temporary object where it is safe from being reset when we further down
below call <code>setup_system()</code>.
Consequently, we initialize a SolutionTransfer object by attaching it to the
old DoF handler. We then prepare the triangulation and the data vector for
refinement (in this order).
    SolutionTransfer<dim> solution_trans(dof_handler);

    Vector<double> previous_solution;
    previous_solution = solution;
    triangulation.prepare_coarsening_and_refinement();
    solution_trans.prepare_for_coarsening_and_refinement(previous_solution);
Now everything is ready, so do the refinement and recreate the DoF structure
on the new grid, and finally initialize the matrix structures and the new
vectors in the <code>setup_system</code> function. Next, we actually perform
the interpolation of the solution from old to new grid. The final step is to
apply the hanging node constraints to the solution vector, i.e., to make
sure that the values of degrees of freedom located on hanging nodes are such
that the solution is continuous. This is necessary since SolutionTransfer
only operates on cells locally, without regard to the neighborhood.
    triangulation.execute_coarsening_and_refinement();
    setup_system();

    solution_trans.interpolate(previous_solution, solution);
    constraints.distribute(solution);
  }
<a name="step_26-codeHeatEquationruncode"></a>
<h4><code>HeatEquation::run</code></h4>
This is the main driver of the program, where we loop over all time steps.
At the top of the function, we set the number of initial global mesh
refinements and the number of initial cycles of adaptive mesh refinement by
repeating the first time step a few times. Then we create a mesh, initialize
the various objects we will work with, set a label for where we should start
when re-running the first time step, and interpolate the initial solution
onto our mesh (we choose the zero function here, which of course we could do
in a simpler way by just setting the solution vector to zero). We also
output the initial time step once.
@note If you're an experienced programmer, you may be surprised that we use
a <code>goto</code> statement in this piece of code! <code>goto</code>
statements are not particularly well liked any more since Edsger Dijkstra,
one of the greats of computer science, wrote a letter in 1968 called "Go To
Statement Considered Harmful" (see
<a href="http://en.wikipedia.org/wiki/Considered_harmful">here</a>).
The author of this code subscribes to this notion whole-heartedly:
<code>goto</code> is hard to understand. In fact, deal.II contains virtually
no occurrences: excluding code that was essentially transcribed from books
and not counting duplicated code pieces, there are 3 locations in about
600,000 lines of code at the time this note is written; we also use it in 4
tutorial programs, in exactly the same context as here. Instead of trying to
justify the occurrence here, let's first look at the code and we'll come
back to the issue at the end of the function.
  template <int dim>
  void HeatEquation<dim>::run()
  {
    const unsigned int initial_global_refinement       = 2;
    const unsigned int n_adaptive_pre_refinement_steps = 4;

    GridGenerator::hyper_L(triangulation);
    triangulation.refine_global(initial_global_refinement);

    setup_system();

    unsigned int pre_refinement_step = 0;

    Vector<double> tmp;
    Vector<double> forcing_terms;

  start_time_iteration:

    time            = 0.0;
    timestep_number = 0;

    tmp.reinit(solution.size());
    forcing_terms.reinit(solution.size());

    VectorTools::interpolate(dof_handler,
                             Functions::ZeroFunction<dim>(),
                             old_solution);
    solution = old_solution;

    output_results();
Then we start the main loop until the computed time exceeds our end time of
0.5. The first task is to build the right hand side of the linear system we
need to solve in each time step. Recall that it contains the term
@f$MU^{n-1}-(1-\theta)k_n AU^{n-1}@f$. We put these terms into the variable
system_rhs, with the help of a temporary vector:
    const double end_time = 0.5;
    while (time <= end_time)
      {
        time += time_step;
        ++timestep_number;

        std::cout << "Time step " << timestep_number << " at t=" << time
                  << std::endl;

        mass_matrix.vmult(system_rhs, old_solution);

        laplace_matrix.vmult(tmp, old_solution);
        system_rhs.add(-(1 - theta) * time_step, tmp);
The second piece is to compute the contributions of the source terms. This
corresponds to the term @f$k_n \left[ (1-\theta)F^{n-1} + \theta F^n
\right]@f$. The following code calls VectorTools::create_right_hand_side to
compute the vectors @f$F@f$, where we set the time of the right hand side
(source) function before we evaluate it. The result of this all ends up in
the forcing_terms variable:
        RightHandSide<dim> rhs_function;
        rhs_function.set_time(time);
        VectorTools::create_right_hand_side(
          dof_handler, QGauss<dim>(fe.degree + 1), rhs_function, tmp);
        forcing_terms = tmp;
        forcing_terms *= time_step * theta;

        rhs_function.set_time(time - time_step);
        VectorTools::create_right_hand_side(
          dof_handler, QGauss<dim>(fe.degree + 1), rhs_function, tmp);

        forcing_terms.add(time_step * (1 - theta), tmp);
Next, we add the forcing terms to the ones that come from the time stepping,
and also build the matrix @f$M+k_n\theta A@f$ that we have to invert in each
time step. The final piece of these operations is to eliminate hanging node
constrained degrees of freedom from the linear system:
        system_rhs += forcing_terms;

        system_matrix.copy_from(mass_matrix);
        system_matrix.add(theta * time_step, laplace_matrix);

        constraints.condense(system_matrix, system_rhs);
There is one more operation we need to do before we can solve it: boundary
values. To this end, we create a boundary value object, set the proper time
to the one of the current time step, and evaluate it as we have done many
times before. The result is used to also set the correct boundary values in
the linear system:
        BoundaryValues<dim> boundary_values_function;
        boundary_values_function.set_time(time);

        std::map<types::global_dof_index, double> boundary_values;
        VectorTools::interpolate_boundary_values(dof_handler,
                                                 0,
                                                 boundary_values_function,
                                                 boundary_values);

        MatrixTools::apply_boundary_values(
          boundary_values, system_matrix, solution, system_rhs);
With this out of the way, all we have to do is solve the system, generate
graphical data, and...

        solve_time_step();
        output_results();
...take care of mesh refinement. Here, what we want to do is (i) refine the
requested number of times at the very beginning of the solution procedure,
after which we jump to the top to restart the time iteration, (ii) refine
every fifth time step after that.

The time loop and, indeed, the main part of the program ends with starting
into the next time step by setting old_solution to the solution we have just
computed.
        if ((timestep_number == 1) &&
            (pre_refinement_step < n_adaptive_pre_refinement_steps))
          {
            refine_mesh(initial_global_refinement,
                        initial_global_refinement +
                          n_adaptive_pre_refinement_steps);
            ++pre_refinement_step;

            std::cout << std::endl;

            goto start_time_iteration;
          }
        else if ((timestep_number > 0) && (timestep_number % 5 == 0))
          {
            refine_mesh(initial_global_refinement,
                        initial_global_refinement +
                          n_adaptive_pre_refinement_steps);
            tmp.reinit(solution.size());
            forcing_terms.reinit(solution.size());
          }

        old_solution = solution;
      }
  }
Now that you have seen what the function does, let us come back to the issue
of the <code>goto</code>. In essence, what the code does is something like
this:
<div class=CodeFragmentInTutorialComment>
  start_time_iteration:
  for (timestep=1...)
  {
    solve timestep;
    if (timestep==1 && not happy with the result)
    {
      adjust some data structures;
      goto start_time_iteration;
    }
    postprocess;
  }
</div>
Here, the condition "happy with the result" is whether we'd like to keep the
current mesh or would rather refine the mesh and start over on the new mesh.
We could of course replace the use of the <code>goto</code> by something
like the following:
<div class=CodeFragmentInTutorialComment>
  while (true)
  {
    solve timestep;
    if (not happy with the result)
      adjust some data structures;
    else
      break;
  }
  postprocess;

  for (timestep=2...)
  {
    solve timestep;
    postprocess;
  }
</div>
This has the advantage of getting rid of the <code>goto</code> but the
disadvantage of having to duplicate the code that implements the "solve
timestep" and "postprocess" operations in two different places. This could
be countered by putting these parts of the code (sizable chunks in the
actual implementation above) into their own functions, but a
<code>while(true)</code> loop with a <code>break</code> statement is not
really all that much easier to read or understand than a <code>goto</code>.

In the end, one might simply agree that <i>in general</i> <code>goto</code>
statements are a bad idea but be pragmatic and state that there may be
occasions where they can help avoid code duplication and awkward control
flow. This may be one of these places, and it matches the position Steve
McConnell takes in his excellent book "Code Complete" @cite CodeComplete
about good programming practices (see the mention of this book in the
introduction of @ref step_1 "step-1") that spends a surprising ten pages on
the question of <code>goto</code> in general.
<a name="step_26-Thecodemaincodefunction"></a>
<h3>The <code>main</code> function</h3>
Having made it this far, there is, again, nothing much to discuss for the
main function of this program: it looks like all such functions since
@ref step_6 "step-6".
int main()
{
  try
    {
      using namespace Step26;

      HeatEquation<2> heat_equation_solver;
      heat_equation_solver.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
<a name="step_26-Results"></a><h1>Results</h1>

As in many of the tutorials, the actual output of the program matters less
than how we arrived there. Nonetheless, here it is:
@code
===========================================
Number of active cells: 48
Number of degrees of freedom: 65

Time step 1 at t=0.002

===========================================
Number of active cells: 60
Number of degrees of freedom: 81

Time step 1 at t=0.002

===========================================
Number of active cells: 105
Number of degrees of freedom: 136

Time step 1 at t=0.002

[...]

Time step 249 at t=0.498

Time step 250 at t=0.5

===========================================
Number of active cells: 1803
Number of degrees of freedom: 2109
@endcode
Maybe of more interest is a visualization of the solution and the mesh on
which it was computed:

<img src="https://www.dealii.org/images/steps/developer/step-26.movie.gif" alt="Animation of the solution of step 26.">

The movie shows how the two sources switch on and off and how the mesh
reacts to this. It is quite obvious that the mesh, as it is, is probably not
the best we could come up with. We'll get back to this in the next section.
<a name="step-26-extensions"></a>
<a name="step_26-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

There are at least two areas where one can improve this program
significantly: adaptive time stepping and a better choice of the mesh.

<a name="step_26-Adaptivetimestepping"></a><h4>Adaptive time stepping</h4>
Having chosen an implicit time stepping scheme, we are not bound by any
CFL-like condition on the time step. Furthermore, because the time scales on
which change happens on a given cell in the heat equation are not bound to
the cell's diameter (unlike the case with the wave equation, where we had a
fixed speed of information transport that couples the temporal and spatial
scales), we can choose the time step as we please. Or, better, choose it as
we deem necessary for accuracy.

Looking at the solution, it is clear that the action does not happen
uniformly over time: a lot is changing around the times when we switch on a
source, things become less dramatic once a source is on for a little while,
and we enter a long phase of decline when both sources are off. During these
times, we could surely get away with a larger time step than before without
sacrificing too much accuracy.
The literature has many suggestions on how to choose the time step size
adaptively. Much can be learned, for example, from the way ODE solvers
choose their time steps. One can also be inspired by a posteriori error
estimators that can, ideally, be written in a way that they consist of a
temporal and a spatial contribution to the overall error. If the temporal
one is too large, we should choose a smaller time step. Ideas in this
direction can be found, for example, in the PhD thesis of a former principal
developer of deal.II, Ralf Hartmann, published by the University of
Heidelberg, Germany, in 2002.
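
To make the idea concrete, here is a small, self-contained sketch (it is not
part of the tutorial program, and all numbers in it are made up) that shows
the kind of control logic one could aim for: a scalar model ODE is advanced
with the @f$\theta@f$-method, the local error is estimated by comparing one
full step against two half steps ("step doubling"), and the step size is
rejected, kept, or enlarged accordingly:
@code
#include <cmath>
#include <iostream>

// One theta-method step for the model problem du/dt = lambda*u:
double theta_step(const double u, const double k, const double theta)
{
  const double lambda = -100.;
  return u * (1. + (1. - theta) * k * lambda) / (1. - theta * k * lambda);
}

int main()
{
  const double theta = 0.5, tolerance = 1e-6, end_time = 0.5;
  double       u = 1., time = 0., k = 1e-3;

  while (time < end_time)
    {
      const double u_full = theta_step(u, k, theta);
      const double u_half =
        theta_step(theta_step(u, k / 2, theta), k / 2, theta);

      // The difference between the two results serves as an estimate of the
      // local (temporal) error committed with step size k:
      const double error_estimate = std::abs(u_full - u_half);

      if (error_estimate > tolerance)
        {
          k /= 2.; // reject the step and try again with a smaller step
          continue;
        }

      u = u_half; // accept the (more accurate) two-half-step result
      time += k;

      if (error_estimate < tolerance / 10.)
        k *= 2.; // the step was overly cautious, so allow a larger one
    }

  std::cout << "Reached t=" << time << " with u=" << u << std::endl;
}
@endcode
In the context of this program, the quantity playing the role of the error
estimate would of course have to come from the finite element solution, for
example by comparing the Crank-Nicolson result with that of a cheaper,
lower-order scheme.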
<a name="step_26-Bettertimesteppingmethods"></a><h4>Better time stepping methods</h4>
We here use one of the simpler time stepping methods, namely the second
order in time Crank-Nicolson method. This is surely already better than the
even more widely used (and even less accurate) implicit (=backward) Euler
method, but many other, more accurate methods such as BDF or Runge-Kutta
methods are available and should be used as they do not represent much
additional effort. It is not difficult to implement this for the current
program, if one wanted; a more systematic treatment is also given in
@ref step_52 "step-52".
As a general rule, however, one should not be implementing time stepping
methods by hand, as we do here, for problems that do not require exploiting
special properties of the equation and consequently require specialized time
stepping methods. (The heat equation does not fall into this category, and
"standard" time stepping methods are all we need here.) Rather, one should
use one of the available high-quality libraries for time stepping, for the
same reasons as one should not be implementing finite element methods by
hand but use deal.II instead. Indeed, deal.II has interfaces to two such
time stepping libraries, among them the PETSc TS package. These interfaces
can serve as a starting point for the use of much better (and much more
accurate) time steppers, and the methods one would then get also implement
the automatic time step control mentioned above. To see how this works, take
a look at @ref step_86 "step-86".
<a name="step_26-Betterrefinementcriteria"></a><h4>Better refinement criteria</h4>
If you look at the meshes in the movie above, it is clear that they are not
particularly well suited to the task at hand. In fact, they look rather
random.

There are two factors at play. First, there are some islands where cells
have been refined but that are surrounded by non-refined cells (and there
are probably also a few occasional coarsened islands). These are not
terrible, as they most of the time do not affect the approximation quality
of the mesh, but they also don't help because so many of their additional
degrees of freedom are in fact constrained by hanging node constraints. That
said, this is easy to fix: the Triangulation class takes an argument to its
constructor indicating a level of "mesh smoothing". Passing one of many
possible flags, this instructs the triangulation to refine some additional
cells, or not to refine some cells, so that the resulting mesh does not have
this kind of artifact.
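
As a sketch of what this could look like (this is not what the current
program does; the flags shown are existing members of the Triangulation
class), one could construct the triangulation of the
<code>HeatEquation</code> class like this:
@code
// Ask the Triangulation to smooth the mesh on refinement and coarsening so
// that isolated refined or coarsened cells are avoided:
Triangulation<dim> triangulation(
  Triangulation<dim>::MeshSmoothing(Triangulation<dim>::smoothing_on_refinement |
                                    Triangulation<dim>::smoothing_on_coarsening));
@endcode
Since <code>triangulation</code> is a member variable in this program, the
flag would in practice be passed in the constructor's initializer list.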
The second problem is more severe: the mesh appears to lag the solution. The
underlying reason is that we only adapt the mesh once every fifth time step,
and only allow for a single refinement in these cases. Whenever a source
switches on, the solution had been very smooth in this area before and the
mesh was consequently rather coarse. This implies that the next time step
when we refine the mesh, we will get one refinement level more in this area,
and five time steps later another level, etc. But this is not enough: first,
we should refine immediately when a source switches on (after all, in the
current context we at least know what the right hand side is), and we should
allow for more than one refinement level. Of course, all of this can be done
using deal.II, it just requires a bit of algorithmic thinking in how to make
it work!
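
As one illustration of the direction such an algorithm could take, here is a
hedged sketch (not part of the tutorial; the source location
<code>source_center</code>, the radius, and the loop count are made-up
values) that refines the cells around a source that has just switched on,
repeating the refine-and-transfer cycle twice so that more than one level is
gained at once:
@code
// Hypothetical: refine twice around a source that has just switched on,
// reusing the same transfer steps as in refine_mesh().
const Point<dim> source_center(0.5, 0.5); // made-up location
for (unsigned int cycle = 0; cycle < 2; ++cycle)
  {
    for (const auto &cell : triangulation.active_cell_iterators())
      if (cell->center().distance(source_center) < 0.2)
        cell->set_refine_flag();

    SolutionTransfer<dim> solution_trans(dof_handler);
    const Vector<double>  previous_solution = solution;
    triangulation.prepare_coarsening_and_refinement();
    solution_trans.prepare_for_coarsening_and_refinement(previous_solution);
    triangulation.execute_coarsening_and_refinement();
    setup_system();
    solution_trans.interpolate(previous_solution, solution);
    constraints.distribute(solution);
  }
@endcode
One would additionally want to respect the minimal and maximal refinement
levels used elsewhere in the program.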
<a name="step_26-Positivitypreservation"></a><h4>Positivity preservation</h4>
To increase the accuracy and resolution of your simulation in time, one
typically decreases the time step size @f$k_n@f$. If you start playing
around with the time step in this particular example, you will notice that
the solution becomes partly negative, if @f$k_n@f$ is below a certain
threshold. This is not what we would expect to happen (in nature).

To get an idea of this behavior mathematically, let us consider a general,
fully discrete problem:
@f{align*}{
  A u^{n} = B u^{n-1}.
@f}
The general form of the @f$i@f$th equation then reads:
@f{align*}{
  a_{ii} u^{n}_i &= b_{ii} u^{n-1}_i +
  \sum\limits_{j \in S_i} \left( b_{ij} u^{n-1}_j - a_{ij} u^{n}_j \right),
@f}
where @f$S_i@f$ is the set of degrees of freedom that DoF @f$i@f$ couples
with (i.e., for which either the matrix @f$A@f$ or matrix @f$B@f$ has a
nonzero entry at position @f$(i,j)@f$). If all coefficients fulfill the
following conditions:
@f{align*}{
  a_{ii} &> 0, & b_{ii} &\geq 0, & a_{ij} &\leq 0, & b_{ij} &\geq 0,
@f}
all solutions @f$u^{n}@f$ keep their sign from the previous ones
@f$u^{n-1}@f$, and consequently from the initial values @f$u^0@f$. See e.g.
<a href="http://bookstore.siam.org/cs14/">Kuzmin, Hämäläinen</a>
for more information on positivity preservation.
Depending on the PDE to solve and the time integration scheme used, one is
able to deduce conditions for the time step @f$k_n@f$. For the heat equation
with the Crank-Nicolson scheme,
<a href="https://doi.org/10.2478/cmam-2010-0025">Schatz et al.</a> have
translated it to the following ones:
@f{align*}{
  (1 - \theta) k a_{ii} &\leq m_{ii},\qquad \forall i,
  \\
  \theta k \left| a_{ij} \right| &\geq m_{ij},\qquad j \neq i,
@f}
where @f$M = m_{ij}@f$ denotes the @ref GlossMassMatrix "mass matrix" and
@f$A = a_{ij}@f$ the stiffness matrix with @f$a_{ij} \leq 0@f$ for
@f$j \neq i@f$, respectively. With @f$a_{ij} \leq 0@f$, we can formulate
bounds for the global time step @f$k@f$ as follows:
@f{align*}{
  k_{\text{max}} &= \frac{ 1 }{ 1 - \theta }
  \min\left( \frac{ m_{ii} }{ a_{ii} } \right),~ \forall i,
  \\
  k_{\text{min}} &= \frac{ 1 }{ \theta }
  \max\left( \frac{ m_{ij} }{ \left|a_{ij}\right| } \right),~ j \neq i.
@f}
In other words, the time step is constrained by <i>both a lower and an upper
bound</i> in case of a Crank-Nicolson scheme. These bounds should be
considered along with the CFL condition to ensure significance of the
performed computations.
Being unable to make the time step as small as we want to get more accuracy
without losing the positivity property is annoying. It raises the question
of whether we can at least <i>compute</i> the minimal time step we can
choose to ensure positivity preservation in this particular tutorial.
Indeed, we can use the SparseMatrix objects for both mass and stiffness that
are created via the MatrixCreator functions. Iterating through each entry
via SparseMatrixIterators lets us check for diagonal and off-diagonal
entries to set a proper time step dynamically. For quadratic matrices, the
diagonal element is stored as the first member of a row (see SparseMatrix
documentation). An exemplary code snippet on how to grab the entries of
interest from the <code>mass_matrix</code> could look as follows:
@code
Assert (mass_matrix.m() == mass_matrix.n(), ExcNotQuadratic());
const unsigned int num_rows = mass_matrix.m();
double mass_matrix_min_diag    = std::numeric_limits<double>::max(),
       mass_matrix_max_offdiag = 0.;

SparseMatrixIterators::Iterator<double,true> row_it (&mass_matrix, 0);

for(unsigned int m = 0; m<num_rows; ++m)
{
  // check the diagonal element
  row_it = mass_matrix.begin(m);
  mass_matrix_min_diag = std::min(row_it->value(), mass_matrix_min_diag);
  ++row_it;

  // check the off-diagonal elements
  for(; row_it != mass_matrix.end(m); ++row_it)
    mass_matrix_max_offdiag = std::max(row_it->value(), mass_matrix_max_offdiag);
}
@endcode
Using the information so computed, we can bound the time step via the
formulas derived above.
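
As a continuation of that snippet, here is a hedged sketch (not part of the
tutorial) of how one might evaluate the two bounds directly from the
matrices of this program. It assumes access to the <code>mass_matrix</code>,
<code>laplace_matrix</code>, and <code>theta</code> members, and relies on
the fact that both matrices share the same sparsity pattern and store the
diagonal entry first in each row:
@code
double k_max = std::numeric_limits<double>::max(); // 1/(1-theta) * min_i m_ii/a_ii
double k_min = 0.;                                 // 1/theta * max_{j!=i} m_ij/|a_ij|

for (unsigned int row = 0; row < mass_matrix.m(); ++row)
  {
    SparseMatrix<double>::const_iterator m_it = mass_matrix.begin(row);
    SparseMatrix<double>::const_iterator a_it = laplace_matrix.begin(row);

    // The diagonal entry comes first in each row:
    k_max = std::min(k_max,
                     1. / (1. - theta) * m_it->value() / a_it->value());

    // Then the off-diagonal entries of that row:
    for (++m_it, ++a_it; m_it != mass_matrix.end(row); ++m_it, ++a_it)
      if (a_it->value() != 0.)
        k_min = std::max(k_min,
                         1. / theta * m_it->value() / std::abs(a_it->value()));
  }
@endcode
Any time step between <code>k_min</code> and <code>k_max</code> would then
satisfy the two conditions above.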
<a name="step_26-PlainProg"></a>
<h1> The plain program</h1>
@include "step-26.cc"