681 *                                    const unsigned int component) const
684 *     Assert(component == 0, ExcIndexRange(component, 0, 1));
693 * <a name="step_26-ThecodeHeatEquationcodeimplementation"></a>
694 * <h3>The <code>HeatEquation</code> implementation</h3>
698 * It is time now for the implementation of the main class. Let's
699 * start with the constructor which selects a linear element, a time
700 * step constant at 1/500 (remember that one period of the source
701 * on the right hand side was set to 0.2 above, so we resolve each
702 * period with 100 time steps) and chooses the Crank-Nicolson method
703 * by setting @f$\theta=1/2@f$.
707 * HeatEquation<dim>::HeatEquation()
709 * , dof_handler(triangulation)
710 * , time_step(1. / 500)
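 *
 * For orientation: the initializer list is shown only partially in this
 * excerpt. Based on the description above, the complete constructor
 * presumably also initializes the finite element and @f$\theta@f$, roughly
 * as in this sketch:
 * @code
 *   template <int dim>
 *   HeatEquation<dim>::HeatEquation()
 *     : fe(1)                     // linear element, as described above
 *     , dof_handler(triangulation)
 *     , time_step(1. / 500)       // 100 time steps per source period of 0.2
 *     , theta(1. / 2)             // Crank-Nicolson
 *   {}
 * @endcode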
719 * <a name="step_26-codeHeatEquationsetup_systemcode"></a>
720 * <h4><code>HeatEquation::setup_system</code></h4>
724 * The next function is the one that sets up the DoFHandler object,
725 * computes the constraints, and sets the linear algebra objects
726 * to their correct sizes. We also compute the mass and Laplace
727 * matrix here by simply calling two functions in the library.
731 * Note that we do not take the hanging node constraints into account when
732 * assembling the matrices (both functions have an AffineConstraints argument
733 * that defaults to an empty object). This is because we are going to
734 * condense the constraints in run() after combining the matrices for the
735 * current time step.
739 * void HeatEquation<dim>::setup_system()
741 * dof_handler.distribute_dofs(fe);
743 * std::cout << std::endl
744 * << "===========================================" << std::endl
745 * << "Number of active cells: " << triangulation.n_active_cells()
747 * << "Number of degrees of freedom: " << dof_handler.n_dofs()
751 * constraints.clear();
752 * DoFTools::make_hanging_node_constraints(dof_handler, constraints);
753 * constraints.close();
755 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
756 * DoFTools::make_sparsity_pattern(dof_handler,
759 * /*keep_constrained_dofs = */ true);
760 * sparsity_pattern.copy_from(dsp);
762 * mass_matrix.reinit(sparsity_pattern);
763 * laplace_matrix.reinit(sparsity_pattern);
764 * system_matrix.reinit(sparsity_pattern);
766 * MatrixCreator::create_mass_matrix(dof_handler,
767 * QGauss<dim>(fe.degree + 1),
769 * MatrixCreator::create_laplace_matrix(dof_handler,
770 * QGauss<dim>(fe.degree + 1),
773 * solution.reinit(dof_handler.n_dofs());
774 * old_solution.reinit(dof_handler.n_dofs());
775 * system_rhs.reinit(dof_handler.n_dofs());
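 *
 * The sparsity pattern and matrix creation calls appear truncated in this
 * excerpt; spelled out, they presumably read as in the following sketch
 * (note that, as explained above, the constraints object is passed to
 * DoFTools::make_sparsity_pattern but not to the MatrixCreator functions):
 * @code
 *   DoFTools::make_sparsity_pattern(dof_handler,
 *                                   dsp,
 *                                   constraints,
 *                                   /*keep_constrained_dofs = */ true);
 *   sparsity_pattern.copy_from(dsp);
 *
 *   MatrixCreator::create_mass_matrix(dof_handler,
 *                                     QGauss<dim>(fe.degree + 1),
 *                                     mass_matrix);
 *   MatrixCreator::create_laplace_matrix(dof_handler,
 *                                        QGauss<dim>(fe.degree + 1),
 *                                        laplace_matrix);
 * @endcode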
782 * <a name="step_26-codeHeatEquationsolve_time_stepcode"></a>
783 * <h4><code>HeatEquation::solve_time_step</code></h4>
787 * The next function is the one that solves the actual linear system
788 * for a single time step. There is nothing surprising here:
792 * void HeatEquation<dim>::solve_time_step()
794 * SolverControl solver_control(1000, 1e-8 * system_rhs.l2_norm());
795 * SolverCG<Vector<double>> cg(solver_control);
797 * PreconditionSSOR<SparseMatrix<double>> preconditioner;
798 * preconditioner.initialize(system_matrix, 1.0);
800 * cg.solve(system_matrix, solution, system_rhs, preconditioner);
802 * constraints.distribute(solution);
804 * std::cout << " " << solver_control.last_step() << " CG iterations."
813 * <a name="step_26-codeHeatEquationoutput_resultscode"></a>
814 * <h4><code>HeatEquation::output_results</code></h4>
818 * Neither is there anything new in generating graphical output other than the
819 * fact that we tell the DataOut object what the current time and time step
820 * number is, so that this can be written into the output file:
824 * void HeatEquation<dim>::output_results() const
826 * DataOut<dim> data_out;
828 * data_out.attach_dof_handler(dof_handler);
829 * data_out.add_data_vector(solution, "U");
831 * data_out.build_patches();
833 * data_out.set_flags(DataOutBase::VtkFlags(time, timestep_number));
835 * const std::string filename =
836 * "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtk";
837 * std::ofstream output(filename);
838 * data_out.write_vtk(output);
845 * <a name="step_26-codeHeatEquationrefine_meshcode"></a>
846 * <h4><code>HeatEquation::refine_mesh</code></h4>
850 * This function is the interesting part of the program. It takes care of
851 * the adaptive mesh refinement. The three tasks
852 * this function performs are to first find out which cells to
853 * refine/coarsen, then to actually do the refinement, and finally to
854 * transfer the solution vectors between the two different grids. The first
855 * task is simply achieved by using the well-established Kelly error
856 * estimator on the solution. The second task is to actually do the
857 * remeshing. That involves only basic functions as well, such as the
858 * <code>refine_and_coarsen_fixed_fraction</code> that refines those cells
859 * with the largest estimated error that together make up 60 per cent of the
860 * error, and coarsens those cells with the smallest error that make up for
861 * a combined 40 per cent of the error. Note that for problems such as the
862 * current one where the areas where something is going on are shifting
863 * around, we want to aggressively coarsen so that we can move cells
864 * around to where it is necessary.
868 * As already discussed in the introduction, too small a mesh leads to
869 * too small a time step, whereas too large a mesh leads to too little
870 * resolution. Consequently, after the first two steps, we have two
871 * loops that limit refinement and coarsening to an allowable range of
872 * levels.
876 * void HeatEquation<dim>::refine_mesh(const unsigned int min_grid_level,
877 * const unsigned int max_grid_level)
879 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
881 * KellyErrorEstimator<dim>::estimate(
883 * QGauss<dim - 1>(fe.degree + 1),
884 * std::map<types::boundary_id, const Function<dim> *>(),
886 * estimated_error_per_cell);
888 * GridRefinement::refine_and_coarsen_fixed_fraction(triangulation,
889 * estimated_error_per_cell,
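 *
 * The arguments elided from the two calls above are presumably the solution
 * vector for the error estimator and the refine/coarsen fractions of 60 and
 * 40 per cent mentioned earlier, i.e., something along these lines:
 * @code
 *   KellyErrorEstimator<dim>::estimate(
 *     dof_handler,
 *     QGauss<dim - 1>(fe.degree + 1),
 *     std::map<types::boundary_id, const Function<dim> *>(),
 *     solution,
 *     estimated_error_per_cell);
 *
 *   GridRefinement::refine_and_coarsen_fixed_fraction(triangulation,
 *                                                     estimated_error_per_cell,
 *                                                     0.6,
 *                                                     0.4);
 * @endcode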
893 * if (triangulation.n_levels() > max_grid_level)
894 * for (const auto &cell :
895 * triangulation.active_cell_iterators_on_level(max_grid_level))
896 * cell->clear_refine_flag();
897 * for (const auto &cell :
898 * triangulation.active_cell_iterators_on_level(min_grid_level))
899 * cell->clear_coarsen_flag();
902 * These two loops above are slightly different but this is easily
903 * explained. In the first loop, instead of calling
904 * <code>triangulation.end()</code> we may as well have called
905 * <code>triangulation.end_active(max_grid_level)</code>. The two
906 * calls should yield the same iterator since iterators are sorted
907 * by level and there should not be any cells on levels higher
908 * than level <code>max_grid_level</code>. In fact, this very piece
909 * of code makes sure that this is the case.
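 *
 * For reference, the first of the two loops written with explicit iterators
 * would look like the following sketch; by the argument just made, either
 * end call yields the same iterator:
 * @code
 *   for (auto cell = triangulation.begin_active(max_grid_level);
 *        cell != triangulation.end_active(max_grid_level); // or: triangulation.end()
 *        ++cell)
 *     cell->clear_refine_flag();
 * @endcode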
913 * As part of mesh refinement we need to transfer the solution vectors
914 * from the old mesh to the new one. To this end we use the
915 * SolutionTransfer class and we have to prepare the solution vectors that
916 * should be transferred to the new grid (we will lose the old grid once
917 * we have done the refinement so the transfer has to happen concurrently
918 * with refinement). At the point where we call this function, we will
919 * have just computed the solution, so we no longer need the old_solution
920 * variable (it will be overwritten by the solution just after the mesh
921 * may have been refined, i.e., at the end of the time step; see below).
922 * In other words, we only need the one solution vector, and we copy it
923 * to a temporary object where it is safe from being reset when we further
924 * down below call <code>setup_system()</code>.
928 * Consequently, we initialize a SolutionTransfer object by attaching
929 * it to the old DoF handler. We then prepare the triangulation and the
930 * data vector for refinement (in this order).
933 * SolutionTransfer<dim> solution_trans(dof_handler);
935 * Vector<double> previous_solution;
936 * previous_solution = solution;
937 * triangulation.prepare_coarsening_and_refinement();
938 * solution_trans.prepare_for_coarsening_and_refinement(previous_solution);
942 * Now everything is ready, so do the refinement and recreate the DoF
943 * structure on the new grid, and finally initialize the matrix structures
944 * and the new vectors in the <code>setup_system</code> function. Next, we
945 * actually perform the interpolation of the solution from old to new
946 * grid. The final step is to apply the hanging node constraints to the
947 * solution vector, i.e., to make sure that the values of degrees of
948 * freedom located on hanging nodes are such that the solution is
949 * continuous. This is necessary since SolutionTransfer only operates on
950 * cells locally, without regard to the neighborhood.
953 * triangulation.execute_coarsening_and_refinement();
956 * solution_trans.interpolate(solution);
957 * constraints.distribute(solution);
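 *
 * The call elided between refinement and interpolation above is presumably
 * the <code>setup_system()</code> call described in the previous paragraph,
 * so that the overall sequence reads roughly:
 * @code
 *   triangulation.execute_coarsening_and_refinement();
 *   setup_system();
 *
 *   solution_trans.interpolate(solution);
 *   constraints.distribute(solution);
 * @endcode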
965 * <a name="step_26-codeHeatEquationruncode"></a>
966 * <h4><code>HeatEquation::run</code></h4>
970 * This is the main driver of the program, where we loop over all
971 * time steps. At the top of the function, we set the number of
972 * initial global mesh refinements and the number of initial cycles of
973 * adaptive mesh refinement by repeating the first time step a few
974 * times. Then we create a mesh, initialize the various objects we will
975 * work with, set a label for where we should start when re-running
976 * the first time step, and interpolate the initial solution onto
977 * our mesh (we choose the zero function here, which of course we could
978 * do in a simpler way by just setting the solution vector to zero). We
979 * also output the initial time step once.
983 * @note If you're an experienced programmer, you may be surprised
984 * that we use a <code>goto</code> statement in this piece of code!
985 * <code>goto</code> statements are not particularly well liked any
986 * more since Edsger Dijkstra, one of the greats of computer science,
987 * wrote a letter in 1968 called "Go To Statement Considered Harmful"
988 * (see <a href="http://en.wikipedia.org/wiki/Considered_harmful">here</a>).
989 * The author of this code subscribes to this notion whole-heartedly:
990 * <code>goto</code> is hard to understand. In fact, deal.II contains
991 * virtually no occurrences: excluding code that was essentially
992 * transcribed from books and not counting duplicated code pieces,
993 * there are 3 locations in about 600,000 lines of code at the time
994 * this note is written; we also use it in 4 tutorial programs, in
995 * exactly the same context as here. Instead of trying to justify
996 * the occurrence here, let's first look at the code and we'll come
997 * back to the issue at the end of the function.
1000 * template <int dim>
1001 * void HeatEquation<dim>::run()
1003 * const unsigned int initial_global_refinement = 2;
1004 * const unsigned int n_adaptive_pre_refinement_steps = 4;
1011 * unsigned int pre_refinement_step = 0;
1016 * start_time_iteration:
1019 * timestep_number = 0;
1021 * tmp.reinit(solution.size());
1022 * forcing_terms.reinit(solution.size());
1028 * solution = old_solution;
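 *
 * The interpolation of the zero initial condition mentioned above is elided
 * in this excerpt; it presumably uses VectorTools::interpolate along the
 * lines of this sketch:
 * @code
 *   VectorTools::interpolate(dof_handler,
 *                            Functions::ZeroFunction<dim>(),
 *                            old_solution);
 *   solution = old_solution;
 *
 *   output_results();
 * @endcode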
1034 * Then we start the main loop until the computed time exceeds our
1035 * end time of 0.5. The first task is to build the right hand
1036 * side of the linear system we need to solve in each time step.
1037 * Recall that it contains the term @f$MU^{n-1}-(1-\theta)k_n AU^{n-1}@f$.
1038 * We put these terms into the variable system_rhs, with the
1039 * help of a temporary vector:
1042 * const double end_time = 0.5;
1043 * while (time <= end_time)
1045 * time += time_step;
1046 * ++timestep_number;
1048 * std::cout << "Time step " << timestep_number << " at t=" << time
1053 * laplace_matrix.vmult(tmp, old_solution);
1054 * system_rhs.add(-(1 - theta) * time_step, tmp);
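 *
 * The first of the two matrix-vector products, @f$MU^{n-1}@f$, is elided in
 * this excerpt; the complete right hand side assembly presumably reads:
 * @code
 *   mass_matrix.vmult(system_rhs, old_solution);
 *
 *   laplace_matrix.vmult(tmp, old_solution);
 *   system_rhs.add(-(1 - theta) * time_step, tmp);
 * @endcode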
1058 * The second piece is to compute the contributions of the source
1059 * terms. This corresponds to the term @f$k_n
1060 * \left[ (1-\theta)F^{n-1} + \theta F^n \right]@f$. The following
1061 * code calls VectorTools::create_right_hand_side to compute the
1062 * vectors @f$F@f$, where we set the time of the right hand side
1063 * (source) function before we evaluate it. The result of this
1064 * all ends up in the forcing_terms variable:
1067 * RightHandSide<dim> rhs_function;
1068 * rhs_function.set_time(time);
1073 * forcing_terms = tmp;
1074 * forcing_terms *= time_step * theta;
1076 * rhs_function.set_time(time - time_step);
1082 * forcing_terms.add(time_step * (1 - theta), tmp);
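 *
 * Spelled out, the two evaluations of @f$F@f$ presumably use
 * VectorTools::create_right_hand_side, along the lines of this sketch:
 * @code
 *   RightHandSide<dim> rhs_function;
 *   rhs_function.set_time(time);
 *   VectorTools::create_right_hand_side(dof_handler,
 *                                       QGauss<dim>(fe.degree + 1),
 *                                       rhs_function,
 *                                       tmp);
 *   forcing_terms = tmp;
 *   forcing_terms *= time_step * theta;
 *
 *   rhs_function.set_time(time - time_step);
 *   VectorTools::create_right_hand_side(dof_handler,
 *                                       QGauss<dim>(fe.degree + 1),
 *                                       rhs_function,
 *                                       tmp);
 *   forcing_terms.add(time_step * (1 - theta), tmp);
 * @endcode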
1086 * Next, we add the forcing terms to the ones that
1087 * come from the time stepping, and also build the matrix
1088 * @f$M+k_n\theta A@f$ that we have to invert in each time step.
1089 * The final piece of these operations is to eliminate
1090 * hanging node constrained degrees of freedom from the
1091 * linear system:
1094 * system_rhs += forcing_terms;
1096 * system_matrix.copy_from(mass_matrix);
1097 * system_matrix.add(theta * time_step, laplace_matrix);
1099 * constraints.condense(system_matrix, system_rhs);
1103 * There is one more operation we need to do before we
1104 * can solve it: boundary values. To this end, we create
1105 * a boundary value object, set the proper time to the one
1106 * of the current time step, and evaluate it as we have
1107 * done many times before. The result is used to also
1108 * set the correct boundary values in the linear system:
1112 * BoundaryValues<dim> boundary_values_function;
1113 * boundary_values_function.set_time(time);
1115 * std::map<types::global_dof_index, double> boundary_values;
1118 * boundary_values_function,
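 *
 * The truncated calls above are presumably the usual pair of
 * VectorTools::interpolate_boundary_values and
 * MatrixTools::apply_boundary_values, roughly as follows:
 * @code
 *   BoundaryValues<dim> boundary_values_function;
 *   boundary_values_function.set_time(time);
 *
 *   std::map<types::global_dof_index, double> boundary_values;
 *   VectorTools::interpolate_boundary_values(dof_handler,
 *                                            0,
 *                                            boundary_values_function,
 *                                            boundary_values);
 *
 *   MatrixTools::apply_boundary_values(boundary_values,
 *                                      system_matrix,
 *                                      solution,
 *                                      system_rhs);
 * @endcode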
1129 * With this out of the way, all we have to do is solve the
1130 * system, generate graphical data, and...
1133 * solve_time_step();
1139 * ...take care of mesh refinement. Here, what we want to do is
1140 * (i) refine the requested number of times at the very beginning
1141 * of the solution procedure, after which we jump to the top to
1142 * restart the time iteration, and (ii) refine every fifth time
1143 * step after that.
1147 * The time loop and, indeed, the main part of the program ends
1148 * with starting into the next time step by setting old_solution
1149 * to the solution we have just computed.
1152 * if ((timestep_number == 1) &&
1153 * (pre_refinement_step < n_adaptive_pre_refinement_steps))
1155 * refine_mesh(initial_global_refinement,
1156 * initial_global_refinement +
1157 * n_adaptive_pre_refinement_steps);
1158 * ++pre_refinement_step;
1160 * std::cout << std::endl;
1162 * goto start_time_iteration;
1164 * else if ((timestep_number > 0) && (timestep_number % 5 == 0))
1166 * refine_mesh(initial_global_refinement,
1167 * initial_global_refinement +
1168 * n_adaptive_pre_refinement_steps);
1169 * tmp.reinit(solution.size());
1170 * forcing_terms.reinit(solution.size());
1173 * old_solution = solution;
1179 * Now that you have seen what the function does, let us come back to the issue
1180 * of the <code>goto</code>. In essence, what the code does is
1181 * something like this:
1182 * <div class=CodeFragmentInTutorialComment>
1187 * start_time_iteration:
1188 * for (timestep=1...)
1191 * if (timestep==1 && not happy with the result)
1193 * adjust some data structures;
1194 * goto start_time_iteration;
1201 * Here, the condition "happy with the result" is whether we'd like to keep
1202 * the current mesh or would rather refine the mesh and start over on the
1203 * new mesh. We could of course replace the use of the <code>goto</code>
1204 * by the following kind of code:
1205 * <div class=CodeFragmentInTutorialComment>
1213 * if (not happy with the result)
1214 * adjust some data structures;
1221 * for (timestep=2...)
1229 * This has the advantage of getting rid of the <code>goto</code>
1230 * but the disadvantage of having to duplicate the code that implements
1231 * the "solve timestep" and "postprocess" operations in two different
1232 * places. This could be countered by putting these parts of the code
1233 * (sizable chunks in the actual implementation above) into their
1234 * own functions, but a <code>while(true)</code> loop with a
1235 * <code>break</code> statement is not really all that much easier
1236 * to read or understand than a <code>goto</code>.
1240 * In the end, one might simply agree that <i>in general</i>
1241 * <code>goto</code> statements are a bad idea but be pragmatic and
1242 * state that there may be occasions where they can help avoid code
1243 * duplication and awkward control flow. This may be one of these
1244 * places, and it matches the position Steve McConnell takes in his
1245 * excellent book "Code Complete" @cite CodeComplete about good
1246 * programming practices (see the mention of this book in the
1247 * introduction of @ref step_1 "step-1") that spends a surprising ten pages on the
1248 * question of <code>goto</code> in general.
1256 * <a name="step_26-Thecodemaincodefunction"></a>
1257 * <h3>The <code>main</code> function</h3>
1261 * Having made it this far, there is, again, nothing
1262 * much to discuss for the main function of this
1263 * program: it looks like all such functions since @ref step_6 "step-6".
1270 * using namespace Step26;
1272 * HeatEquation<2> heat_equation_solver;
1273 * heat_equation_solver.run();
1275 * catch (std::exception &exc)
1277 * std::cerr << std::endl
1279 * << "----------------------------------------------------"
1281 * std::cerr << "Exception on processing: " << std::endl
1282 * << exc.what() << std::endl
1283 * << "Aborting!" << std::endl
1284 * << "----------------------------------------------------"
1291 * std::cerr << std::endl
1293 * << "----------------------------------------------------"
1295 * std::cerr << "Unknown exception!" << std::endl
1296 * << "Aborting!" << std::endl
1297 * << "----------------------------------------------------"
<a name="step_26-Results"></a><h1>Results</h1>

As in many of the tutorials, the actual output of the program matters less
than how we arrived there. Nonetheless, here it is:
===========================================
Number of active cells: 48
Number of degrees of freedom: 65

Time step 1 at t=0.002

===========================================
Number of active cells: 60
Number of degrees of freedom: 81

Time step 1 at t=0.002

===========================================
Number of active cells: 105
Number of degrees of freedom: 136

Time step 1 at t=0.002

Time step 249 at t=0.498

Time step 250 at t=0.5

===========================================
Number of active cells: 1803
Number of degrees of freedom: 2109
Maybe of more interest is a visualization of the solution and the mesh on which
it is computed:

<img src="https://www.dealii.org/images/steps/developer/step-26.movie.gif" alt="Animation of the solution of step 26.">

The movie shows how the two sources switch on and off and how the mesh reacts
to this. It is quite obvious that the mesh, as it is, is probably not the best
we could come up with. We'll get back to this in the next section.
<a name="step-26-extensions"></a>
<a name="step_26-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

There are at least two areas where one can improve this program significantly:
adaptive time stepping and a better choice of the mesh.

<a name="step_26-Adaptivetimestepping"></a><h4>Adaptive time stepping</h4>
Having chosen an implicit time stepping scheme, we are not bound by any
CFL-like condition on the time step. Furthermore, because the time scales on
which change happens on a given cell in the heat equation are not bound to the
cell's diameter (unlike the case with the wave equation, where we had a fixed
speed of information transport that couples the temporal and spatial scales),
we can choose the time step as we please. Or, better, choose it as we deem
necessary for accuracy.

Looking at the solution, it is clear that the action does not happen uniformly
over time: a lot is changing around the times when we switch on a source, things
become less dramatic once a source is on for a little while, and we enter a
long phase of decline when both sources are off. During these times, we could
surely get away with a larger time step than before without sacrificing too
much accuracy.
The literature has many suggestions on how to choose the time step size
adaptively. Much can be learned, for example, from the way ODE solvers choose
their time steps. One can also be inspired by a posteriori error estimators
that can, ideally, be written in a way that they consist of a temporal and a
spatial contribution to the overall error. If the temporal one is too large,
we should choose a smaller time step. Ideas in this direction can be found,
for example, in the PhD thesis of a former principal developer of deal.II,
Ralf Hartmann, published by the University of Heidelberg, Germany, in 2002.
<a name="step_26-Bettertimesteppingmethods"></a><h4>Better time stepping methods</h4>

We here use one of the simpler time stepping methods, namely the second-order
(in time) Crank-Nicolson method. This is surely already better than the even
more widely used (and even less accurate) implicit (=backward) Euler method,
but many other, more accurate methods such as BDF or
Runge-Kutta methods are available and should be used as they do not represent
much additional effort. It is not difficult to implement this for the current
program, if one wanted; a more systematic treatment is also given in @ref step_86
"step-86".
As a general rule, however, one should not be implementing time stepping
methods by hand, as we do here, for problems that do not require
exploiting special properties of the equation and consequently require
specialized time stepping methods. (The heat equation does not fall into
this category, and "standard" time stepping methods are all we need here.)
Rather, one should use one of the available
high-quality libraries for time stepping, for the same reasons as one should
not be implementing finite element methods by hand but use deal.II instead.
Indeed, deal.II has interfaces to two such time stepping libraries, among them the
[PETSc TS package](https:
Such interfaces provide a good starting
point for the use of much better (and much more accurate) time steppers, and
the methods one would then get also implement the automatic time step
control mentioned above. To see how this works, take a look at
@ref step_86 "step-86".
<a name="step_26-Betterrefinementcriteria"></a><h4>Better refinement criteria</h4>

If you look at the meshes in the movie above, it is clear that they are not
particularly well suited to the task at hand. In fact, they look rather random.
There are two factors at play. First, there are some islands where cells
have been refined but that are surrounded by non-refined cells (and there
are probably also a few occasional coarsened islands). These are not terrible,
as they most of the time do not affect the approximation quality of the mesh,
but they also don't help because so many of their additional degrees of
freedom are in fact constrained by hanging node constraints. That said,
this is easy to fix: the Triangulation class takes an argument to its
constructor indicating a level of "mesh smoothing". Passing one of many
possible flags, this instructs the triangulation to refine some additional
cells, or not to refine some cells, so that the resulting mesh does not have
such islands.
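A minimal sketch of what this could look like, assuming one picks the most
aggressive of the available flags (any other Triangulation<dim>::MeshSmoothing
value would work the same way):
@code
Triangulation<dim> triangulation(Triangulation<dim>::maximum_smoothing);
@endcode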
The second problem is more severe: the mesh appears to lag the solution.
The underlying reason is that we only adapt the mesh once every fifth
time step, and only allow for a single refinement in these cases. Whenever a
source switches on, the solution had been very smooth in this area before and
the mesh was consequently rather coarse. This implies that the next time step
when we refine the mesh, we will get one refinement level more in this area,
and five time steps later another level, etc. But this is not enough: first,
we should refine immediately when a source switches on (after all, in the
current context we at least know what the right hand side is), and we should
allow for more than one refinement level. Of course, all of this can be done
using deal.II; it just requires a bit of algorithmic thinking in how to make
this work!
<a name="step_26-Positivitypreservation"></a><h4>Positivity preservation</h4>

To increase the accuracy and resolution of your simulation in time, one
typically decreases the time step size @f$k_n@f$. If you start playing around
with the time step in this particular example, you will notice that the
solution becomes partly negative if @f$k_n@f$ is below a certain threshold.
This is not what we would expect to happen (in nature).
To get an idea of this behavior mathematically, let us consider a general,
fully discrete problem:
@f{align*}{
  A u^{n} = B u^{n-1}.
@f}
The general form of the @f$i@f$th equation then reads:
@f{align*}{
  a_{ii} u^{n}_i &= b_{ii} u^{n-1}_i +
  \sum\limits_{j \in S_i} \left( b_{ij} u^{n-1}_j - a_{ij} u^{n}_j \right),
@f}
where @f$S_i@f$ is the set of degrees of freedom that DoF @f$i@f$ couples with (i.e.,
for which either the matrix @f$A@f$ or matrix @f$B@f$ has a nonzero entry at position
@f$(i,j)@f$). If all coefficients
fulfill the following conditions:
@f{align*}{
  a_{ii} &> 0, & b_{ii} &\geq 0, & a_{ij} &\leq 0, & b_{ij} &\geq 0,
@f}
all solutions @f$u^{n}@f$ keep their sign from the previous ones @f$u^{n-1}@f$, and
consequently from the initial values @f$u^0@f$. See e.g.
<a href="http://bookstore.siam.org/cs14/">Kuzmin, Hämäläinen</a>
for more information on positivity preservation.
Depending on the PDE to solve and the time integration scheme used, one is
able to deduce conditions for the time step @f$k_n@f$. For the heat equation with
the Crank-Nicolson scheme,
<a href="https://doi.org/10.2478/cmam-2010-0025">Schatz et al.</a> have
translated it to the following ones:
@f{align*}{
  (1 - \theta) k a_{ii} &\leq m_{ii},\qquad \forall i, \\
  \theta k \left| a_{ij} \right| &\geq m_{ij},\qquad j \neq i,
@f}
where @f$M = m_{ij}@f$ denotes the @ref GlossMassMatrix "mass matrix" and @f$A = a_{ij}@f$ the stiffness
matrix with @f$a_{ij} \leq 0@f$ for @f$j \neq i@f$, respectively. With
@f$a_{ij} \leq 0@f$, we can formulate bounds for the global time step @f$k@f$ as follows:
@f{align*}{
  k_{\text{max}} &= \frac{ 1 }{ 1 - \theta }
  \min\left( \frac{ m_{ii} }{ a_{ii} } \right),~ \forall i, \\
  k_{\text{min}} &= \frac{ 1 }{ \theta }
  \max\left( \frac{ m_{ij} }{ \left|a_{ij}\right| } \right),~ j \neq i.
@f}
In other words, the time step is constrained by <i>both a lower
and upper bound</i> in case of a Crank-Nicolson scheme. These bounds should be
considered along with the CFL condition to ensure significance of the performed
computations.
Being unable to make the time step as small as we want to get more
accuracy without losing the positivity property is annoying. It raises
the question of whether we can at least <i>compute</i> the minimal time step
we can choose to ensure positivity preservation in this particular tutorial.
Indeed, we can, by using
the SparseMatrix objects for both mass and stiffness that are created via
the MatrixCreator functions. Iterating through each entry via SparseMatrixIterators
lets us check for diagonal and off-diagonal entries to set a proper time step
dynamically. For square matrices, the diagonal element is stored as the
first member of a row (see the SparseMatrix documentation). An example code
snippet showing how to grab the entries of interest from the <code>mass_matrix</code>
follows:
@code
Assert (mass_matrix.m() == mass_matrix.n(), ExcNotQuadratic());
const unsigned int num_rows = mass_matrix.m();
double mass_matrix_min_diag    = std::numeric_limits<double>::max(),
       mass_matrix_max_offdiag = 0.;

SparseMatrixIterators::Iterator<double,true> row_it (&mass_matrix, 0);

for(unsigned int m = 0; m<num_rows; ++m)
{
  // check the diagonal element
  row_it = mass_matrix.begin(m);
  mass_matrix_min_diag = std::min(row_it->value(), mass_matrix_min_diag);
  ++row_it;

  // check the off-diagonal elements
  for(; row_it != mass_matrix.end(m); ++row_it)
    mass_matrix_max_offdiag = std::max(row_it->value(), mass_matrix_max_offdiag);
}
@endcode
Using the information so computed, we can bound the time step via the formulas
derived above.
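A sketch of how one might turn these bounds into numbers inside the
<code>HeatEquation</code> class (where <code>theta</code> and both matrices
are available) is shown below. The variable names <code>k_max_bound</code>
and <code>k_min_bound</code> are made up for illustration, and the code
relies on the fact that, in this program, the mass and Laplace matrices
share one sparsity pattern:
@code
double k_max_bound = std::numeric_limits<double>::max();
double k_min_bound = 0.;

SparseMatrixIterators::Iterator<double, true> mass_it(&mass_matrix, 0);
SparseMatrixIterators::Iterator<double, true> laplace_it(&laplace_matrix, 0);

for (unsigned int m = 0; m < mass_matrix.m(); ++m)
  {
    // The diagonal entry is stored first in each row:
    // k_max = 1/(1-theta) * min_i (m_ii / a_ii).
    mass_it    = mass_matrix.begin(m);
    laplace_it = laplace_matrix.begin(m);
    k_max_bound = std::min(k_max_bound,
                           mass_it->value() /
                             ((1. - theta) * laplace_it->value()));
    ++mass_it;
    ++laplace_it;

    // Off-diagonal entries: k_min = 1/theta * max_{j != i} (m_ij / |a_ij|).
    for (; mass_it != mass_matrix.end(m); ++mass_it, ++laplace_it)
      if (std::abs(laplace_it->value()) > 0.)
        k_min_bound = std::max(k_min_bound,
                               mass_it->value() /
                                 (theta * std::abs(laplace_it->value())));
  }
@endcode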
<a name="step_26-PlainProg"></a>
<h1> The plain program</h1>
@include "step-26.cc"