679 *                                    const unsigned int component) const
682 *     Assert(component == 0, ExcIndexRange(component, 0, 1));
691 * <a name="ThecodeHeatEquationcodeimplementation"></a>
692 * <h3>The <code>HeatEquation</code> implementation</h3>
696 * It is time now for the implementation of the main class. Let's
697 * start with the constructor which selects a linear element, a time
698 * step constant at 1/500 (remember that one period of the source
699 * on the right hand side was set to 0.2 above, so we resolve each
700 * period with 100 time steps) and chooses the Crank-Nicolson method
701 * by setting @f$\theta=1/2@f$.
705 *   HeatEquation<dim>::HeatEquation()
 *     : fe(1)
707 *     , dof_handler(triangulation)
708 *     , time_step(1. / 500)
 *     , theta(1. / 2)
 *   {}
717 * <a name="codeHeatEquationsetup_systemcode"></a>
718 * <h4><code>HeatEquation::setup_system</code></h4>
722 * The next function is the one that sets up the DoFHandler object,
723 * computes the constraints, and sets the linear algebra objects
724 * to their correct sizes. We also compute the mass and Laplace
725 * matrix here by simply calling two functions in the library.
729 * Note that we do not take the hanging node constraints into account when
730 * assembling the matrices (both functions have an AffineConstraints argument
731 * that defaults to an empty object). This is because we are going to
732 * condense the constraints in run() after combining the matrices for the
 * current time step.
737 * void HeatEquation<dim>::setup_system()
739 * dof_handler.distribute_dofs(fe);
741 * std::cout << std::endl
742 * << "===========================================" << std::endl
743 * << "Number of active cells: " << triangulation.n_active_cells()
745 * << "Number of degrees of freedom: " << dof_handler.n_dofs()
749 * constraints.clear();
750 * DoFTools::make_hanging_node_constraints(dof_handler, constraints);
751 * constraints.close();
753 * DynamicSparsityPattern dsp(dof_handler.n_dofs());
754 * DoFTools::make_sparsity_pattern(dof_handler,
 *                                 dsp,
 *                                 constraints,
757 *                                 /*keep_constrained_dofs = */ true);
758 * sparsity_pattern.copy_from(dsp);
760 * mass_matrix.reinit(sparsity_pattern);
761 * laplace_matrix.reinit(sparsity_pattern);
762 * system_matrix.reinit(sparsity_pattern);
764 * MatrixCreator::create_mass_matrix(dof_handler,
765 *                                   QGauss<dim>(fe.degree + 1),
 *                                   mass_matrix);
767 * MatrixCreator::create_laplace_matrix(dof_handler,
768 *                                      QGauss<dim>(fe.degree + 1),
 *                                      laplace_matrix);
771 * solution.reinit(dof_handler.n_dofs());
772 * old_solution.reinit(dof_handler.n_dofs());
773 * system_rhs.reinit(dof_handler.n_dofs());
780 * <a name="codeHeatEquationsolve_time_stepcode"></a>
781 * <h4><code>HeatEquation::solve_time_step</code></h4>
785 * The next function is the one that solves the actual linear system
786 * for a single time step. There is nothing surprising here:
790 * void HeatEquation<dim>::solve_time_step()
792 * SolverControl solver_control(1000, 1e-8 * system_rhs.l2_norm());
793 * SolverCG<Vector<double>> cg(solver_control);
795 * PreconditionSSOR<SparseMatrix<double>> preconditioner;
796 * preconditioner.initialize(system_matrix, 1.0);
798 * cg.solve(system_matrix, solution, system_rhs, preconditioner);
800 * constraints.distribute(solution);
802 * std::cout << " " << solver_control.last_step() << " CG iterations."
811 * <a name="codeHeatEquationoutput_resultscode"></a>
812 * <h4><code>HeatEquation::output_results</code></h4>
816 * Neither is there anything new in generating graphical output other than the
817 * fact that we tell the DataOut object what the current time and time step
818 * number is, so that this can be written into the output file:
822 * void HeatEquation<dim>::output_results() const
824 * DataOut<dim> data_out;
826 * data_out.attach_dof_handler(dof_handler);
827 * data_out.add_data_vector(solution, "U");
829 * data_out.build_patches();
831 * data_out.set_flags(DataOutBase::VtkFlags(time, timestep_number));
833 * const std::string filename =
834 * "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtk";
835 * std::ofstream output(filename);
836 * data_out.write_vtk(output);
843 * <a name="codeHeatEquationrefine_meshcode"></a>
844 * <h4><code>HeatEquation::refine_mesh</code></h4>
848 * This function is the interesting part of the program. It takes care of
849 * the adaptive mesh refinement. The three tasks
850 * this function performs are to first find out which cells to
851 * refine/coarsen, then to actually do the refinement, and finally to
852 * transfer the solution vectors between the two different grids. The first
853 * task is simply achieved by using the well-established Kelly error
854 * estimator on the solution. The second task is to actually do the
855 * remeshing. That involves only basic functions as well, such as the
856 * <code>refine_and_coarsen_fixed_fraction</code> that refines those cells
857 * with the largest estimated error that together make up 60 per cent of the
858 * error, and coarsens those cells with the smallest error that make up for
859 * a combined 40 per cent of the error. Note that for problems such as the
860 * current one where the areas where something is going on are shifting
861 * around, we want to aggressively coarsen so that we can move cells
862 * around to where it is necessary.
866 * As already discussed in the introduction, too small a mesh leads to
867 * too small a time step, whereas too large a mesh leads to too little
868 * resolution. Consequently, after the first two steps, we have two
869 * loops that limit refinement and coarsening to an allowable range of cells:
874 * void HeatEquation<dim>::refine_mesh(const unsigned int min_grid_level,
875 * const unsigned int max_grid_level)
877 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
879 * KellyErrorEstimator<dim>::estimate(
 *   dof_handler,
881 *   QGauss<dim - 1>(fe.degree + 1),
882 *   std::map<types::boundary_id, const Function<dim> *>(),
 *   solution,
884 *   estimated_error_per_cell);
886 * GridRefinement::refine_and_coarsen_fixed_fraction(triangulation,
887 *                                                    estimated_error_per_cell,
 *                                                    0.6,
 *                                                    0.4);
891 * if (triangulation.n_levels() > max_grid_level)
892 * for (const auto &cell :
893 * triangulation.active_cell_iterators_on_level(max_grid_level))
894 * cell->clear_refine_flag();
895 * for (const auto &cell :
896 * triangulation.active_cell_iterators_on_level(min_grid_level))
897 * cell->clear_coarsen_flag();
900 * These two loops above are slightly different but this is easily
901 * explained. In the first loop, instead of calling
902 * <code>triangulation.end()</code> we may as well have called
903 * <code>triangulation.end_active(max_grid_level)</code>. The two
904 * calls should yield the same iterator since iterators are sorted
905 * by level and there should not be any cells on levels higher than
906 * level <code>max_grid_level</code>. In fact, this very piece
907 * of code makes sure that this is the case.
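 *
 * To illustrate the point (a sketch, not part of the program above), the
 * first of these loops could equivalently have been written with explicit
 * iterators, where the end iterator can be spelled either way:
 * @code
 *   if (triangulation.n_levels() > max_grid_level)
 *     for (auto cell = triangulation.begin_active(max_grid_level);
 *          cell != triangulation.end(); // same as end_active(max_grid_level)
 *          ++cell)
 *       cell->clear_refine_flag();
 * @endcode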
911 * As part of mesh refinement we need to transfer the solution vectors
912 * from the old mesh to the new one. To this end we use the
913 * SolutionTransfer class and we have to prepare the solution vectors that
914 * should be transferred to the new grid (we will lose the old grid once
915 * we have done the refinement so the transfer has to happen concurrently
916 * with refinement). At the point where we call this function, we will
917 * have just computed the solution, so we no longer need the old_solution
918 * variable (it will be overwritten by the solution just after the mesh
919 * may have been refined, i.e., at the end of the time step; see below).
920 * In other words, we only need the one solution vector, and we copy it
921 * to a temporary object where it is safe from being reset when we further
922 * down below call <code>setup_system()</code>.
926 * Consequently, we initialize a SolutionTransfer object by attaching
927 * it to the old DoF handler. We then prepare the triangulation and the
928 * data vector for refinement (in this order).
931 * SolutionTransfer<dim> solution_trans(dof_handler);
933 * Vector<double> previous_solution;
934 * previous_solution = solution;
935 * triangulation.prepare_coarsening_and_refinement();
936 * solution_trans.prepare_for_coarsening_and_refinement(previous_solution);
940 * Now everything is ready, so do the refinement and recreate the DoF
941 * structure on the new grid, and finally initialize the matrix structures
942 * and the new vectors in the <code>setup_system</code> function. Next, we
943 * actually perform the interpolation of the solution from old to new
944 * grid. The final step is to apply the hanging node constraints to the
945 * solution vector, i.e., to make sure that the values of degrees of
946 * freedom located on hanging nodes are such that the solution is
947 * continuous. This is necessary since SolutionTransfer only operates on
948 * cells locally, without regard to the neighborhood.
951 *     triangulation.execute_coarsening_and_refinement();
 *     setup_system();
954 *     solution_trans.interpolate(previous_solution, solution);
955 * constraints.distribute(solution);
963 * <a name="codeHeatEquationruncode"></a>
964 * <h4><code>HeatEquation::run</code></h4>
968 * This is the main driver of the program, where we loop over all
969 * time steps. At the top of the function, we set the number of
970 * initial global mesh refinements and the number of initial cycles of
971 * adaptive mesh refinement by repeating the first time step a few
972 * times. Then we create a mesh, initialize the various objects we will
973 * work with, set a label for where we should start when re-running
974 * the first time step, and interpolate the initial solution onto
975 * our mesh (we choose the zero function here, which of course we could
976 * do in a simpler way by just setting the solution vector to zero). We
977 * also output the initial time step once.
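 *
 * As a compressed sketch of these initial steps (the complete code is in
 * the plain program at the end; here we assume the program's L-shaped
 * domain and, as just described, zero initial values):
 * @code
 *   GridGenerator::hyper_L(triangulation);
 *   triangulation.refine_global(initial_global_refinement);
 *   setup_system();
 *
 *   VectorTools::interpolate(dof_handler,
 *                            Functions::ZeroFunction<dim>(),
 *                            old_solution);
 *   solution = old_solution;
 *   output_results();
 * @endcode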
981 * @note If you're an experienced programmer, you may be surprised
982 * that we use a <code>goto</code> statement in this piece of code!
983 * <code>goto</code> statements are not particularly well liked any
984 * more since Edsger Dijkstra, one of the greats of computer science,
985 * wrote a letter in 1968 called "Go To Statement Considered Harmful"
986 * (see <a href="http://en.wikipedia.org/wiki/Considered_harmful">here</a>).
987 * The author of this code subscribes to this notion whole-heartedly:
988 * <code>goto</code> is hard to understand. In fact, deal.II contains
989 * virtually no occurrences: excluding code that was essentially
990 * transcribed from books and not counting duplicated code pieces,
991 * there are 3 locations in about 600,000 lines of code at the time
992 * this note is written; we also use it in 4 tutorial programs, in
993 * exactly the same context as here. Instead of trying to justify
994 * the occurrence here, let's first look at the code and we'll come
995 * back to the issue at the end of the function.
999 *   void HeatEquation<dim>::run()
1001 *     const unsigned int initial_global_refinement = 2;
1002 *     const unsigned int n_adaptive_pre_refinement_steps = 4;
1009 *     unsigned int pre_refinement_step = 0;
1014 * start_time_iteration:
1017 * timestep_number = 0;
1019 *     tmp.reinit(solution.size());
1020 * forcing_terms.reinit(solution.size());
1026 * solution = old_solution;
1032 * Then we start the main loop until the computed time exceeds our
1033 * end time of 0.5. The first task is to build the right hand
1034 * side of the linear system we need to solve in each time step.
1035 * Recall that it contains the term @f$MU^{n-1}-(1-\theta)k_n AU^{n-1}@f$.
1036 * We put these terms into the variable system_rhs, with the
1037 * help of a temporary vector:
1040 *     while (time <= 0.5)
1042 * time += time_step;
1043 * ++timestep_number;
1045 *         std::cout << "Time step " << timestep_number << " at t=" << time
 *         mass_matrix.vmult(system_rhs, old_solution);
1050 *         laplace_matrix.vmult(tmp, old_solution);
1051 * system_rhs.add(-(1 - theta) * time_step, tmp);
1055 * The second piece is to compute the contributions of the source
1056 * terms. This corresponds to the term @f$k_n
1057 * \left[ (1-\theta)F^{n-1} + \theta F^n \right]@f$. The following
 * code calls VectorTools::create_right_hand_side to compute the
1059 * vectors @f$F@f$, where we set the time of the right hand side
1060 * (source) function before we evaluate it. The result of this
1061 * all ends up in the forcing_terms variable:
1064 *         RightHandSide<dim> rhs_function;
1065 *         rhs_function.set_time(time);
 *         VectorTools::create_right_hand_side(
 *           dof_handler, QGauss<dim>(fe.degree + 1), rhs_function, tmp);
1070 *         forcing_terms = tmp;
1071 *         forcing_terms *= time_step * theta;
1073 *         rhs_function.set_time(time - time_step);
 *         VectorTools::create_right_hand_side(
 *           dof_handler, QGauss<dim>(fe.degree + 1), rhs_function, tmp);
1079 *         forcing_terms.add(time_step * (1 - theta), tmp);
1083 * Next, we add the forcing terms to the ones that
1084 * come from the time stepping, and also build the matrix
1085 * @f$M+k_n\theta A@f$ that we have to invert in each time step.
1086 * The final piece of these operations is to eliminate
1087 * hanging node constrained degrees of freedom from the linear system:
1091 * system_rhs += forcing_terms;
1093 * system_matrix.copy_from(mass_matrix);
1094 * system_matrix.add(theta * time_step, laplace_matrix);
1096 * constraints.condense(system_matrix, system_rhs);
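 *
 * Taken together, the operations above assemble the linear system
 * @f[
 *   \left(M + k_n \theta A\right) U^n
 *   = \left(M - k_n (1-\theta) A\right) U^{n-1}
 *   + k_n \left[(1-\theta) F^{n-1} + \theta F^n\right],
 * @f]
 * i.e., the time discretization recalled above, with the individual terms
 * appearing exactly as they were built in the code.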
1100 * There is one more operation we need to do before we
1101 * can solve it: boundary values. To this end, we create
1102 * a boundary value object, set the proper time to the one
1103 * of the current time step, and evaluate it as we have
1104 * done many times before. The result is used to also
1105 * set the correct boundary values in the linear system:
1109 *         BoundaryValues<dim> boundary_values_function;
1110 *         boundary_values_function.set_time(time);
1112 *         std::map<types::global_dof_index, double> boundary_values;
 *         VectorTools::interpolate_boundary_values(
 *           dof_handler, 0, boundary_values_function, boundary_values);
 *         MatrixTools::apply_boundary_values(
 *           boundary_values, system_matrix, solution, system_rhs);
1126 * With this out of the way, all we have to do is solve the
1127 * system, generate graphical data, and...
1130 * solve_time_step();
1136 * ...take care of mesh refinement. Here, what we want to do is
1137 * (i) refine the requested number of times at the very beginning
1138 * of the solution procedure, after which we jump to the top to
1139 * restart the time iteration, (ii) refine every fifth time step after that.
1144 * The time loop and, indeed, the main part of the program ends
1145 * with starting into the next time step by setting old_solution
1146 * to the solution we have just computed.
1149 *         if ((timestep_number == 1) &&
1150 * (pre_refinement_step < n_adaptive_pre_refinement_steps))
1152 * refine_mesh(initial_global_refinement,
1153 * initial_global_refinement +
1154 * n_adaptive_pre_refinement_steps);
1155 * ++pre_refinement_step;
1157 * tmp.reinit(solution.size());
1158 * forcing_terms.reinit(solution.size());
1160 * std::cout << std::endl;
1162 *             goto start_time_iteration;
1164 *         else if ((timestep_number > 0) && (timestep_number % 5 == 0))
1166 * refine_mesh(initial_global_refinement,
1167 * initial_global_refinement +
1168 * n_adaptive_pre_refinement_steps);
1169 * tmp.reinit(solution.size());
1170 * forcing_terms.reinit(solution.size());
1173 * old_solution = solution;
1179 * Now that you have seen what the function does, let us come back to the issue
1180 * of the <code>goto</code>. In essence, what the code does is
1181 * something like this:
1182 * <div class=CodeFragmentInTutorialComment>
1187 * start_time_iteration:
1188 * for (timestep=1...)
 * {
 *   solve timestep;
1191 *   if (timestep==1 && not happy with the result)
 *   {
1193 *     adjust some data structures;
1194 *     goto start_time_iteration;
 *   }
 *   postprocess;
 * }
 * </div>
1201 * Here, the condition "happy with the result" is whether we'd like to keep
1202 * the current mesh or would rather refine the mesh and start over on the
1203 * new mesh. We could of course replace the use of the <code>goto</code>
 * by the following:
1205 * <div class=CodeFragmentInTutorialComment>
 * while (true)
 *   { solve timestep;
1213 *     if (not happy with the result)
1214 *       adjust some data structures;
 *     else break; }
 * postprocess;
1221 * for (timestep=2...)
 *   { solve timestep; postprocess; }
 * </div>
1229 * This has the advantage of getting rid of the <code>goto</code>
1230 * but the disadvantage of having to duplicate the code that implements
1231 * the "solve timestep" and "postprocess" operations in two different
1232 * places. This could be countered by putting these parts of the code
1233 * (sizable chunks in the actual implementation above) into their
1234 * own functions, but a <code>while(true)</code> loop with a
1235 * <code>break</code> statement is not really all that much easier
1236 * to read or understand than a <code>goto</code>.
1240 * In the end, one might simply agree that <i>in general</i>
1241 * <code>goto</code> statements are a bad idea but be pragmatic and
1242 * state that there may be occasions where they can help avoid code
1243 * duplication and awkward control flow. This may be one of these
1244 * places, and it matches the position Steve McConnell takes in his
1245 * excellent book "Code Complete" @cite CodeComplete about good
1246 * programming practices (see the mention of this book in the
1247 * introduction of @ref step_1 "step-1") that spends a surprising ten pages on the
1248 * question of <code>goto</code> in general.
1256 * <a name="Thecodemaincodefunction"></a>
1257 * <h3>The <code>main</code> function</h3>
1261 * Having made it this far, there is, again, nothing
1262 * much to discuss for the main function of this
1263 * program: it looks like all such functions since @ref step_6 "step-6".
1270 * using namespace Step26;
1272 * HeatEquation<2> heat_equation_solver;
1273 * heat_equation_solver.run();
1275 * catch (std::exception &exc)
1277 * std::cerr << std::endl
1279 * << "----------------------------------------------------"
1281 * std::cerr << "Exception on processing: " << std::endl
1282 * << exc.what() << std::endl
1283 * << "Aborting!" << std::endl
1284 * << "----------------------------------------------------"
1291 * std::cerr << std::endl
1293 * << "----------------------------------------------------"
1295 * std::cerr << "Unknown exception!" << std::endl
1296 * << "Aborting!" << std::endl
1297 * << "----------------------------------------------------"
<a name="Results"></a><h1>Results</h1>

As in many of the tutorials, the actual output of the program matters less
than how we arrived there. Nonetheless, here it is:

===========================================
Number of active cells: 48
Number of degrees of freedom: 65

Time step 1 at t=0.002

===========================================
Number of active cells: 60
Number of degrees of freedom: 81

Time step 1 at t=0.002

===========================================
Number of active cells: 105
Number of degrees of freedom: 136

Time step 1 at t=0.002

Time step 249 at t=0.498
Time step 250 at t=0.5

===========================================
Number of active cells: 1803
Number of degrees of freedom: 2109
Maybe of more interest is a visualization of the solution and the mesh on which
it was computed:

<img src="https://www.dealii.org/images/steps/developer/step-26.movie.gif" alt="Animation of the solution of step 26.">

The movie shows how the two sources switch on and off and how the mesh reacts
to this. It is quite obvious that the mesh as is is probably not the best we
could come up with. We'll get back to this in the next section.
<a name="extensions"></a>
<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
There are at least two areas where one can improve this program significantly:
adaptive time stepping and a better choice of the mesh.
<a name="Adaptivetimestepping"></a><h4>Adaptive time stepping</h4>
Having chosen an implicit time stepping scheme, we are not bound by any
CFL-like condition on the time step. Furthermore, because the time scales on
which change happens on a given cell in the heat equation are not bound to the
cells' diameter (unlike the case with the wave equation, where we had a fixed
speed of information transport that couples the temporal and spatial scales),
we can choose the time step as we please. Or, better, choose it as we deem
necessary for accuracy.
Looking at the solution, it is clear that the action does not happen uniformly
over time: a lot is changing around the times when we switch on a source, things
become less dramatic once a source is on for a little while, and we enter a
long phase of decline when both sources are off. During these times, we could
surely get away with a larger time step than before without sacrificing too
much accuracy.
The literature has many suggestions on how to choose the time step size
adaptively. Much can be learned, for example, from the way ODE solvers choose
their time steps. One can also be inspired by a posteriori error estimators
that can, ideally, be written in a way that they consist of a temporal and a
spatial contribution to the overall error. If the temporal one is too large,
we should choose a smaller time step. Ideas in this direction can be found,
for example, in the PhD thesis of a former principal developer of deal.II,
Ralf Hartmann, published by the University of Heidelberg, Germany, in 2002.
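
As a purely illustrative sketch (none of this is in the program above, and
the tolerances are made-up numbers), one could use the change of the solution
over the last step as a crude temporal indicator and adapt the step size
accordingly:
@code
  const double upper_threshold = 1e-2; // hypothetical tolerance
  const double lower_threshold = 1e-4; // hypothetical tolerance

  Vector<double> change = solution;
  change -= old_solution;

  if (change.l2_norm() > upper_threshold)
    time_step *= 0.5; // a lot is happening: take smaller steps
  else if (change.l2_norm() < lower_threshold)
    time_step *= 1.5; // little is happening: allow larger steps
@endcode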
<a name="Bettertimesteppingmethods"></a><h4>Better time stepping methods</h4>
We here use one of the simpler time stepping methods, namely the second order
in time Crank-Nicolson method. This is surely already better than the even
more widely used (and even less accurate) implicit Euler method,
but many other, more accurate methods such as
Runge-Kutta methods are available and should be used as they do not represent
much additional effort. It is not difficult to implement this for the current
program, if one wanted; a more systematic treatment is also given in @ref step_52 "step-52".
As a general rule, however, one should not be implementing time stepping
methods by hand, as we do here, for problems that do not require
exploiting special properties of the equation and consequently require
specialized time stepping methods. (The heat equation does not fall into
this category, and "standard" time stepping methods are all we need here.)
Rather, one should use one of the available
high-quality libraries for time stepping, for the same reasons as one should
not be implementing finite element methods by hand but use deal.II instead.
Indeed, deal.II has interfaces to one such time stepping library, which
would be a good starting point for the use of much better (and much more
accurate) time steppers; the methods one would then get also implement the
automatic time step control mentioned above.
<a name="Betterrefinementcriteria"></a><h4>Better refinement criteria</h4>
If you look at the meshes in the movie above, it is clear that they are not
particularly well suited to the task at hand. In fact, they look rather random.
There are two factors at play. First, there are some islands where cells
have been refined but that are surrounded by non-refined cells (and there
are probably also a few occasional coarsened islands). These are not terrible,
as they most of the time do not affect the approximation quality of the mesh,
but they also don't help because so many of their additional degrees of
freedom are in fact constrained by hanging node constraints. That said,
this is easy to fix: the Triangulation class takes an argument to its
constructor indicating a level of "mesh smoothing". Passing one of many
possible flags, this instructs the triangulation to refine some additional
cells, or not to refine some cells, so that the resulting mesh does not have
such islands.
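
For example (a sketch, not what the program above does), the
<code>triangulation</code> member could be constructed with one of these
smoothing flags in the constructor of the <code>HeatEquation</code> class:
@code
  template <int dim>
  HeatEquation<dim>::HeatEquation()
    : triangulation(Triangulation<dim>::maximum_smoothing)
    , fe(1)
    , dof_handler(triangulation)
    , time_step(1. / 500)
    , theta(1. / 2)
  {}
@endcode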
The second problem is more severe: the mesh appears to lag the solution.
The underlying reason is that we only adapt the mesh once every fifth
time step, and only allow for a single refinement in these cases. Whenever a
source switches on, the solution had been very smooth in this area before and
the mesh was consequently rather coarse. This implies that the next time step
when we refine the mesh, we will get one refinement level more in this area,
and five time steps later another level, etc. But this is not enough: first,
we should refine immediately when a source switches on (after all, in the
current context we at least know what the right hand side is), and we should
allow for more than one refinement level. Of course, all of this can be done
using deal.II, it just requires a bit of algorithmic thinking in how to make
this work.
<a name="Positivitypreservation"></a><h4>Positivity preservation</h4>

To increase the accuracy and resolution of your simulation in time, one
typically decreases the time step size @f$k_n@f$. If you start playing around
with the time step in this particular example, you will notice that the
solution becomes partly negative, if @f$k_n@f$ is below a certain threshold.
This is not what we would expect to happen (in nature).

To get an idea of this behavior mathematically, let us consider a general,
fully discrete problem:
@f[
  A u^{n} = B u^{n-1}.
@f]
The general form of the @f$i@f$th equation then reads:
@f{align*}{
  a_{ii} u^{n}_i &= b_{ii} u^{n-1}_i +
  \sum\limits_{j \in S_i} \left( b_{ij} u^{n-1}_j - a_{ij} u^{n}_j \right),
@f}
where @f$S_i@f$ is the set of degrees of freedom that DoF @f$i@f$ couples with (i.e.,
for which either the matrix @f$A@f$ or matrix @f$B@f$ has a nonzero entry at position
@f$(i,j)@f$). If all coefficients
fulfill the following conditions:
@f{align*}{
  a_{ii} &> 0, & b_{ii} &\geq 0, & a_{ij} &\leq 0, & b_{ij} &\geq 0,
@f}
all solutions @f$u^{n}@f$ keep their sign from the previous ones @f$u^{n-1}@f$, and
consequently from the initial values @f$u^0@f$. See e.g.
<a href="http://bookstore.siam.org/cs14/">Kuzmin, Hämäläinen</a>
for more information on positivity preservation.
Depending on the PDE to solve and the time integration scheme used, one is
able to deduce conditions for the time step @f$k_n@f$. For the heat equation with
the Crank-Nicolson scheme,
<a href="https://doi.org/10.2478/cmam-2010-0025">Schatz et al.</a> have
translated it to the following ones:
@f{align*}{
  (1 - \theta) k a_{ii} &\leq m_{ii}, \qquad \forall i,
  \\
  \theta k \left| a_{ij} \right| &\geq m_{ij}, \qquad j \neq i,
@f}
where @f$M = m_{ij}@f$ denotes the @ref GlossMassMatrix "mass matrix" and @f$A = a_{ij}@f$ the stiffness
matrix with @f$a_{ij} \leq 0@f$ for @f$j \neq i@f$, respectively. With
@f$a_{ij} \leq 0@f$, we can formulate bounds for the global time step @f$k@f$ as
@f{align*}{
  k_{\text{max}} &= \frac{ 1 }{ 1 - \theta }
  \min\left( \frac{ m_{ii} }{ a_{ii} } \right),~ \forall i,
  \\
  k_{\text{min}} &= \frac{ 1 }{ \theta }
  \max\left( \frac{ m_{ij} }{ \left|a_{ij}\right| } \right),~ j \neq i.
@f}
In other words, the time step is constrained by <i>both a lower
and upper bound</i> in case of a Crank-Nicolson scheme. These bounds should be
considered along with the CFL condition to ensure significance of the performed
computations.
Being unable to make the time step as small as we want to get more
accuracy without losing the positivity property is annoying. It raises
the question of whether we can at least <i>compute</i> the minimal time step
we can choose to ensure positivity preservation in this particular tutorial.
Indeed, we can use
the SparseMatrix objects for both mass and stiffness that are created via
the MatrixCreator functions. Iterating through each entry via SparseMatrixIterators
lets us check for diagonal and off-diagonal entries to set a proper time step
dynamically. For quadratic matrices, the diagonal element is stored as the
first member of a row (see the SparseMatrix documentation). An exemplary code
snippet on how to grab the entries of interest from the <code>mass_matrix</code>
is shown below.
Assert (mass_matrix.m() == mass_matrix.n(), ExcNotQuadratic());
const unsigned int num_rows = mass_matrix.m();
double mass_matrix_min_diag    = std::numeric_limits<double>::max(),
       mass_matrix_max_offdiag = 0.;

SparseMatrixIterators::Iterator<double,true> row_it (&mass_matrix, 0);

for(unsigned int m = 0; m<num_rows; ++m)
{
  // check the diagonal element
  row_it = mass_matrix.begin(m);
  mass_matrix_min_diag = std::min(row_it->value(), mass_matrix_min_diag);
  ++row_it;

  // check the off-diagonal elements
  for(; row_it != mass_matrix.end(m); ++row_it)
    mass_matrix_max_offdiag = std::max(row_it->value(), mass_matrix_max_offdiag);
}
Using the information so computed, we can bound the time step via the formulas above.
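
A sketch of how one might evaluate these bounds (not part of the program; it
assumes access to the <code>theta</code>, <code>mass_matrix</code> and
<code>laplace_matrix</code> members, which share one sparsity pattern so that
their entries can be traversed in lockstep):
@code
  double k_max = std::numeric_limits<double>::max(); // 1/(1-theta) * min_i m_ii/a_ii
  double k_min = 0.;                                 // 1/theta * max_{j!=i} m_ij/|a_ij|

  for (unsigned int m = 0; m < mass_matrix.m(); ++m)
  {
    auto mass_it    = mass_matrix.begin(m);
    auto laplace_it = laplace_matrix.begin(m);

    // The diagonal entry is stored first in each row.
    k_max = std::min(k_max,
                     1. / (1. - theta) * mass_it->value() / laplace_it->value());

    // Off-diagonal entries; skip (near-)zero stiffness entries.
    for (++mass_it, ++laplace_it; mass_it != mass_matrix.end(m);
         ++mass_it, ++laplace_it)
      if (std::abs(laplace_it->value()) > 1e-12)
        k_min = std::max(k_min,
                         1. / theta * mass_it->value() /
                           std::abs(laplace_it->value()));
  }
@endcode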
<a name="PlainProg"></a>
<h1> The plain program</h1>
@include "step-26.cc"