798 * , initial_global_refinement(5)
799 * , max_delta_refinement_level(2)
800 * , mesh_adaptation_frequency(0)
801 * , right_hand_side_function("/Heat Equation/Right hand side")
802 * , initial_value_function("/Heat Equation/Initial value")
803 * , boundary_values_function("/Heat Equation/Boundary values")
805 * enter_subsection("Time stepper");
807 * enter_my_subsection(this->prm);
809 * time_stepper_data.add_parameters(this->prm);
811 * leave_my_subsection(this->prm);
813 * leave_subsection();
815 * add_parameter("Initial global refinement",
816 *   initial_global_refinement,
817 *   "Number of times the mesh is refined globally before "
818 *   "starting the time stepping.");
819 * add_parameter("Maximum delta refinement level",
820 *   max_delta_refinement_level,
821 *   "Maximum number of local refinement levels.");
822 * add_parameter("Mesh adaptation frequency",
823 *   mesh_adaptation_frequency,
824 *   "When to adapt the mesh.");
832 * <a name="step_86-TheHeatEquationsetup_systemfunction"></a>
833 * <h4>The HeatEquation::setup_system() function</h4>
837 * This function is not very different from what we do in many other programs
838 * (including @ref step_26 "step-26"). We enumerate degrees of freedom, output some
839 * information about then, build constraint objects (recalling that we
840 * put hanging node constraints into their separate
object), and then
841 * also build an AffineConstraint
object that contains both the hanging
842 * node constraints as well as constraints corresponding to zero Dirichlet
843 * boundary conditions. This last
object is needed since we impose the
844 * constraints through algebraic equations. While technically it would be
845 * possible to use the time derivative of the boundary function as a boundary
846 * conditions for the time derivative of the solution, this is not done here.
847 * Instead, we impose the boundary conditions through algebraic equations, and
848 * therefore the time derivative of the boundary conditions is not part of
849 * the algebraic system, and we need zero boundary conditions on the time
850 * derivative of the solution when computing the residual. We use the
851 * `homogeneous_constraints`
object for this purpose.
855 * Finally, we create the actual non-homogeneous `current_constraints` by
856 * calling `update_current_constraints). These are also used during the
857 * assembly and during the residual evaluation.
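 * As a minimal sketch of the constraint setup just described (using the member
 * names of this program; the use of boundary indicator 0 is an assumption),
 * building the homogeneous constraints object typically looks like this:
 * @code
 * AffineConstraints<double> homogeneous_constraints;
 * homogeneous_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
 * // Start from the hanging node constraints ...
 * homogeneous_constraints.merge(hanging_nodes_constraints);
 * // ... and add zero Dirichlet values on the boundary (boundary id 0 assumed):
 * VectorTools::interpolate_boundary_values(dof_handler,
 *                                          0,
 *                                          Functions::ZeroFunction<dim>(),
 *                                          homogeneous_constraints);
 * homogeneous_constraints.close();
 * @endcode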
861 * void HeatEquation<dim>::setup_system(const double time)
865 * dof_handler.distribute_dofs(fe);
869 * << "Number of degrees of freedom: " << dof_handler.n_dofs()
873 * locally_owned_dofs = dof_handler.locally_owned_dofs();
874 * locally_relevant_dofs =
878 * hanging_nodes_constraints.clear();
879 * hanging_nodes_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
881 * hanging_nodes_constraints);
882 * hanging_nodes_constraints.make_consistent_in_parallel(locally_owned_dofs,
883 * locally_relevant_dofs,
885 * hanging_nodes_constraints.close();
888 * homogeneous_constraints.clear();
889 * homogeneous_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
890 * homogeneous_constraints.merge(hanging_nodes_constraints);
894 * homogeneous_constraints);
895 * homogeneous_constraints.make_consistent_in_parallel(locally_owned_dofs,
896 * locally_relevant_dofs,
898 * homogeneous_constraints.close();
901 * update_current_constraints(time);
906 * The final block of code resets and initializes the matrix object with
907 * the appropriate sparsity pattern. Recall that we do not store solution
908 * vectors in this class (the time integrator object does that internally)
909 * and so do not have to resize and initialize them either.
915 * homogeneous_constraints,
918 * locally_owned_dofs,
920 * locally_relevant_dofs);
922 * jacobian_matrix.reinit(locally_owned_dofs,
923 * locally_owned_dofs,
932 * <a name="step_86-TheHeatEquationoutput_resultsfunction"></a>
933 * <h4>The HeatEquation::output_results() function</h4>
937 * This function is called from the "monitor" function that is in turn called
938 * by the time stepper in each time step. We use it to write the solution to a
939 * file, and provide graphical output through ParaView or VisIt. We also write
940 * a pvd file, which groups all metadata about the `.vtu` files into a single
941 * file that can be used to load the full time-dependent solution in ParaView.
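 * The pvd record mentioned above is written with DataOutBase::write_pvd_record().
 * A minimal sketch of that last step, with the vector of times and file names
 * built up as in the function below, could look like this (writing only on rank 0
 * is an assumption):
 * @code
 * if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
 *   {
 *     std::ofstream pvd_output("solution.pvd");
 *     DataOutBase::write_pvd_record(pvd_output, times_and_names);
 *   }
 * @endcode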
945 * void HeatEquation<dim>::output_results(const double time,
946 *                                        const unsigned int timestep_number,
953 * data_out.add_data_vector(y, "U");
954 * data_out.build_patches();
958 * const std::string filename =
960 * data_out.write_vtu_in_parallel(filename, mpi_communicator);
964 * static std::vector<std::pair<double, std::string>> times_and_names;
965 * times_and_names.emplace_back(time, filename);
967 * std::ofstream pvd_output("solution.pvd");
977 * <a name="step_86-TheHeatEquationimplicit_functionfunction"></a>
978 * <h4>The HeatEquation::implicit_function() function</h4>
982 * As discussed in the introduction, we describe the ODE system to the time
983 * stepper via its residual,
985 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
987 * The following function computes it, given vectors for @f$U,\dot U@f$ that
988 * we will denote by `y` and `y_dot` because that's how they are called
989 * in the documentation of the PETScWrappers::TimeStepper class.
993 * At the top of the function, we do the usual set up when computing
994 * integrals. We face two minor difficulties here: the first is that
995 * the `y` and `y_dot` vectors we get as input are read only, but we
996 * need to make sure they satisfy the correct boundary conditions and so
997 * have to set elements in these vectors. The second is that we need to
998 * compute the residual, and therefore in general we need to evaluate solution
999 * values and gradients inside locally owned cells, and for this need access
1000 * to degrees of freedom which may be owned by neighboring processors. To
1001 * address these issues, we create (non-ghosted) writable copies of the input
1002 * vectors, apply boundary conditions and hanging node constraints,
1003 * and then copy these vectors to ghosted vectors before we can do anything
1004 * sensible with them.
1007 * template <int dim>
1009 * HeatEquation<dim>::implicit_function(const double time,
1010 * const PETScWrappers::MPI::Vector &y,
1011 * const PETScWrappers::MPI::Vector &y_dot,
1012 * PETScWrappers::MPI::Vector &residual)
1014 * TimerOutput::Scope t(computing_timer, "implicit function");
1016 * PETScWrappers::MPI::Vector tmp_solution(locally_owned_dofs,
1017 * mpi_communicator);
1018 * PETScWrappers::MPI::Vector tmp_solution_dot(locally_owned_dofs,
1019 * mpi_communicator);
1021 * tmp_solution_dot = y_dot;
1023 * update_current_constraints(time);
1024 * current_constraints.distribute(tmp_solution);
1025 * homogeneous_constraints.distribute(tmp_solution_dot);
1027 * PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1028 * locally_relevant_dofs,
1029 * mpi_communicator);
1030 * PETScWrappers::MPI::Vector locally_relevant_solution_dot(
1031 * locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
1032 * locally_relevant_solution = tmp_solution;
1033 * locally_relevant_solution_dot = tmp_solution_dot;
1036 * const QGauss<dim> quadrature_formula(fe.degree + 1);
1037 * FEValues<dim> fe_values(fe,
1038 * quadrature_formula,
1039 * update_values | update_gradients |
1040 * update_quadrature_points | update_JxW_values);
1042 * const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1043 * const unsigned int n_q_points = quadrature_formula.size();
1045 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1047 * std::vector<Tensor<1, dim>> solution_gradients(n_q_points);
1048 * std::vector<double> solution_dot_values(n_q_points);
1050 * Vector<double> cell_residual(dofs_per_cell);
1052 * right_hand_side_function.set_time(time);
1056 * Now for computing the actual residual. Recall that we want to compute the
1059 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
1061 * We could do that by actually forming the matrices @f$M@f$ and @f$A@f$, but this
1062 * is not efficient. Instead, recall (by writing out how the elements of
1063 * @f$M@f$ and @f$A@f$ are defined, and exchanging integrals and sums) that the
1064 * @f$i@f$th element of the residual vector is given by
1067 * R_i(t,U,\dot U) = \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t)
1068 * \frac{\partial U_j(t)}{\partial t}
1069 * + \sum_j \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
1070 * \varphi_j(\mathbf x, t) U_j(t)
1071 * - \int_\Omega \varphi_i f(\mathbf x, t)
1073 * = \int_\Omega \varphi_i(\mathbf x, t) \frac{\partial u_h(\mathbf x, t)}{\partial t}
1074 * + \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla u_h(\mathbf x, t)
1076 * - \int_\Omega \varphi_i f(\mathbf x, t).
1078 * We can compute these integrals efficiently by breaking them up into
1079 * a sum over all cells and then applying quadrature. For the integrand,
1080 * we need to evaluate the solution and its gradient at the quadrature
1081 * points within each locally owned cell, and for this, we need also
1082 * access to degrees of freedom that may be owned by neighboring
1083 * processors. We therefore use the `locally_relevant_solution` and
1084 * `locally_relevant_solution_dot` vectors.
1088 * for (const auto &cell : dof_handler.active_cell_iterators())
1089 * if (cell->is_locally_owned())
1091 * fe_values.reinit(cell);
1093 * fe_values.get_function_gradients(locally_relevant_solution,
1094 * solution_gradients);
1095 * fe_values.get_function_values(locally_relevant_solution_dot,
1096 * solution_dot_values);
1098 * cell->get_dof_indices(local_dof_indices);
1100 * cell_residual = 0;
1101 * for (const unsigned int q : fe_values.quadrature_point_indices())
1102 * for (const unsigned int i : fe_values.dof_indices())
1104 * cell_residual(i) +=
1105 *   (fe_values.shape_value(i, q) *    // [phi_i(x_q) *
1106 *      solution_dot_values[q]         //    u_dot(x_q)
1108 *    + fe_values.shape_grad(i, q) *   //  + grad phi_i(x_q) *
1109 *      solution_gradients[q]          //    grad u(x_q)
1111 *    - fe_values.shape_value(i, q) *  //  - phi_i(x_q) *
1112 *      right_hand_side_function.value(
1113 *        fe_values.quadrature_point(q))) //   f(x_q)]
1114 *   * fe_values.JxW(q);                  // * dx
1116 * current_constraints.distribute_local_to_global(cell_residual,
1117 * local_dof_indices,
1120 * residual.compress(VectorOperation::add);
1124 * The end result of the operations above is a vector that contains the
1125 * residual vector, having taken into account the constraints due to
1126 * hanging nodes and Dirichlet boundary conditions (by virtue of having
1127 * used `current_constraints.distribute_local_to_global()` to add the
1128 * local contributions to the global vector). At the end of the day, the
1129 * residual vector @f$r@f$ will be used in the solution of linear systems
1130 * of the form @f$J z = r@f$ with the "Jacobian" matrix that we define
1131 * below. We want to ensure that the algebraic
1132 * components of @f$z@f$ have predictable values that achieve the purposes
1133 * discussed in the following. We do this by ensuring that the entries
1134 * corresponding to algebraic components in the residual @f$r@f$ have specific
1135 * values, and then we will do the same in the next function for the
1136 * matrix; for this, you will have to know that the rows and columns
1137 * of the matrix corresponding to constrained entries are zero with the
1138 * exception of the diagonal entries. We will manually set that diagonal
1139 * entry to one, and so @f$z_i=r_i@f$ for algebraic components.
1143 * From the point of view of the residual vector, if the input `y`
1144 * vector does not contain the correct values on constrained degrees of
1145 * freedom (hanging nodes or boundary conditions), we need to communicate
1146 * this to the time stepper, and we do so by setting the residual to the
1147 * actual difference between the input `y` vector and our local copy of
1148 * it, in which we have applied the constraints (see the top of the
1149 * function where we called `current_constraints.distribute(tmp_solution)`
1150 * and a similar operation on the time derivative). Since we have made a
1151 * copy of the input vector for this purpose, we use it to compute the
1152 * residual value. However, there is a difference between hanging node
1153 * constraints and boundary conditions: we do not want to make hanging node
1154 * constraints actually depend on their dependent degrees of freedom, since
1155 * this would imply that we are actually solving for the dependent degrees
1156 * of freedom. This is not what we are actually doing, however, since
1157 * hanging nodes are not actually solved for. They are eliminated from the
1158 * system by the call to AffineConstraints::distribute_local_to_global()
1159 * above. From the point of view of the Jacobian matrix, we are effectively
1160 * setting hanging nodes to an artificial value (usually zero), and we
1161 * simply want to make sure that we solve for those degrees of freedom a
1162 * posteriori, by calling the function AffineConstraints::distribute().
1166 * Here we therefore set the residual equal to the input value on
1167 * the constrained dofs corresponding to hanging nodes (i.e., those for
1168 * which the lines of the `current_constraints` contain at least one other
1169 * entry), and to the difference between the input vector and the actual
1170 * solution on those constraints that correspond to boundary conditions.
1173 * for (const auto &c : current_constraints.get_lines())
1174 * if (locally_owned_dofs.is_element(c.index))
1176 * if (c.entries.empty()) /* no dependencies -> a Dirichlet node */
1177 * residual[c.index] = y[c.index] - tmp_solution[c.index];
1178 * else /* has dependencies -> a hanging node */
1179 * residual[c.index] = y[c.index];
1181 * residual.compress(VectorOperation::insert);
1188 * <a name="step_86-TheHeatEquationassemble_implicit_jacobianfunction"></a>
1189 * <h4>The HeatEquation::assemble_implicit_jacobian() function</h4>
1193 * The next operation is to compute the "Jacobian", which PETSc TS defines as
1196 * J_\alpha = \dfrac{\partial R}{\partial y} + \alpha \dfrac{\partial R}{\partial \dot y},
1199 * which, for the current linear problem, is simply
1201 * J_\alpha = A + \alpha M
1203 * and which is in particular independent of time and the current solution
1204 * vectors @f$y@f$ and @f$\dot y@f$.
1208 * Having seen the assembly of matrices before, there is little that should
1209 * surprise you in the actual assembly here:
1212 * template <int dim>
1213 * void HeatEquation<dim>::assemble_implicit_jacobian(
1214 * const double /* time */,
1215 * const PETScWrappers::MPI::Vector & /* y */,
1216 * const PETScWrappers::MPI::Vector & /* y_dot */,
1217 * const double alpha)
1219 * TimerOutput::Scope t(computing_timer, "assemble implicit Jacobian");
1221 * const QGauss<dim> quadrature_formula(fe.degree + 1);
1222 * FEValues<dim> fe_values(fe,
1223 * quadrature_formula,
1224 * update_values | update_gradients |
1225 * update_quadrature_points | update_JxW_values);
1227 * const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1229 * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1231 * FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1233 * jacobian_matrix = 0;
1234 * for (const auto &cell : dof_handler.active_cell_iterators())
1235 * if (cell->is_locally_owned())
1237 * fe_values.reinit(cell);
1239 * cell->get_dof_indices(local_dof_indices);
1242 * for (const unsigned int q : fe_values.quadrature_point_indices())
1243 * for (const unsigned int i : fe_values.dof_indices())
1244 * for (const unsigned int j : fe_values.dof_indices())
1246 * cell_matrix(i, j) +=
1247 *   (fe_values.shape_grad(i, q) *            // (grad phi_i(x_q) *
1248 *      fe_values.shape_grad(j, q)            //    grad phi_j(x_q)
1250 *    + alpha * fe_values.shape_value(i, q) * //  + alpha * phi_i(x_q) *
1251 *      fe_values.shape_value(j, q))          //    phi_j(x_q))
1253 *   * fe_values.JxW(q);                      // * dx
1255 * current_constraints.distribute_local_to_global(cell_matrix,
1256 * local_dof_indices,
1259 * jacobian_matrix.compress(VectorOperation::add);
1263 * The only interesting part is the following. Recall that we modified the
1264 * residual vector's entries corresponding to the algebraic components of
1265 * the solution in the previous function. The outcome of calling
1266 * `current_constraints.distribute_local_to_global()` a few lines
1267 * above is that the global matrix has zero rows and columns for the
1268 * algebraic (constrained) components of the solution; the function only puts a value on the diagonal that is of roughly the
1270 * same size as the remaining diagonal entries of the matrix. What
1271 * this diagonal value is is unknown to us -- in other cases where
1272 * we call `current_constraints.distribute_local_to_global()` on
1273 * both the left side matrix and the right side vector, as in most other tutorial programs, the diagonal and right
1275 * hand side values are chosen in such a way that the result of
1276 * solving a linear system is what we want it to be, but the scaling
1277 * is done automatically.
1281 * This is not good enough for us here, because we are building the
1282 * right hand side independently from the matrix in different functions.
1283 * Thus, for any constrained degree of freedom, we set the diagonal of the
1284 * Jacobian to be one. This leaves the Jacobian matrix invertible,
1285 * consistent with what the time stepper expects, and it also makes sure
1286 * that if we did not make a mistake in the residual and/or in the Jacobian
1287 * matrix, then asking the time stepper to check the Jacobian with a finite
1288 * difference method will produce the correct result. This can be activated
1289 * at run time via passing the `-snes_test_jacobian` option on the command line.
1293 * for (const auto &c : current_constraints.get_lines())
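 * The body of this loop is elided above. Based on the description, a minimal
 * sketch of what it amounts to is the following (the final `compress()` call
 * with VectorOperation::insert is an assumption, following the usual pattern
 * for setting individual PETSc matrix entries):
 * @code
 * for (const auto &c : current_constraints.get_lines())
 *   if (locally_owned_dofs.is_element(c.index))
 *     jacobian_matrix.set(c.index, c.index, 1.0);
 * jacobian_matrix.compress(VectorOperation::insert);
 * @endcode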
1302 * <a name="step_86-TheHeatEquationsolve_with_jacobianfunction"></a>
1303 * <h4>The HeatEquation::solve_with_jacobian() function</h4>
1307 * This is the function that actually solves the linear system with the
1308 * Jacobian matrix we have previously built (in a call to the previous
1309 * function during the current time step or another earlier one -- time
1310 * steppers are quite sophisticated in determining internally whether it is
1311 * necessary to update the Jacobian matrix, or whether one can reuse it for
1312 * another time step without rebuilding it; this is similar to how one can
1313 * re-use the Newton matrix for several Newton steps, see for example the
1314 * discussion in @ref step_77 "step-77"). We could in principle not provide this function to
1315 * the time stepper, and instead select a specific solver on the command line
1316 * by using the `-ksp_*` options of PETSc. However, by providing this
1317 * function, we can use a specific solver and preconditioner for the linear
1318 * system, and still have the possibility to change them on the command line.
1322 * Providing a specific solver is more in line with the way we usually do
1323 * things in other deal.II examples, while letting PETSc choose a generic
1324 * solver, and changing it on the command line via `-ksp_type` is more in line
1325 * with the way PETSc is usually used, and it can be a convenient approach
1326 * when we are experimenting to find an optimal solver for our problem. Both
1327 * options are available here, since we can still change both the solver and
1328 * the preconditioner on the command line via `-user_ksp_type` and
1329 * `-user_pc_type` options.
1333 * In any case, recall that the Jacobian we built in the previous function is
1334 * always of the form
1336 * J_\alpha = \alpha M + A
1338 * where @f$M@f$ is a mass matrix and @f$A@f$ a Laplace matrix. @f$M@f$ is symmetric and
1339 * positive definite; @f$A@f$ is symmetric and at least positive semidefinite;
1340 * @f$\alpha> 0@f$. As a consequence, the Jacobian matrix is a symmetric and
1341 * positive definite matrix, which we can efficiently solve with the Conjugate
1342 * Gradient method, along with either SSOR or (if available) the algebraic
1343 * multigrid implementation provided by PETSc (via the Hypre package) as
1344 * preconditioner. In practice, if you wanted to solve "real" problems, one
1345 * would spend some time finding which preconditioner is optimal, perhaps
1346 * using PETSc's ability to read solver and preconditioner choices from the
1347 * command line. But this is not the focus of this tutorial program, and so
1348 * we just go with the following:
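 * The function listing below is abbreviated. For orientation, here is a minimal
 * sketch of the kind of solver setup just described; the solver-control
 * tolerance is an assumption, while the preconditioner choices, the `user_`
 * prefix, and the member names follow the surrounding text:
 * @code
 * template <int dim>
 * void HeatEquation<dim>::solve_with_jacobian(const PETScWrappers::MPI::Vector &src,
 *                                             PETScWrappers::MPI::Vector       &dst)
 * {
 *   TimerOutput::Scope t(computing_timer, "solve with Jacobian");
 *
 * #if defined(PETSC_HAVE_HYPRE)
 *   PETScWrappers::PreconditionBoomerAMG preconditioner; // algebraic multigrid via Hypre
 * #else
 *   PETScWrappers::PreconditionSSOR preconditioner;      // SSOR fallback, as described above
 * #endif
 *   preconditioner.initialize(jacobian_matrix);
 *
 *   SolverControl           solver_control(1000, 1e-8 * src.l2_norm());
 *   PETScWrappers::SolverCG cg(solver_control);
 *   cg.set_prefix("user_");   // allow overriding via -user_ksp_type / -user_pc_type
 *   cg.solve(jacobian_matrix, dst, src, preconditioner);
 *
 *   pcout << "     " << solver_control.last_step() << " linear iterations."
 *         << std::endl;
 * }
 * @endcode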
1351 * template <int dim>
1358 * #if defined(PETSC_HAVE_HYPRE)
1360 * preconditioner.initialize(jacobian_matrix);
1369 * cg.set_prefix("user_");
1371 * cg.solve(jacobian_matrix, dst, src, preconditioner);
1373 * pcout << " " << solver_control.last_step() << " linear iterations."
1381 * <a name="step_86-TheHeatEquationprepare_for_coarsening_and_refinementfunction"></a>
1382 * <h4>The HeatEquation::prepare_for_coarsening_and_refinement() function</h4>
1386 * The next block of functions deals with mesh refinement. We split this
1387 * process up into a "decide whether and what you want to refine" and a
1388 * "please transfer these vectors from old to new mesh" phase, where the first
1389 * also deals with marking cells for refinement. (The decision whether or
1390 * not to refine is done in the lambda function that calls the current function.)
1395 * Breaking things into a "mark cells" function and into an "execute mesh
1396 * adaptation and transfer solution vectors" function is awkward, though
1397 * conceptually not difficult to understand. These two pieces of code
1398 * should really be part of the same function, as they are in @ref step_26 "step-26".
1399 * The issue is with what PETScWrappers::TimeStepper provides us with in these
1400 * callbacks. Specifically, the "decide whether and what you want to refine"
1401 * callback has access to the current solution, and so can evaluate
1402 * (spatial) error estimators to decide which cells to refine. The
1403 * second callback that transfers vectors from old to new mesh gets a bunch
1404 * of vectors, but without the semantic information on which of these is
1405 * the current solution vector. As a consequence, it cannot do the marking
1406 * of cells for refinement or coarsening, and we have to do that from the first callback.
1411 * In practice, however, the problem is minor. The first of these
1412 * two functions is essentially identical to the
1413 * first half of the corresponding function in @ref step_26 "step-26", with the only difference that we use the parallel version of the refine-and-coarsen
1415 * function instead of the serial one. Once again, we make sure that we never
1416 * fall below the minimum refinement level, nor exceed the maximum one, both of which we
1417 * can select from the parameter file.
1420 * template <int dim>
1421 * void HeatEquation<dim>::prepare_for_coarsening_and_refinement(
1425 * locally_relevant_dofs,
1426 * mpi_communicator);
1427 * locally_relevant_solution = y;
1433 * locally_relevant_solution,
1434 * estimated_error_per_cell);
1439 * const unsigned int max_grid_level =
1440 *   initial_global_refinement + max_delta_refinement_level;
1441 * const unsigned int min_grid_level = initial_global_refinement;
1444 * for (const auto &cell :
1445 *      triangulation.active_cell_iterators_on_level(max_grid_level))
1446 *   cell->clear_refine_flag();
1447 * for (const auto &cell :
1448 *      triangulation.active_cell_iterators_on_level(min_grid_level))
1449 *   cell->clear_coarsen_flag();
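 * The error estimation and cell marking steps are largely elided in the listing
 * above. A minimal sketch of what they typically look like in the parallel
 * setting described here is the following; the face quadrature degree and the
 * 0.6/0.4 refine/coarsen fractions are assumptions rather than values taken
 * from this program:
 * @code
 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
 * KellyErrorEstimator<dim>::estimate(dof_handler,
 *                                    QGauss<dim - 1>(fe.degree + 1),
 *                                    {},                       // no Neumann boundary data
 *                                    locally_relevant_solution,
 *                                    estimated_error_per_cell);
 *
 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
 *   triangulation, estimated_error_per_cell, 0.6, 0.4);
 * @endcode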
1456 * <a name="step_86-TheHeatEquationtransfer_solution_vectors_to_new_meshfunction"></a>
1457 * <h4>The HeatEquation::transfer_solution_vectors_to_new_mesh() function</h4>
1461 * The following function then is the second half of the corresponding function
1462 * in @ref step_26 "step-26". It is called by the time stepper whenever it needs to
1463 * transfer the solution and any intermediate stage vectors to a new mesh. We
1464 * must make sure that all input vectors are transformed into ghosted vectors
1465 * before the actual transfer is executed, and that we distribute the hanging
1466 * node constraints on the output vectors as soon as we have interpolated the
1467 * vectors to the new mesh -- i.e., that all constraints are satisfied on
1468 * the vectors we transfer.
1472 * We have no way to enforce boundary conditions at this stage, however. This
1473 * is because the various vectors may correspond to solutions at previous time
1474 * steps if the method used here is a multistep time integrator, and so may
1475 * correspond to different time points that we are not privy to.
1479 * While this could be a problem if we used the values of the solution and of
1480 * the intermediate stages on the constrained degrees of freedom to compute
1481 * the errors, we do not do so. Instead, we compute the errors on the
1482 * differential equation, and ignore algebraic constraints; therefore we do not
1483 * need to guarantee that the boundary conditions are satisfied also in the
1484 * intermediate stages.
1488 * We have at our disposal the hanging node constraints alone, though,
1489 * and therefore we enforce them on the output vectors, even if this is not strictly necessary.
1493 * template <int dim>
1494 * void HeatEquation<dim>::transfer_solution_vectors_to_new_mesh(
1495 *   const double time,
1500 * solution_trans(dof_handler);
1502 * std::vector<PETScWrappers::MPI::Vector> all_in_ghosted(all_in.size());
1503 * std::vector<const PETScWrappers::MPI::Vector *> all_in_ghosted_ptr(
1505 * std::vector<PETScWrappers::MPI::Vector *> all_out_ptr(all_in.size());
1506 * for (unsigned int i = 0; i < all_in.size(); ++i)
1508 * all_in_ghosted[i].reinit(locally_owned_dofs,
1509 * locally_relevant_dofs,
1510 * mpi_communicator);
1511 * all_in_ghosted[i] = all_in[i];
1512 * all_in_ghosted_ptr[i] = &all_in_ghosted[i];
1516 * solution_trans.prepare_for_coarsening_and_refinement(all_in_ghosted_ptr);
1519 * setup_system(time);
1521 * all_out.resize(all_in.size());
1522 * for (unsigned int i = 0; i < all_in.size(); ++i)
1524 * all_out[i].reinit(locally_owned_dofs, mpi_communicator);
1525 * all_out_ptr[i] = &all_out[i];
1527 * solution_trans.interpolate(all_out_ptr);
1530 * hanging_nodes_constraints.distribute(v);
1537 * <a name="step_86-TheHeatEquationupdate_current_constraintsfunction"></a>
1538 * <h4>The HeatEquation::update_current_constraints() function</h4>
1542 * Since regenerating the constraints at each time step may be expensive, we
1543 * make sure that we only do so when the time changes. We track time changes by
1544 * checking whether the time of the `boundary_values_function` has changed with
1545 * respect to the time of the last call to this function. This will work most
1546 * of the time, but not the very first time we call this function, since the
1547 * time then may be zero and the time of the `boundary_values_function` is
1548 * zero at construction time. We therefore also check the number of
1549 * constraints in `current_constraints`, and if it is empty, we regenerate
1550 * the constraints regardless of the time variable.
1553 * template <int dim>
1554 * void HeatEquation<dim>::update_current_constraints(const double time)
1556 * if (current_constraints.n_constraints() == 0 ||
1557 *     time != boundary_values_function.get_time())
1561 * boundary_values_function.set_time(time);
1562 * current_constraints.clear();
1563 * current_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
1564 * current_constraints.merge(hanging_nodes_constraints);
1567 * boundary_values_function,
1568 * current_constraints);
1569 * current_constraints.make_consistent_in_parallel(locally_owned_dofs,
1570 * locally_relevant_dofs,
1571 * mpi_communicator);
1572 * current_constraints.close();
1580 * <a name="step_86-TheHeatEquationrunfunction"></a>
1581 * <h4>The HeatEquation::run() function</h4>
1585 * We have finally arrived at the main function of the class. At the top, it
1586 * creates the mesh and sets up the variables that make up the linear system:
1589 * template <int dim>
1590 * void HeatEquation<dim>::run()
1599 * We then set up the time stepping object and associate the matrix we will
1600 * build whenever requested for both the Jacobian matrix (see the definition
1601 * above of what the "Jacobian" actually refers to) and for the matrix
1602 * that will be used as a preconditioner for the Jacobian.
1607 * petsc_ts(time_stepper_data);
1609 * petsc_ts.set_matrices(jacobian_matrix, jacobian_matrix);
1614 * The real work of setting up the time stepping object starts here. The way this
1616 * class is used is by inverting control: At the end of this function, we
1618 * call the time stepper's solve() function, which runs the loop over time steps and at the appropriate places *calls back*
1619 * into user code for whatever functionality is required. What we need to
1620 * do is to hook up these callback functions by assigning appropriate lambda functions to the callback members of the `petsc_ts` object.
1625 * We start by creating lambda functions that provide information about
1626 * the "implicit function" (i.e., that part of the right hand side of the
1627 * ODE system that we want to treat implicitly -- which in our case is
1628 * the entire right hand side), a function that assembles the Jacobian
1629 * matrix, and a function that solves a linear system with the Jacobian.
1633 * const PETScWrappers::MPI::Vector &y,
1634 * const PETScWrappers::MPI::Vector &y_dot,
1635 * PETScWrappers::MPI::Vector &res) {
1639 * petsc_ts.setup_jacobian = [&](const double time,
1642 *                               const double alpha) {
1643 *   this->assemble_implicit_jacobian(time, y, y_dot, alpha);
1648 * this->solve_with_jacobian(src, dst);
1653 * The next two callbacks deal with identifying and setting variables
1654 * that are considered "algebraic" (rather than "differential"), i.e., for
1655 * which we know what values they are supposed to have rather than letting
1656 * their values be determined by the differential equation. We need to
1657 * instruct the time stepper to ignore these components when computing the
1658 * error in the residuals, and we do so by first providing a function that
1659 * returns an IndexSet with the indices of these algebraic components of the
1660 * solution vector (or rather, that subset of the locally-owned part of the
1661 * vector that is algebraic, in case we are running in parallel). The first
1662 * of the following two functions does that. Specifically, both nodes at
1663 * which Dirichlet boundary conditions are applied and hanging nodes are
1664 * algebraically constrained. This function then returns a set of
1665 * indices that is initially empty (but knows about the size of the index
1666 * space) and which we then construct as the union of boundary and hanging node indices.
1671 * Following this, we then also need a function that, given a solution
1672 * vector `y` and the current time, sets the algebraic components of that
1673 * vector to their correct value. This comes down to ensuring that we have
1674 * up to date constraints in the `current_constraints` variable, and then applying
1675 * these constraints to the solution vector via AffineConstraints::distribute(). (We
1677 * *could* have achieved the same in `solve_with_jacobian()`. Whenever the
1678 * time stepper solves a linear system, it follows up the call to the solver
1679 * by calling the callback to set algebraic components correctly. We could
1680 * also have put the calls to `update_current_constraints()` and
1681 * `distribute()` into the `solve_with_jacobian` function, but by not doing
1682 * so, we can also replace the `solve_with_jacobian` function with a call to
1683 * a PETSc solver, and we would still have the current_constraints correctly
1684 * applied to the solution vector.)
1687 * petsc_ts.algebraic_components = [&]() {
1688 *   IndexSet algebraic_set(dof_handler.n_dofs());
1690 *   algebraic_set.add_indices(
1692 *   return algebraic_set;
1695 * petsc_ts.update_constrained_components =
1698 * update_current_constraints(time);
1699 * current_constraints.distribute(y);
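 * Since several lines of these two callbacks are elided above, here is a
 * minimal sketch of what they amount to; the use of
 * DoFTools::extract_boundary_dofs() and DoFTools::extract_hanging_node_dofs()
 * is an assumption consistent with the description above, as is the timer
 * section name:
 * @code
 * petsc_ts.algebraic_components = [&]() {
 *   IndexSet algebraic_set(dof_handler.n_dofs());
 *   algebraic_set.add_indices(DoFTools::extract_boundary_dofs(dof_handler));
 *   algebraic_set.add_indices(DoFTools::extract_hanging_node_dofs(dof_handler));
 *   return algebraic_set;
 * };
 *
 * petsc_ts.update_constrained_components =
 *   [&](const double time, PETScWrappers::MPI::Vector &y) {
 *     TimerOutput::Scope t(computing_timer, "set algebraic components");
 *     update_current_constraints(time);
 *     current_constraints.distribute(y);
 *   };
 * @endcode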
1705 * The next two callbacks relate to mesh refinement. As discussed above, the time stepper can deal with the
1707 * situation where we want to change the mesh. All we have to provide
1708 * is a callback that returns `true` if we are at a point where we want
1709 * to refine the mesh (and `false` otherwise) and that, if we want to
1710 * do mesh refinement, does some prep work for that in the form of
1711 * calling the `prepare_for_coarsening_and_refinement` function.
1715 * If the first callback below returns `true`, then PETSc TS will
1716 * do some clean-up operations, and call the second of the two callbacks below
1718 * (`petsc_ts.transfer_solution_vectors_to_new_mesh`) with a
1719 * collection of vectors that need to be interpolated from the old
1720 * to the new mesh. This may include the current solution, perhaps
1721 * the current time derivative of the solution, and in the case of
1723 * integrators](https:
1724 * the solutions of some previous time steps. We hand all of these over to
1725 * the `interpolate()` member function of this class.
1728 * petsc_ts.decide_and_prepare_for_remeshing =
1729 *   [&](const double /* time */,
1730 *       const unsigned int step_number,
1732 *     if (step_number > 0 && this->mesh_adaptation_frequency > 0 &&
1733 *         step_number % this->mesh_adaptation_frequency == 0)
1735 *       pcout << std::endl << "Adapting the mesh..." << std::endl;
1736 *       this->prepare_for_coarsening_and_refinement(y);
1743 * petsc_ts.transfer_solution_vectors_to_new_mesh =
1744 *   [&](const double time,
1745 *       const std::vector<PETScWrappers::MPI::Vector> &all_in,
1746 *       std::vector<PETScWrappers::MPI::Vector> &all_out) {
1747 *     this->transfer_solution_vectors_to_new_mesh(time, all_in, all_out);
1752 * The final callback is a "monitor" that is called in each
1753 * time step. Here we use it to create graphical output. Perhaps a better
1754 * scheme would output the solution at fixed time intervals, rather
1755 * than in every time step, but this is not the main point of
1756 * this program and so we go with the easy approach:
1759 * petsc_ts.monitor = [&](const double time,
1761 *                        const unsigned int step_number) {
1762 *   pcout << "Time step " << step_number << " at t=" << time << std::endl;
1763 *   this->output_results(time, step_number, y);
1769 * With all of this out of the way, the rest of the function is
1770 * anticlimactic: We just have to initialize the solution vector with
1771 * the initial conditions and call the function that does the time
1772 * stepping, and everything else will happen automatically:
1778 * petsc_ts.solve(solution);
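 * The initialization just mentioned is elided in the listing above; a minimal
 * sketch of what it presumably amounts to (interpolating the initial value
 * function into a non-ghosted solution vector before handing it to the time
 * stepper) is:
 * @code
 * PETScWrappers::MPI::Vector solution(locally_owned_dofs, mpi_communicator);
 * VectorTools::interpolate(dof_handler, initial_value_function, solution);
 * petsc_ts.solve(solution);
 * @endcode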
1786 * <a name="step_86-Themainfunction"></a>
1787 * <h3>The main() function</h3>
1791 * The rest of the program is as it always looks. We read run-time parameters
1793 * we showed in @ref step_60 "step-60" and @ref step_70 "step-70".
1796 * int main(int argc, char **argv)
1800 * using namespace Step86;
1803 * HeatEquation<2> heat_equation_solver(MPI_COMM_WORLD);
1805 * const std::string input_filename =
1806 *   (argc > 1 ? argv[1] : "heat_equation.prm");
1808 * heat_equation_solver.run();
1810 * catch (std::exception &exc)
1812 * std::cerr << std::endl
1814 *           << "----------------------------------------------------"
1816 * std::cerr << "Exception on processing: " << std::endl
1817 *           << exc.what() << std::endl
1818 *           << "Aborting!" << std::endl
1819 *           << "----------------------------------------------------"
1826 * std::cerr << std::endl
1828 *           << "----------------------------------------------------"
1830 * std::cerr << "Unknown exception!" << std::endl
1831 *           << "Aborting!" << std::endl
1832 *           << "----------------------------------------------------"
1840<a name="step_86-Results"></a><h1>Results</h1>
1843When you run this program with the input file as is, you get output as follows:
1846Number of active cells: 768
1847Number of degrees of freedom: 833
1850 5 linear iterations.
1851 8 linear iterations.
1852Time step 1 at t=0.025
1853 6 linear iterations.
1854Time step 2 at t=0.05
1855 5 linear iterations.
1856 8 linear iterations.
1857Time step 3 at t=0.075
1858 6 linear iterations.
1860 6 linear iterations.
1861Time step 5 at t=0.125
1862 6 linear iterations.
1863Time step 6 at t=0.15
1864 6 linear iterations.
1865Time step 7 at t=0.175
1866 6 linear iterations.
1868 6 linear iterations.
1869Time step 9 at t=0.225
1870 6 linear iterations.
1874Number of active cells: 1050
1875Number of degrees of freedom: 1155
1877Time step 10 at t=0.25
1878 5 linear iterations.
1879 8 linear iterations.
1880Time step 11 at t=0.275
1881 5 linear iterations.
1882 7 linear iterations.
1886Time step 195 at t=4.875
1887 6 linear iterations.
1888Time step 196 at t=4.9
1889 6 linear iterations.
1890Time step 197 at t=4.925
1891 6 linear iterations.
1892Time step 198 at t=4.95
1893 6 linear iterations.
1894Time step 199 at t=4.975
1895 5 linear iterations.
1899Number of active cells: 1380
1900Number of degrees of freedom: 1547
1905+---------------------------------------------+------------+------------+
1906| Total wallclock time elapsed since start | 43.2s | |
1908| Section | no. calls | wall time | % of total |
1909+---------------------------------+-----------+------------+------------+
1910| assemble implicit Jacobian | 226 | 9.93s | 23% |
1911| implicit function | 426 | 16.2s | 37% |
1912| output results | 201 | 9.74s | 23% |
1913| set algebraic components | 200 | 0.0496s | 0.11% |
1914| setup system | 21 | 0.799s | 1.8% |
1915| solve with Jacobian | 226 | 0.56s | 1.3% |
1916| update current constraints | 201 | 1.53s | 3.5% |
1917+---------------------------------+-----------+------------+------------+
1921We can generate output for this in the form of a video:
1924 <iframe width="560" height="315" src="https://www.youtube.com/embed/fhJVkcdRksM"
1926 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
1927 allowfullscreen></iframe>
1930The solution here is driven by boundary values (the initial conditions are zero,
1931and so is the right hand side of the equation). It takes a little bit of time
1932for the boundary values to diffuse into the domain, and so the temperature
1933(the solution of the heat equation) in the interior of the domain has a slight
1934lag compared to the temperature at the boundary.
1937The more interesting component of this program is how easy it is to play with
1938the details of the time stepping algorithm. Recall that the solution above
1939is controlled by the following parameters:
1941 subsection Time stepper
1942 subsection Running parameters
1946 set match final time = false
1947 set maximum number of steps = -1
1948 set options prefix =
1949 set solver type = beuler
1952 subsection Error control
1953 set absolute error tolerance = -1
1955 set ignore algebraic lte = true
1956 set maximum step size = -1
1957 set minimum step size = -1
1958 set relative error tolerance = -1
1962Of particular interest for us here is to set the time stepping algorithm
1963and the adaptive time step control method. The latter is set to "none" above, but there are
1965[several alternative choices](https:
1966for this parameter. For example, we can set parameters as follows,
1968 subsection Time stepper
1969 subsection Running parameters
1973 set match final time = false
1974 set maximum number of steps = -1
1975 set options prefix =
1976 set solver type = bdf
1979 subsection Error control
1980 set absolute error tolerance = 1e-2
1981 set relative error tolerance = 1e-2
1982 set adaptor type = basic
1983 set ignore algebraic lte = true
1984 set maximum step size = 1
1985 set minimum step size = 0.01
1989What we do here is set the initial time step size to 0.025, and choose relatively
1990large absolute and relative error tolerances of 0.01 for the time step size
1991adaptation algorithm (for which we choose "basic"). We ask PETSc TS to use a BDF
1993method, and we get the following as output:
1996===========================================
1997Number of active cells: 768
1998Number of degrees of freedom: 833
2001 5 linear iterations.
2002 5 linear iterations.
2003 5 linear iterations.
2004 4 linear iterations.
2005 6 linear iterations.
2006Time step 1 at t=0.01
2007 5 linear iterations.
2008 5 linear iterations.
2009Time step 2 at t=0.02
2010 5 linear iterations.
2011 5 linear iterations.
2012Time step 3 at t=0.03
2013 5 linear iterations.
2014 5 linear iterations.
2015Time step 4 at t=0.042574
2016 5 linear iterations.
2017 5 linear iterations.
2018Time step 5 at t=0.0588392
2019 5 linear iterations.
2020 5 linear iterations.
2021Time step 6 at t=0.0783573
2022 5 linear iterations.
2023 7 linear iterations.
2024 5 linear iterations.
2025 7 linear iterations.
2026Time step 7 at t=0.100456
2027 5 linear iterations.
2028 7 linear iterations.
2029 5 linear iterations.
2030 7 linear iterations.
2031Time step 8 at t=0.124982
2032 5 linear iterations.
2033 8 linear iterations.
2034 5 linear iterations.
2035 7 linear iterations.
2036Time step 9 at t=0.153156
2037 6 linear iterations.
2038 5 linear iterations.
2042Time step 206 at t=4.96911
2043 5 linear iterations.
2044 5 linear iterations.
2045Time step 207 at t=4.99398
2046 5 linear iterations.
2047 5 linear iterations.
2048Time step 208 at t=5.01723
2051+---------------------------------------------+------------+------------+
2052| Total wallclock time elapsed since start | 117s | |
2054| Section | no. calls | wall time | % of total |
2055+---------------------------------+-----------+------------+------------+
2056| assemble implicit Jacobian | 593 | 35.6s | 31% |
2057| implicit function | 1101 | 56.3s | 48% |
2058| output results | 209 | 12.5s | 11% |
2059| set algebraic components | 508 | 0.156s | 0.13% |
2060| setup system | 21 | 1.11s | 0.95% |
2061| solve with Jacobian | 593 | 1.97s | 1.7% |
2062| update current constraints | 509 | 4.53s | 3.9% |
2063+---------------------------------+-----------+------------+------------+
2065What is happening here is that apparently PETSc TS is not happy with our
2066choice of initial time step size, and after several linear solves has
2067reduced it to the minimum we allow it to, 0.01. The following time steps
2068then run at a time step size of 0.01 until it decides to make it slightly
2069larger again and (apparently) switches to a higher order method that
2070requires more linear solves per time step but allows for a larger time
2071step closer to our initial choice 0.025 again. It does not quite hit the
2072final time of @f$T=5@f$ with its time step choices, but we've got only
2073ourselves to blame for that by setting
2075 set match final time = false
2079Not all combinations of methods, time step adaptation algorithms, and
2080other parameters are valid, but the main messages from the experiment
2081above that you should take away are:
2082- It would undoubtedly be quite time consuming to implement many of the
2083  methods that PETSc TS offers for time stepping -- but with a program
2084  such as the one here, we don't need to: We can just select from the
2085  many methods PETSc TS already has.
2086- Adaptive time step control is difficult; adaptive choice of which
2087  method or order to choose is perhaps even more difficult. None of the
2088  time dependent programs that came before the current one (say, @ref step_23 "step-23",
2089  @ref step_26 "step-26", @ref step_31 "step-31", @ref step_58 "step-58", and a good number of others) have either.
2090  Moreover, while deal.II is good at spatial adaptation of meshes, it
2091  is not a library written by experts in time step adaptation, and so will
2092  likely not gain this ability either. But, again, it doesn't
2093  have to: We can rely on a library written by experts in that area.
2097<a name="step-86-extensions"></a>
2098<a name="step_86-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2101The program actually runs in parallel, even though we have not used
2102that above. Specifically, if you have configured deal.II to use MPI,
2103then you can do `mpirun -np 8 ./step-86 heat_equation.prm` to run the
2104program with 8 processes.
2106For the program as currently written (and in debug mode), this makes
2107little difference: It will run about twice as fast, but take about 8
2108times as much CPU time. That is because the problem is just so small:
2109Generally between 1000 and 2000 degrees of freedom. A good rule of
2110thumb is that programs really only benefit from parallel computing if
2111you have somewhere in the range of 50,000 to 100,000 unknowns *per MPI
2112process*. But it is not difficult to adapt the program at hand here to
2113run with a much finer mesh, or perhaps in 3d, so that one is beyond
2114that limit and sees the benefits of parallel computing.
2118<a name="step_86-PlainProg"></a>
2119<h1> The plain program</h1>
2120@include "step-86.cc"