    , initial_global_refinement(5)
    , max_delta_refinement_level(2)
    , mesh_adaptation_frequency(0)
    , right_hand_side_function("/Heat Equation/Right hand side")
    , initial_value_function("/Heat Equation/Initial value")
    , boundary_values_function("/Heat Equation/Boundary values")
  {
    enter_subsection("Time stepper");
    {
      enter_my_subsection(this->prm);
      {
        time_stepper_data.add_parameters(this->prm);
      }
      leave_my_subsection(this->prm);
    }
    leave_subsection();

    add_parameter("Initial global refinement",
                  initial_global_refinement,
                  "Number of times the mesh is refined globally before "
                  "starting the time stepping.");
    add_parameter("Maximum delta refinement level",
                  max_delta_refinement_level,
                  "Maximum number of local refinement levels.");
    add_parameter("Mesh adaptation frequency",
                  mesh_adaptation_frequency,
                  "When to adapt the mesh.");
  }
834 * <a name=
"step_86-TheHeatEquationsetup_systemfunction"></a>
835 * <h4>The HeatEquation::setup_system() function</h4>
839 * This function is not very different from what we do in many other programs
840 * (including @ref step_26 "step-26"). We enumerate degrees of freedom, output some
841 * information about then, build constraint objects (recalling that we
842 * put hanging node constraints into their separate
object), and then
843 * also build an AffineConstraint
object that contains both the hanging
844 * node constraints as well as constraints corresponding to zero Dirichlet
845 * boundary conditions. This last
object is needed since we impose the
846 * constraints through algebraic equations. While technically it would be
847 * possible to use the time derivative of the boundary function as a boundary
848 * conditions for the time derivative of the solution, this is not done here.
849 * Instead, we impose the boundary conditions through algebraic equations, and
850 * therefore the time derivative of the boundary conditions is not part of
851 * the algebraic system, and we need zero boundary conditions on the time
852 * derivative of the solution when computing the residual. We use the
853 * `homogeneous_constraints`
object for this purpose.
857 * Finally, we create the actual non-homogeneous `current_constraints` by
858 * calling `update_current_constraints). These are also used during the
859 * assembly and during the residual evaluation.
  template <int dim>
  void HeatEquation<dim>::setup_system(const double time)
  {
    TimerOutput::Scope t(computing_timer, "setup system");

    dof_handler.distribute_dofs(fe);

    pcout << "Number of active cells:       " << triangulation.n_active_cells()
          << std::endl
          << "Number of degrees of freedom: " << dof_handler.n_dofs()
          << std::endl;

    locally_owned_dofs = dof_handler.locally_owned_dofs();
    locally_relevant_dofs =
      DoFTools::extract_locally_relevant_dofs(dof_handler);

    hanging_nodes_constraints.clear();
    hanging_nodes_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
    DoFTools::make_hanging_node_constraints(dof_handler,
                                            hanging_nodes_constraints);
    hanging_nodes_constraints.make_consistent_in_parallel(locally_owned_dofs,
                                                          locally_relevant_dofs,
                                                          mpi_communicator);
    hanging_nodes_constraints.close();

    homogeneous_constraints.clear();
    homogeneous_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
    homogeneous_constraints.merge(hanging_nodes_constraints);
    VectorTools::interpolate_boundary_values(dof_handler,
                                             0,
                                             Functions::ZeroFunction<dim>(),
                                             homogeneous_constraints);
    homogeneous_constraints.make_consistent_in_parallel(locally_owned_dofs,
                                                        locally_relevant_dofs,
                                                        mpi_communicator);
    homogeneous_constraints.close();

    update_current_constraints(time);
The final block of code resets and initializes the matrix object with the
appropriate sparsity pattern. Recall that we do not store solution vectors in
this class (the time integrator object does that internally), and so we do not
have to resize and initialize them either.
    DynamicSparsityPattern dsp(locally_relevant_dofs);
    DoFTools::make_sparsity_pattern(dof_handler, dsp, homogeneous_constraints, false);
    SparsityTools::distribute_sparsity_pattern(dsp,
                                               locally_owned_dofs,
                                               mpi_communicator,
                                               locally_relevant_dofs);
    jacobian_matrix.reinit(locally_owned_dofs, locally_owned_dofs, dsp, mpi_communicator);
  }
934 * <a name=
"step_86-TheHeatEquationoutput_resultsfunction"></a>
935 * <h4>The HeatEquation::output_results() function</h4>
939 * This function is called from "monitor" function that is called in turns
940 * by the time stepper in each time step. We use it to write the solution to a
941 * file, and provide graphical output through paraview or visit. We also write
942 * a pvd file, which groups all metadata about the `.vtu` files into a single
943 * file that can be used to load the full time dependent solution in paraview.
  template <int dim>
  void HeatEquation<dim>::output_results(const double       time,
                                         const unsigned int timestep_number,
                                         const PETScWrappers::MPI::Vector &y)
  {
    TimerOutput::Scope t(computing_timer, "output results");

    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(y, "U");
    data_out.build_patches();

    const std::string filename =
      "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtu";
    data_out.write_vtu_in_parallel(filename, mpi_communicator);

    if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
      {
        static std::vector<std::pair<double, std::string>> times_and_names;
        times_and_names.emplace_back(time, filename);

        std::ofstream pvd_output("solution.pvd");
        DataOutBase::write_pvd_record(pvd_output, times_and_names);
      }
  }
979 * <a name=
"step_86-TheHeatEquationimplicit_functionfunction"></a>
980 * <h4>The HeatEquation::implicit_function() function</h4>
984 * As discussed in the introduction, we describe the ODE system to the time
985 * stepper via its residual,
987 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) -
F(t).
989 * The following function computes it, given vectors
for @f$U,\dot U@f$ that
990 * we will denote by `y` and `y_dot` because that
's how they are called
991 * in the documentation of the PETScWrappers::TimeStepper class.
At the top of the function, we do the usual set up when computing integrals.
We face two minor difficulties here: the first is that the `y` and `y_dot`
vectors we get as input are read only, but we need to make sure they satisfy
the correct boundary conditions and so have to set elements in these vectors.
The second is that we need to compute the residual, and therefore in general
we need to evaluate solution values and gradients inside locally owned cells,
and for this need access to degrees of freedom which may be owned by
neighboring processors. To address these issues, we create (non-ghosted)
writable copies of the input vectors, apply boundary conditions and hanging
node constraints, and then copy these vectors to ghosted vectors before we can
do anything sensible with them.
  template <int dim>
  void
  HeatEquation<dim>::implicit_function(const double                      time,
                                       const PETScWrappers::MPI::Vector &y,
                                       const PETScWrappers::MPI::Vector &y_dot,
                                       PETScWrappers::MPI::Vector       &residual)
  {
    TimerOutput::Scope t(computing_timer, "implicit function");

    PETScWrappers::MPI::Vector tmp_solution(locally_owned_dofs,
                                            mpi_communicator);
    PETScWrappers::MPI::Vector tmp_solution_dot(locally_owned_dofs,
                                                mpi_communicator);
    tmp_solution     = y;
    tmp_solution_dot = y_dot;

    update_current_constraints(time);
    current_constraints.distribute(tmp_solution);
    homogeneous_constraints.distribute(tmp_solution_dot);

    PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
                                                         locally_relevant_dofs,
                                                         mpi_communicator);
    PETScWrappers::MPI::Vector locally_relevant_solution_dot(
      locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
    locally_relevant_solution     = tmp_solution;
    locally_relevant_solution_dot = tmp_solution_dot;

    const QGauss<dim> quadrature_formula(fe.degree + 1);
    FEValues<dim>     fe_values(fe,
                                quadrature_formula,
                                update_values | update_gradients |
                                  update_quadrature_points | update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
    const unsigned int n_q_points    = quadrature_formula.size();

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    std::vector<Tensor<1, dim>> solution_gradients(n_q_points);
    std::vector<double>         solution_dot_values(n_q_points);

    Vector<double> cell_residual(dofs_per_cell);

    right_hand_side_function.set_time(time);
Now for computing the actual residual. Recall that we want to compute
@f[
  R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
@f]
We could do that by actually forming the matrices @f$M@f$ and @f$A@f$, but
this is not efficient. Instead, recall (by writing out how the elements of
@f$M@f$ and @f$A@f$ are defined, and exchanging integrals and sums) that the
@f$i@f$th element of the residual vector is given by
@f{align*}{
  R(t,U,\dot U)_i
  &= \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t)
     \frac{\partial U_j(t)}{\partial t}
   + \sum_j \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
     \varphi_j(\mathbf x, t) U_j(t)
   - \int_\Omega \varphi_i f(\mathbf x, t)
  \\
  &= \int_\Omega \varphi_i(\mathbf x, t)
     \frac{\partial u_h(\mathbf x, t)}{\partial t}
   + \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla u_h(\mathbf x, t)
   - \int_\Omega \varphi_i f(\mathbf x, t).
@f}
We can compute these integrals efficiently by breaking them up into a sum over
all cells and then applying quadrature. For the integrand, we need to evaluate
the solution and its gradient at the quadrature points within each locally
owned cell, and for this we also need access to degrees of freedom that may be
owned by neighboring processors. We therefore use the
locally_relevant_solution and locally_relevant_solution_dot vectors.
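Written out with a quadrature rule -- quadrature points @f$\mathbf x_q@f$ and
weights-times-Jacobians `JxW(q)` -- this is exactly the sum that the cell loop
below accumulates, with the outer sum running over the locally owned cells of
each process:
@f{align*}{
  R(t,U,\dot U)_i
  \approx
  \sum_K \sum_q
  \left[
    \varphi_i(\mathbf x_q)\, \frac{\partial u_h}{\partial t}(\mathbf x_q)
    + \nabla \varphi_i(\mathbf x_q) \cdot \nabla u_h(\mathbf x_q)
    - \varphi_i(\mathbf x_q)\, f(\mathbf x_q, t)
  \right] \text{JxW}(q).
@f}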
    residual = 0;
    for (const auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        {
          fe_values.reinit(cell);

          fe_values.get_function_gradients(locally_relevant_solution,
                                           solution_gradients);
          fe_values.get_function_values(locally_relevant_solution_dot,
                                        solution_dot_values);

          cell->get_dof_indices(local_dof_indices);

          cell_residual = 0;
          for (const unsigned int q : fe_values.quadrature_point_indices())
            for (const unsigned int i : fe_values.dof_indices())
              cell_residual(i) +=
                (fe_values.shape_value(i, q) *       // [phi_i(x_q) *
                   solution_dot_values[q]            //  u_dot(x_q)
                 +                                   // +
                 fe_values.shape_grad(i, q) *        //  grad phi_i(x_q) *
                   solution_gradients[q]             //  grad u(x_q)
                 -                                   // -
                 fe_values.shape_value(i, q) *       //  phi_i(x_q) *
                   right_hand_side_function.value(
                     fe_values.quadrature_point(q))) //  f(x_q)]
                * fe_values.JxW(q);                  // * dx

          current_constraints.distribute_local_to_global(cell_residual,
                                                         local_dof_indices,
                                                         residual);
        }
    residual.compress(VectorOperation::add);
The end result of the operations above is a vector that contains the residual
vector, having taken into account the constraints due to hanging nodes and
Dirichlet boundary conditions (by virtue of having used
`current_constraints.distribute_local_to_global()` to add the local
contributions to the global vector). At the end of the day, the residual
vector @f$r@f$ will be used in the solution of linear systems of the form
@f$J z = r@f$ with the "Jacobian" matrix that we define below. We want the
algebraic components of @f$z@f$ to have predictable values that achieve the
purposes discussed in the following. We do this by ensuring that the entries
corresponding to algebraic components in the residual @f$r@f$ have specific
values, and then we will do the same in the next function for the matrix; for
this, you will have to know that the rows and columns of the matrix
corresponding to constrained entries are zero with the exception of the
diagonal entries. We will manually set that diagonal entry to one, and so
@f$z_i=r_i@f$ for algebraic components.
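Schematically -- if, purely for illustration, we numbered the differential
components (subscript @f$d@f$) first and the algebraic, i.e. constrained,
components (subscript @f$a@f$) last -- the linear systems the time stepper
solves then have the block form
@f{align*}{
  J z =
  \begin{pmatrix} J_{dd} & 0 \\ 0 & I \end{pmatrix}
  \begin{pmatrix} z_d \\ z_a \end{pmatrix}
  =
  \begin{pmatrix} r_d \\ r_a \end{pmatrix},
@f}
so that the algebraic part of the solution indeed satisfies
@f$z_a = r_a@f$ component by component.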
From the point of view of the residual vector, if the input `y` vector does
not contain the correct values on constrained degrees of freedom (hanging
nodes or boundary conditions), we need to communicate this to the time
stepper, and we do so by setting the residual to the actual difference between
the input `y` vector and our local copy of it, in which we have applied the
constraints (see the top of the function where we called
`current_constraints.distribute(tmp_solution)` and a similar operation on the
time derivative). Since we have made a copy of the input vector for this
purpose, we use it to compute the residual value. However, there is a
difference between hanging node constraints and boundary conditions: we do not
want to make hanging node constraints actually depend on their dependent
degrees of freedom, since this would imply that we are actually solving for
the dependent degrees of freedom. This is not what we are actually doing,
however, since hanging nodes are not actually solved for. They are eliminated
from the system by the call to AffineConstraints::distribute_local_to_global()
above. From the point of view of the Jacobian matrix, we are effectively
setting hanging nodes to an artificial value (usually zero), and we simply
want to make sure that we solve for those degrees of freedom a posteriori, by
calling the function AffineConstraints::distribute().

Here we therefore set the residual equal to the input value on the constrained
dofs corresponding to hanging nodes (i.e., those for which the lines of
`current_constraints` contain at least one other entry), and to the difference
between the input vector and the actual solution on those constraints that
correspond to boundary conditions.
    for (const auto &c : current_constraints.get_lines())
      if (locally_owned_dofs.is_element(c.index))
        {
          if (c.entries.empty()) /* no dependencies -> a Dirichlet node */
            residual[c.index] = y[c.index] - tmp_solution[c.index];
          else /* has dependencies -> a hanging node */
            residual[c.index] = y[c.index];
        }
    residual.compress(VectorOperation::insert);
  }
1190 * <a name="step_86-TheHeatEquationassemble_implicit_jacobianfunction"></a>
1191 * <h4>The HeatEquation::assemble_implicit_jacobian() function</h4>
1195 * The next operation is to compute the "Jacobian", which PETSc TS defines
1198 * J_\alpha = \dfrac{\partial R}{\partial y} + \alpha \dfrac{\partial
1199 * R}{\partial \dot y}
1201 * which, for the current linear problem, is simply
1203 * J_\alpha = A + \alpha M
1205 * and which is in particular independent of time and the current solution
1206 * vectors @f$y@f$ and @f$\dot y@f$.
1210 * Having seen the assembly of matrices before, there is little that should
1211 * surprise you in the actual assembly here:
  template <int dim>
  void HeatEquation<dim>::assemble_implicit_jacobian(
    const double /* time */,
    const PETScWrappers::MPI::Vector & /* y */,
    const PETScWrappers::MPI::Vector & /* y_dot */,
    const double alpha)
  {
    TimerOutput::Scope t(computing_timer, "assemble implicit Jacobian");

    const QGauss<dim> quadrature_formula(fe.degree + 1);
    FEValues<dim>     fe_values(fe,
                                quadrature_formula,
                                update_values | update_gradients |
                                  update_quadrature_points | update_JxW_values);

    const unsigned int dofs_per_cell = fe.n_dofs_per_cell();

    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);

    jacobian_matrix = 0;
    for (const auto &cell : dof_handler.active_cell_iterators())
      if (cell->is_locally_owned())
        {
          fe_values.reinit(cell);

          cell->get_dof_indices(local_dof_indices);

          cell_matrix = 0;
          for (const unsigned int q : fe_values.quadrature_point_indices())
            for (const unsigned int i : fe_values.dof_indices())
              for (const unsigned int j : fe_values.dof_indices())
                cell_matrix(i, j) +=
                  (fe_values.shape_grad(i, q) *     // (grad phi_i(x_q) *
                     fe_values.shape_grad(j, q)     //  grad phi_j(x_q)
                   +                                //  +
                   alpha *                          //  alpha *
                     fe_values.shape_value(i, q) *  //  phi_i(x_q) *
                     fe_values.shape_value(j, q)) * //  phi_j(x_q))
                  fe_values.JxW(q);                 // * dx

          current_constraints.distribute_local_to_global(cell_matrix,
                                                         local_dof_indices,
                                                         jacobian_matrix);
        }
    jacobian_matrix.compress(VectorOperation::add);
The only interesting part is the following. Recall that we modified the
residual vector's entries corresponding to the algebraic components of the
solution in the previous function. The outcome of calling
`current_constraints.distribute_local_to_global()` a few lines above is that
the global matrix has zero rows and columns for the algebraic (constrained)
components of the solution; the function puts a nonzero value onto the
diagonal of these rows and columns, but the value it chooses is unknown to us
-- in other cases, where we call
`current_constraints.distribute_local_to_global()` on both the left side
matrix and the right side vector, as in most other tutorial programs, the
matrix diagonal entries and the right hand side values are chosen in such a
way that the result of solving a linear system is what we want it to be, but
the scaling is done automatically.

This is not good enough for us here, because we are building the right hand
side independently from the matrix in different functions. Thus, for any
constrained degree of freedom, we set the diagonal of the Jacobian to one.
This leaves the Jacobian matrix invertible, consistent with what the time
stepper expects, and it also makes sure that, if we did not make a mistake in
the residual and/or in the Jacobian matrix, then asking the time stepper to
check the Jacobian with a finite difference method will produce the correct
result. This can be activated at run time by passing the
`-snes_test_jacobian` option on the command line.
    for (const auto &c : current_constraints.get_lines())
      jacobian_matrix.set(c.index, c.index, 1.0);
    jacobian_matrix.compress(VectorOperation::insert);
  }
1304 * <a name=
"step_86-TheHeatEquationsolve_with_jacobianfunction"></a>
1305 * <h4>The HeatEquation::solve_with_jacobian() function</h4>
1309 * This is the function that actually solves the linear system with the
1310 * Jacobian matrix we have previously built (in a call to the previous
1311 * function during the current time step or another earlier one -- time
1312 * steppers are quite sophisticated in determining internally whether it is
1313 * necessary to update the Jacobian matrix, or whether one can reuse it for
1314 * another time step without rebuilding it; this is similar to how one can
1315 * re-use the Newton matrix for several Newton steps, see for example the
1316 * discussion in @ref step_77 "step-77"). We could in principle not provide this function to
1317 * the time stepper, and instead select a specific solver on the command line
1318 * by using the `-ksp_*` options of
PETSc. However, by providing this
1319 * function, we can use a specific solver and preconditioner for the linear
1320 * system, and still have the possibility to change them on the command line.
1324 * Providing a specific solver is more in line with the way we usually do
1325 * things in other deal.II examples, while letting
PETSc choose a generic
1326 * solver, and changing it on the command line via `-ksp_type` is more in line
1327 * with the way
PETSc is usually used, and it can be a convenient approach
1328 * when we are experimenting to find an optimal solver for our problem. Both
1329 * options are available here, since we can still change both the solver and
1330 * the preconditioner on the command line via `-user_ksp_type` and
1331 * `-user_pc_type` options.
In any case, recall that the Jacobian we built in the previous function is
always of the form
@f[
  J_\alpha = \alpha M + A
@f]
where @f$M@f$ is a mass matrix and @f$A@f$ a Laplace matrix. @f$M@f$ is
symmetric and positive definite; @f$A@f$ is symmetric and at least positive
semidefinite; and @f$\alpha > 0@f$. As a consequence, the Jacobian is a
symmetric and positive definite matrix, and we can efficiently solve linear
systems with it using the Conjugate Gradient method, along with either SSOR
or (if available) the algebraic multigrid implementation provided by PETSc
(via the Hypre package) as preconditioner. In practice, if one wanted to solve
"real" problems, one would spend some time finding which preconditioner is
optimal, perhaps using PETSc's ability to read solver and preconditioner
choices from the command line. But this is not the focus of this tutorial
program, and so we just go with the following:
1353 * template <
int dim>
1360 * #
if defined(PETSC_HAVE_HYPRE)
1362 * preconditioner.
initialize(jacobian_matrix);
1371 * cg.set_prefix(
"user_");
1373 * cg.solve(jacobian_matrix, dst, src, preconditioner);
1375 * pcout <<
" " << solver_control.last_step() <<
" linear iterations."
1383 * <a name=
"step_86-TheHeatEquationprepare_for_coarsening_and_refinementfunction"></a>
1384 * <h4>The HeatEquation::prepare_for_coarsening_and_refinement() function</h4>
1388 * The next block of functions deals with mesh refinement. We split this
1389 * process up into a "decide whether and what you want to refine" and a
1390 * "please transfer these vectors from old to new mesh" phase, where the
first
1391 * also deals with marking cells for refinement. (The decision whether or
1392 * not to refine is done in the lambda function that calls the current
1397 * Breaking things into a "mark cells" function and into a "execute mesh
1398 * adaptation and transfer solution vectors" function is awkward, though
1399 * conceptually not difficult to understand. These two pieces of code
1400 * should really be part of the same function, as they are in @ref step_26 "step-26".
1401 * The issue is with what
PETScWrappers::TimeStepper provides us with in these
1402 * callbacks. Specifically, the "decide whether and what you want to refine"
1403 * callback has access to the current solution, and so can evaluate
1404 * (spatial) error estimators to decide which cells to refine. The
1405 *
second callback that transfers vectors from old to new mesh gets a bunch
1406 * of vectors, but without the semantic information on which of these is
1407 * the current solution vector. As a consequence, it cannot do the marking
1408 * of cells for refinement or coarsening, and we have to do that from the
1413 * In practice, however, the problem is minor. The
first of these
1414 * two functions is essentially identical to the
1415 *
first half of the corresponding function in @ref step_26 "step-26", with the only
1417 * function instead of the serial one. Once again, we make sure that we never
1418 * fall below the minimum refinement
level, and above the maximum one, that we
1419 * can select from the parameter file.
1422 * template <
int dim>
1423 *
void HeatEquation<dim>::prepare_for_coarsening_and_refinement(
1427 * locally_relevant_dofs,
1428 * mpi_communicator);
1429 * locally_relevant_solution = y;
1435 * locally_relevant_solution,
1436 * estimated_error_per_cell);
1441 *
const unsigned int max_grid_level =
1442 * initial_global_refinement + max_delta_refinement_level;
1443 *
const unsigned int min_grid_level = initial_global_refinement;
1446 *
for (
const auto &cell :
1447 *
triangulation.active_cell_iterators_on_level(max_grid_level))
1448 * cell->clear_refine_flag();
1449 *
for (
const auto &cell :
1450 *
triangulation.active_cell_iterators_on_level(min_grid_level))
1451 * cell->clear_coarsen_flag();
1458 * <a name=
"step_86-TheHeatEquationtransfer_solution_vectors_to_new_meshfunction"></a>
1459 * <h4>The HeatEquation::transfer_solution_vectors_to_new_mesh() function</h4>
1463 * The following function then is the
second half of the correspond function
1464 * in @ref step_26 "step-26". It is called by the time stepper whenever it requires to
1465 * transfer the solution and any intermediate stage vectors to a new mesh. We
1466 * must make sure that all input vectors are transformed into ghosted vectors
1467 * before the actual transfer is executed, and that we distribute the hanging
1468 * node constraints on the output vectors as soon as we have interpolated the
1469 * vectors to the new mesh -- i.e., that all constraints are satisfied on
1470 * the vectors we transfer.
1474 * We have no way to enforce boundary conditions at this stage, however. This
1475 * is because the various vectors may correspond to solutions at previous time
1476 * steps if the method used here is a multistep time integrator, and so may
1477 * correspond to different time points that we are not privy to.
1481 * While this could be a problem if we used the values of the solution and of
1482 * the intermediate stages on the constrained degrees of freedom to compute
1483 * the errors, we do not do so. Instead, we compute the errors on the
1484 * differential equation, and ignore algebraic constraints; therefore we do no
1485 * need to guarantee that the boundary conditions are satisfied also in the
1486 * intermediate stages.
1490 * We have at our disposal the hanging node current_constraints alone, though,
1491 * and therefore we enforce them on the output vectors, even if this is not
1495 * template <
int dim>
1496 *
void HeatEquation<dim>::transfer_solution_vectors_to_new_mesh(
1497 * const
double time,
1502 * solution_trans(dof_handler);
1504 * std::vector<PETScWrappers::MPI::Vector> all_in_ghosted(all_in.size());
1505 * std::vector<const PETScWrappers::MPI::Vector *> all_in_ghosted_ptr(
1507 * std::vector<PETScWrappers::MPI::Vector *> all_out_ptr(all_in.size());
1508 *
for (
unsigned int i = 0; i < all_in.size(); ++i)
1510 * all_in_ghosted[i].reinit(locally_owned_dofs,
1511 * locally_relevant_dofs,
1512 * mpi_communicator);
1513 * all_in_ghosted[i] = all_in[i];
1514 * all_in_ghosted_ptr[i] = &all_in_ghosted[i];
1518 * solution_trans.prepare_for_coarsening_and_refinement(all_in_ghosted_ptr);
1521 * setup_system(time);
1523 * all_out.resize(all_in.size());
1524 *
for (
unsigned int i = 0; i < all_in.size(); ++i)
1526 * all_out[i].reinit(locally_owned_dofs, mpi_communicator);
1527 * all_out_ptr[i] = &all_out[i];
1529 * solution_trans.interpolate(all_out_ptr);
1532 * hanging_nodes_constraints.distribute(v);
1539 * <a name=
"step_86-TheHeatEquationupdate_current_constraintsfunction"></a>
1540 * <h4>The HeatEquation::update_current_constraints() function</h4>
1544 * Since regenerating the constraints at each time step may be expensive, we
1545 * make sure that we only do so when the time changes. We track time change by
1546 * checking if the time of the boundary_values_function has changed, with
1547 * respect to the time of the last call to this function. This will work most
1548 * of the times, but not the very
first time we call this function, since the
1549 * time then may be zero and the time of the `boundary_values_function` is
1550 * zero at construction time. We therefore also check if the number
1551 * constraints in `current_constraints`, and if these are empty, we regenerate
1552 * the constraints regardless of the time variable.
1555 * template <
int dim>
1556 *
void HeatEquation<dim>::update_current_constraints(const
double time)
1558 *
if (current_constraints.n_constraints() == 0 ||
1559 * time != boundary_values_function.get_time())
1563 * boundary_values_function.set_time(time);
1564 * current_constraints.clear();
1565 * current_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
1566 * current_constraints.merge(hanging_nodes_constraints);
1569 * boundary_values_function,
1570 * current_constraints);
1571 * current_constraints.make_consistent_in_parallel(locally_owned_dofs,
1572 * locally_relevant_dofs,
1573 * mpi_communicator);
1574 * current_constraints.close();
1582 * <a name=
"step_86-TheHeatEquationrunfunction"></a>
1583 * <h4>The HeatEquation::run() function</h4>
1587 * We have finally arrived at the main function of the class. At the top, it
1588 * creates the mesh and sets up the variables that make up the linear system:
1591 * template <
int dim>
1592 *
void HeatEquation<dim>::run()
We then set up the time stepping object and associate with it the matrix that
we will build whenever requested, used both as the Jacobian matrix (see the
definition above of what the "Jacobian" actually refers to) and as the matrix
that will be used as a preconditioner for the Jacobian.
    PETScWrappers::TimeStepper<PETScWrappers::MPI::Vector,
                               PETScWrappers::MPI::SparseMatrix>
      petsc_ts(time_stepper_data);
    petsc_ts.set_matrices(jacobian_matrix, jacobian_matrix);
The real work of setting up the time stepping object starts here. As discussed
in the introduction, the way the PETScWrappers::TimeStepper class is used is
by inverting control: At the end of this function, we call
PETScWrappers::TimeStepper::solve(), which internally runs the loop over time
steps and, at the appropriate places, *calls back* into user code for whatever
functionality is required. What we need to do is to hook up these callback
functions by assigning appropriate lambda functions to member variables of the
PETScWrappers::TimeStepper object.

We start by creating lambda functions that provide information about the
"implicit function" (i.e., that part of the right hand side of the ODE system
that we want to treat implicitly -- which in our case is the entire right hand
side), a function that assembles the Jacobian matrix, and a function that
solves a linear system with the Jacobian.
    petsc_ts.implicit_function = [&](const double                      time,
                                     const PETScWrappers::MPI::Vector &y,
                                     const PETScWrappers::MPI::Vector &y_dot,
                                     PETScWrappers::MPI::Vector       &res) {
      this->implicit_function(time, y, y_dot, res);
    };

    petsc_ts.setup_jacobian = [&](const double                      time,
                                  const PETScWrappers::MPI::Vector &y,
                                  const PETScWrappers::MPI::Vector &y_dot,
                                  const double                      alpha) {
      this->assemble_implicit_jacobian(time, y, y_dot, alpha);
    };

    petsc_ts.solve_with_jacobian =
      [&](const PETScWrappers::MPI::Vector &src, PETScWrappers::MPI::Vector &dst) {
        this->solve_with_jacobian(src, dst);
      };
The next two callbacks deal with identifying and setting variables that are
considered "algebraic" (rather than "differential"), i.e., for which we know
what values they are supposed to have rather than letting their values be
determined by the differential equation. We need to instruct the time stepper
to ignore these components when computing the error in the residuals, and we
do so by first providing a function that returns an IndexSet with the indices
of these algebraic components of the solution vector (or rather, that subset
of the locally-owned part of the vector that is algebraic, in case we are
running in parallel). The first of the following two functions does that.
Specifically, both nodes at which Dirichlet boundary conditions are applied
and hanging nodes are algebraically constrained. This function then returns a
set of indices that is initially empty (but knows about the size of the index
space) and which we then construct as the union of boundary and hanging node
indices.
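For readers unfamiliar with the IndexSet class, the following small
stand-alone sketch (with made-up numbers, not the tutorial's code) shows the
two operations used below -- creating a set that only knows the size of the
index space, and adding the indices of other sets to form a union:

@code
#include <deal.II/base/index_set.h>

dealii::IndexSet union_of_two_sets()
{
  // An index set over an index space of size 100 that is initially empty:
  dealii::IndexSet algebraic_set(100);

  dealii::IndexSet boundary_like(100), hanging_like(100);
  boundary_like.add_range(0, 10); // indices 0..9
  hanging_like.add_index(42);

  // add_indices() forms the union with the given set:
  algebraic_set.add_indices(boundary_like);
  algebraic_set.add_indices(hanging_like); // now contains 0..9 and 42

  return algebraic_set;
}
@endcode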
Following this, we then also need a function that, given a solution vector `y`
and the current time, sets the algebraic components of that vector to their
correct value. This comes down to ensuring that we have up to date constraints
in the `current_constraints` variable, and then applying these constraints to
the solution vector via AffineConstraints::distribute(). (It is perhaps worth
noting that we *could* have achieved the same in `solve_with_jacobian()`.
Whenever the time stepper solves a linear system, it follows up the call to
the solver by calling this callback to set the algebraic components to their
correct values. We could also have put the calls to
`update_current_constraints()` and `distribute()` into the
`solve_with_jacobian` function, but by not doing so, we can also replace the
`solve_with_jacobian` function with a call to a PETSc solver, and we would
still have the constraints correctly applied to the solution vector.)
    petsc_ts.algebraic_components = [&]() {
      IndexSet algebraic_set(dof_handler.n_dofs());
      algebraic_set.add_indices(DoFTools::extract_boundary_dofs(dof_handler));
      algebraic_set.add_indices(
        DoFTools::extract_hanging_node_dofs(dof_handler));
      return algebraic_set;
    };

    petsc_ts.update_constrained_components =
      [&](const double time, PETScWrappers::MPI::Vector &y) {
        TimerOutput::Scope t(computing_timer, "set algebraic components");
        update_current_constraints(time);
        current_constraints.distribute(y);
      };
The next two callbacks relate to mesh refinement. As discussed in the
introduction, PETSc TS is able to deal with the situation where we want to
change the mesh. All we have to provide is a callback that returns `true` if
we are at a point where we want to refine the mesh (and `false` otherwise) and
that, if we want to do mesh refinement, does some prep work for that in the
form of calling the `prepare_for_coarsening_and_refinement` function.

If the first callback below returns `true`, then PETSc TS will do some
clean-up operations and call the second of the two callbacks
(`petsc_ts.transfer_solution_vectors_to_new_mesh`) with a collection of
vectors that need to be interpolated from the old to the new mesh. This may
include the current solution, perhaps the current time derivative of the
solution, and in the case of multistep time integrators also the solutions of
some previous time steps. We hand all of these over to the `interpolate()`
member function of this class.
    petsc_ts.decide_and_prepare_for_remeshing =
      [&](const double /* time */,
          const unsigned int                step_number,
          const PETScWrappers::MPI::Vector &y) -> bool {
      if (step_number > 0 && this->mesh_adaptation_frequency > 0 &&
          step_number % this->mesh_adaptation_frequency == 0)
        {
          pcout << std::endl << "Adapting the mesh..." << std::endl;
          this->prepare_for_coarsening_and_refinement(y);
          return true;
        }
      else
        return false;
    };

    petsc_ts.transfer_solution_vectors_to_new_mesh =
      [&](const double                                    time,
          const std::vector<PETScWrappers::MPI::Vector> &all_in,
          std::vector<PETScWrappers::MPI::Vector>        &all_out) {
        this->transfer_solution_vectors_to_new_mesh(time, all_in, all_out);
      };
The final callback is a "monitor" that is called in each time step. Here we
use it to create graphical output. Perhaps a better scheme would output the
solution at fixed time intervals rather than in every time step, but this is
not the main point of this program and so we go with the easy approach:

    petsc_ts.monitor = [&](const double                      time,
                           const PETScWrappers::MPI::Vector &y,
                           const unsigned int                step_number) {
      pcout << "Time step " << step_number << " at t=" << time << std::endl;
      this->output_results(time, step_number, y);
    };
With all of this out of the way, the rest of the function is anticlimactic: We
just have to initialize the solution vector with the initial conditions and
call the function that does the time stepping, and everything else will happen
automatically:

    PETScWrappers::MPI::Vector solution(locally_owned_dofs, mpi_communicator);
    VectorTools::interpolate(dof_handler, initial_value_function, solution);

    petsc_ts.solve(solution);
  }
1788 * <a name=
"step_86-Themainfunction"></a>
1789 * <h3>The main() function</h3>
1793 * The rest of the program is as it always looks. We read run-time parameters
1795 * we showed in @ref step_60 "step-60" and @ref step_70 "step-70".
1798 *
int main(
int argc,
char **argv)
1802 *
using namespace Step86;
1805 * HeatEquation<2> heat_equation_solver(MPI_COMM_WORLD);
1807 *
const std::string input_filename =
1808 * (argc > 1 ? argv[1] :
"heat_equation.prm");
1810 * heat_equation_solver.run();
    catch (std::exception &exc)
      {
        std::cerr << std::endl
                  << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        std::cerr << "Exception on processing: " << std::endl
                  << exc.what() << std::endl
                  << "Aborting!" << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        return 1;
      }
    catch (...)
      {
        std::cerr << std::endl
                  << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        std::cerr << "Unknown exception!" << std::endl
                  << "Aborting!" << std::endl
                  << "----------------------------------------------------"
                  << std::endl;
        return 1;
      }

    return 0;
  }
1842<a name=
"step_86-Results"></a><h1>Results</h1>
1845When you
run this program with the input file as is, you get output as follows:
Number of active cells: 768
Number of degrees of freedom: 833

    5 linear iterations.
    8 linear iterations.
Time step 1 at t=0.025
    6 linear iterations.
Time step 2 at t=0.05
    5 linear iterations.
    8 linear iterations.
Time step 3 at t=0.075
    6 linear iterations.
Time step 4 at t=0.1
    6 linear iterations.
Time step 5 at t=0.125
    6 linear iterations.
Time step 6 at t=0.15
    6 linear iterations.
Time step 7 at t=0.175
    6 linear iterations.
Time step 8 at t=0.2
    6 linear iterations.
Time step 9 at t=0.225
    6 linear iterations.

Number of active cells: 1050
Number of degrees of freedom: 1155

Time step 10 at t=0.25
    5 linear iterations.
    8 linear iterations.
Time step 11 at t=0.275
    5 linear iterations.
    7 linear iterations.
[...]
Time step 195 at t=4.875
    6 linear iterations.
Time step 196 at t=4.9
    6 linear iterations.
Time step 197 at t=4.925
    6 linear iterations.
Time step 198 at t=4.95
    6 linear iterations.
Time step 199 at t=4.975
    5 linear iterations.

Number of active cells: 1380
Number of degrees of freedom: 1547
+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      43.2s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assemble implicit Jacobian      |       226 |      9.93s |        23% |
| implicit function               |       426 |      16.2s |        37% |
| output results                  |       201 |      9.74s |        23% |
| set algebraic components        |       200 |    0.0496s |      0.11% |
| setup system                    |        21 |     0.799s |       1.8% |
| solve with Jacobian             |       226 |      0.56s |       1.3% |
| update current constraints      |       201 |      1.53s |       3.5% |
+---------------------------------+-----------+------------+------------+
We can generate output for this in the form of a video:

<iframe width="560" height="315" src="https://www.youtube.com/embed/fhJVkcdRksM"
        allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
        allowfullscreen></iframe>
The solution here is driven by boundary values (the initial conditions are
zero, and so is the right hand side of the equation). It takes a little bit of
time for the boundary values to diffuse into the domain, and so the
temperature (the solution of the heat equation) in the interior of the domain
has a slight lag compared to the temperature at the boundary.

The more interesting component of this program is how easy it is to play with
the details of the time stepping algorithm. Recall that the solution above is
controlled by the following parameters:
subsection Time stepper
  subsection Running parameters
    set final time              = 5
    set initial step size       = 0.025
    set initial time            = 0
    set match final time        = false
    set maximum number of steps = -1
    set options prefix          =
    set solver type             = beuler
  end

  subsection Error control
    set absolute error tolerance = -1
    set adaptor type             = none
    set ignore algebraic lte     = true
    set maximum step size        = -1
    set minimum step size        = -1
    set relative error tolerance = -1
  end
end
Of particular interest for us here is to set the time stepping algorithm and
the adaptive time step control method. The latter is set to "none" above, but
PETSc TS offers several alternative choices for this parameter. For example,
we can set parameters as follows:
subsection Time stepper
  subsection Running parameters
    set final time              = 5
    set initial step size       = 0.025
    set initial time            = 0
    set match final time        = false
    set maximum number of steps = -1
    set options prefix          =
    set solver type             = bdf
  end

  subsection Error control
    set absolute error tolerance = 1e-2
    set relative error tolerance = 1e-2
    set adaptor type             = basic
    set ignore algebraic lte     = true
    set maximum step size        = 1
    set minimum step size        = 0.01
  end
end
What we do here is set the initial time step size to 0.025, and choose
relatively large absolute and relative error tolerances of 0.01 for the time
step size adaptation algorithm (for which we choose "basic"). We ask PETSc TS
to use a BDF (backward differentiation formula) method, and we get the
following as output:
===========================================
Number of active cells: 768
Number of degrees of freedom: 833

    5 linear iterations.
    5 linear iterations.
    5 linear iterations.
    4 linear iterations.
    6 linear iterations.
Time step 1 at t=0.01
    5 linear iterations.
    5 linear iterations.
Time step 2 at t=0.02
    5 linear iterations.
    5 linear iterations.
Time step 3 at t=0.03
    5 linear iterations.
    5 linear iterations.
Time step 4 at t=0.042574
    5 linear iterations.
    5 linear iterations.
Time step 5 at t=0.0588392
    5 linear iterations.
    5 linear iterations.
Time step 6 at t=0.0783573
    5 linear iterations.
    7 linear iterations.
    5 linear iterations.
    7 linear iterations.
Time step 7 at t=0.100456
    5 linear iterations.
    7 linear iterations.
    5 linear iterations.
    7 linear iterations.
Time step 8 at t=0.124982
    5 linear iterations.
    8 linear iterations.
    5 linear iterations.
    7 linear iterations.
Time step 9 at t=0.153156
    6 linear iterations.
    5 linear iterations.
[...]
Time step 206 at t=4.96911
    5 linear iterations.
    5 linear iterations.
Time step 207 at t=4.99398
    5 linear iterations.
    5 linear iterations.
Time step 208 at t=5.01723
+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |       117s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assemble implicit Jacobian      |       593 |      35.6s |        31% |
| implicit function               |      1101 |      56.3s |        48% |
| output results                  |       209 |      12.5s |        11% |
| set algebraic components        |       508 |     0.156s |      0.13% |
| setup system                    |        21 |      1.11s |      0.95% |
| solve with Jacobian             |       593 |      1.97s |       1.7% |
| update current constraints      |       509 |      4.53s |       3.9% |
+---------------------------------+-----------+------------+------------+
What is happening here is that apparently PETSc TS is not happy with our
choice of initial time step size, and after several linear solves has reduced
it to the minimum we allow, 0.01. The following time steps then run at a time
step size of 0.01 until it decides to make it slightly larger again and
(apparently) switches to a higher order method that requires more linear
solves per time step but allows for a larger time step closer to our initial
choice of 0.025. It does not quite hit the final time of @f$T=5@f$ with its
time step choices, but we've got only ourselves to blame for that by setting

  set match final time = false

in the input file. If hitting the end time exactly is important to us, setting
the flag to `true` resolves this issue.
We can even reason why PETSc eventually chooses a time step of around 0.025:
The boundary values undergo a complete cosine cycle within 0.5 time units; we
should expect that it takes around ten or twenty time steps to resolve each
period of a cycle to reasonable accuracy, and this leads to the time step
choice PETSc finds.
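As a quick back-of-the-envelope check of that statement: resolving one
0.5-time-unit cycle with, say, twenty time steps corresponds to
@f[
  \Delta t \approx \frac{0.5}{20} = 0.025,
@f]
which is just about the step size the adaptive algorithm settles on.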
Not all combinations of methods, time step adaptation algorithms, and other
parameters are valid, but the main messages from the experiment above that you
should take away are:
- It would undoubtedly be quite time consuming to implement many of the
  methods that PETSc TS offers for time stepping -- but with a program such as
  the one here, we don't need to: We can just select from the many methods
  PETSc TS already has.
- Adaptive time step control is difficult; adaptive choice of which method or
  order to choose is perhaps even more difficult. None of the time dependent
  programs that came before the current one (say, @ref step_23 "step-23",
  @ref step_26 "step-26", @ref step_31 "step-31", @ref step_58 "step-58", and
  a good number of others) have either. Moreover, while deal.II is good at
  spatial adaptation of meshes, it is not a library written by experts in time
  step adaptation, and so will likely not gain this ability either. But,
  again, it doesn't have to: We can rely on a library written by experts in
  that area.
2106<a name="step-86-extensions"></a>
2107<a name="step_86-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2110The program actually runs in parallel, even though we have not used
2111that above. Specifically, if you have configured deal.II to use MPI,
2112then you can do `mpirun -np 8 ./step-86 heat_equation.prm` to run the
2113program with 8 processes.
2115For the program as currently written (and in debug mode), this makes
2116little difference: It will run about twice as fast, but take about 8
2117times as much CPU time. That is because the problem is just so small:
2118Generally between 1000 and 2000 degrees of freedom. A good rule of
2119thumb is that programs really only benefit from parallel computing if
2120you have somewhere in the range of 50,000 to 100,000 unknowns *per MPI
2121process*. But it is not difficult to adapt the program at hand here to
2122run with a much finer mesh, or perhaps in 3d, so that one is beyond
2123that limit and sees the benefits of parallel computing.
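To put that rule of thumb into numbers for this example (a rough estimate
based on the degree-of-freedom counts shown in the output above): with 8 MPI
processes one would want somewhere around
@f$8\times(50{,}000\ldots100{,}000) = 400{,}000\ldots800{,}000@f$ unknowns in
total. Each global refinement in 2d increases the number of cells -- and,
asymptotically, the number of degrees of freedom -- by a factor of about four,
so starting from the roughly 800 unknowns of the initial mesh, about five
additional global refinement steps would get us into that range:
@f[
  833 \times 4^5 \approx 8.5\cdot 10^5.
@f]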
2127<a name="step_86-PlainProg"></a>
2128<h1> The plain program</h1>
2129@include "step-86.cc"