deal.II version GIT relicensing-2173-gae8fc9d14b 2024-11-24 06:40:00+00:00
step-86.h
Go to the documentation of this file.
797 *   /* start time */ 0.0,
798 *   /* end time */ 1.0,
799 *   /* initial time step */ 0.025)
800 *   , initial_global_refinement(5)
801 *   , max_delta_refinement_level(2)
802 *   , mesh_adaptation_frequency(0)
803 *   , right_hand_side_function("/Heat Equation/Right hand side")
804 *   , initial_value_function("/Heat Equation/Initial value")
805 *   , boundary_values_function("/Heat Equation/Boundary values")
806 *   {
807 *   enter_subsection("Time stepper");
808 *   {
809 *   enter_my_subsection(this->prm);
810 *   {
811 *   time_stepper_data.add_parameters(this->prm);
812 *   }
813 *   leave_my_subsection(this->prm);
814 *   }
815 *   leave_subsection();
816 *  
817 *   add_parameter("Initial global refinement",
818 *   initial_global_refinement,
819 *   "Number of times the mesh is refined globally before "
820 *   "starting the time stepping.");
821 *   add_parameter("Maximum delta refinement level",
822 *   max_delta_refinement_level,
823 *   "Maximum number of local refinement levels.");
824 *   add_parameter("Mesh adaptation frequency",
825 *   mesh_adaptation_frequency,
826 *   "When to adapt the mesh.");
827 *   }
828 *  
829 *  
830 *  
831 * @endcode
832 *
833 *
834 * <a name="step_86-TheHeatEquationsetup_systemfunction"></a>
835 * <h4>The HeatEquation::setup_system() function</h4>
836 *
837
838 *
839 * This function is not very different from what we do in many other programs
840 * (including @ref step_26 "step-26"). We enumerate degrees of freedom, output some
841 * information about them, build constraint objects (recalling that we
842 * put hanging node constraints into their separate object), and then
843 * also build an AffineConstraint object that contains both the hanging
844 * node constraints as well as constraints corresponding to zero Dirichlet
845 * boundary conditions. This last object is needed since we impose the
846 * constraints through algebraic equations. While technically it would be
847 * possible to use the time derivative of the boundary function as a boundary
848 * condition for the time derivative of the solution, this is not done here.
849 * Instead, we impose the boundary conditions through algebraic equations, and
850 * therefore the time derivative of the boundary conditions is not part of
851 * the algebraic system, and we need zero boundary conditions on the time
852 * derivative of the solution when computing the residual. We use the
853 * `homogeneous_constraints` object for this purpose.
854 *
855
856 *
857 * Finally, we create the actual non-homogeneous `current_constraints` by
858 * calling `update_current_constraints()`. These are also used during the
859 * assembly and during the residual evaluation.
860 *
861 * @code
862 *   template <int dim>
863 *   void HeatEquation<dim>::setup_system(const double time)
864 *   {
865 *   TimerOutput::Scope t(computing_timer, "setup system");
866 *  
867 *   dof_handler.distribute_dofs(fe);
868 *   pcout << std::endl
869 *   << "Number of active cells: " << triangulation.n_active_cells()
870 *   << std::endl
871 *   << "Number of degrees of freedom: " << dof_handler.n_dofs()
872 *   << std::endl
873 *   << std::endl;
874 *  
875 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
876 *   locally_relevant_dofs =
877 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
878 *  
879 *  
880 *   hanging_nodes_constraints.clear();
881 *   hanging_nodes_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
882 *   DoFTools::make_hanging_node_constraints(dof_handler,
883 *   hanging_nodes_constraints);
884 *   hanging_nodes_constraints.make_consistent_in_parallel(locally_owned_dofs,
885 *   locally_relevant_dofs,
886 *   mpi_communicator);
887 *   hanging_nodes_constraints.close();
888 *  
889 *  
890 *   homogeneous_constraints.clear();
891 *   homogeneous_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
892 *   homogeneous_constraints.merge(hanging_nodes_constraints);
893 *   VectorTools::interpolate_boundary_values(dof_handler,
894 *   0,
895 *   Functions::ZeroFunction<dim>(),
896 *   homogeneous_constraints);
897 *   homogeneous_constraints.make_consistent_in_parallel(locally_owned_dofs,
898 *   locally_relevant_dofs,
899 *   mpi_communicator);
900 *   homogeneous_constraints.close();
901 *  
902 *  
903 *   update_current_constraints(time);
904 *  
905 *  
906 * @endcode
907 *
908 * The final block of code resets and initializes the matrix object with
909 * the appropriate sparsity pattern. Recall that we do not store solution
910 * vectors in this class (the time integrator object does that internally)
911 * and so do not have to resize and initialize them either.
912 *
913 * @code
914 *   DynamicSparsityPattern dsp(locally_relevant_dofs);
915 *   DoFTools::make_sparsity_pattern(dof_handler,
916 *   dsp,
917 *   homogeneous_constraints,
918 *   false);
919 *   SparsityTools::distribute_sparsity_pattern(dsp,
920 *   locally_owned_dofs,
921 *   mpi_communicator,
922 *   locally_relevant_dofs);
923 *  
924 *   jacobian_matrix.reinit(locally_owned_dofs,
925 *   locally_owned_dofs,
926 *   dsp,
927 *   mpi_communicator);
928 *   }
929 *  
930 *  
931 * @endcode
932 *
933 *
934 * <a name="step_86-TheHeatEquationoutput_resultsfunction"></a>
935 * <h4>The HeatEquation::output_results() function</h4>
936 *
937
938 *
939 * This function is called from the "monitor" function that is itself called
940 * by the time stepper in each time step. We use it to write the solution to a
941 * file, and provide graphical output through ParaView or VisIt. We also write
942 * a `.pvd` file, which groups all metadata about the `.vtu` files into a single
943 * file that can be used to load the full time-dependent solution in ParaView.
944 *
945 * @code
946 *   template <int dim>
947 *   void HeatEquation<dim>::output_results(const double time,
948 *   const unsigned int timestep_number,
949 *   const PETScWrappers::MPI::Vector &y)
950 *   {
951 *   TimerOutput::Scope t(computing_timer, "output results");
952 *  
953 *   DataOut<dim> data_out;
954 *   data_out.attach_dof_handler(dof_handler);
955 *   data_out.add_data_vector(y, "U");
956 *   data_out.build_patches();
957 *  
958 *   data_out.set_flags(DataOutBase::VtkFlags(time, timestep_number));
959 *  
960 *   const std::string filename =
961 *   "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtu";
962 *   data_out.write_vtu_in_parallel(filename, mpi_communicator);
963 *  
964 *   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
965 *   {
966 *   static std::vector<std::pair<double, std::string>> times_and_names;
967 *   times_and_names.emplace_back(time, filename);
968 *  
969 *   std::ofstream pvd_output("solution.pvd");
970 *   DataOutBase::write_pvd_record(pvd_output, times_and_names);
971 *   }
972 *   }
973 *  
974 *  
975 *  
976 * @endcode
977 *
978 *
979 * <a name="step_86-TheHeatEquationimplicit_functionfunction"></a>
980 * <h4>The HeatEquation::implicit_function() function</h4>
981 *
982
983 *
984 * As discussed in the introduction, we describe the ODE system to the time
985 * stepper via its residual,
986 * @f[
987 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
988 * @f]
989 * The following function computes it, given vectors for @f$U,\dot U@f$ that
990 * we will denote by `y` and `y_dot` because that's how they are called
991 * in the documentation of the PETScWrappers::TimeStepper class.
992 *
993
994 *
995 * At the top of the function, we do the usual set up when computing
996 * integrals. We face two minor difficulties here: the first is that
997 * the `y` and `y_dot` vectors we get as input are read only, but we
998 * need to make sure they satisfy the correct boundary conditions and so
999 * have to set elements in these vectors. The second is that we need to
1000 * compute the residual, and therefore in general we need to evaluate solution
1001 * values and gradients inside locally owned cells, and for this need access
1002 * to degrees of freedom which may be owned by neighboring processors. To
1003 * address these issues, we create (non-ghosted) writable copies of the input
1004 * vectors, apply boundary conditions and hanging node constraints;
1005 * and then copy these vectors to ghosted vectors before we can do anything
1006 * sensible with them.
1007 *
1008 * @code
1009 *   template <int dim>
1010 *   void
1011 *   HeatEquation<dim>::implicit_function(const double time,
1012 *   const PETScWrappers::MPI::Vector &y,
1013 *   const PETScWrappers::MPI::Vector &y_dot,
1014 *   PETScWrappers::MPI::Vector &residual)
1015 *   {
1016 *   TimerOutput::Scope t(computing_timer, "implicit function");
1017 *  
1018 *   PETScWrappers::MPI::Vector tmp_solution(locally_owned_dofs,
1019 *   mpi_communicator);
1020 *   PETScWrappers::MPI::Vector tmp_solution_dot(locally_owned_dofs,
1021 *   mpi_communicator);
1022 *   tmp_solution = y;
1023 *   tmp_solution_dot = y_dot;
1024 *  
1025 *   update_current_constraints(time);
1026 *   current_constraints.distribute(tmp_solution);
1027 *   homogeneous_constraints.distribute(tmp_solution_dot);
1028 *  
1029 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1030 *   locally_relevant_dofs,
1031 *   mpi_communicator);
1032 *   PETScWrappers::MPI::Vector locally_relevant_solution_dot(
1033 *   locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
1034 *   locally_relevant_solution = tmp_solution;
1035 *   locally_relevant_solution_dot = tmp_solution_dot;
1036 *  
1037 *  
1038 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1039 *   FEValues<dim> fe_values(fe,
1040 *   quadrature_formula,
1041 *   update_values | update_gradients |
1042 *   update_quadrature_points | update_JxW_values);
1043 *  
1044 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1045 *   const unsigned int n_q_points = quadrature_formula.size();
1046 *  
1047 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1048 *  
1049 *   std::vector<Tensor<1, dim>> solution_gradients(n_q_points);
1050 *   std::vector<double> solution_dot_values(n_q_points);
1051 *  
1052 *   Vector<double> cell_residual(dofs_per_cell);
1053 *  
1054 *   right_hand_side_function.set_time(time);
1055 *  
1056 * @endcode
1057 *
1058 * Now for computing the actual residual. Recall that we want to compute the
1059 * vector
1060 * @f[
1061 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
1062 * @f]
1063 * We could do that by actually forming the matrices @f$M@f$ and @f$A@f$, but this
1064 * is not efficient. Instead, recall (by writing out how the elements of
1065 * @f$M@f$ and @f$A@f$ are defined, and exchanging integrals and sums) that the
1066 * @f$i@f$th element of the residual vector is given by
1067 * @f{align*}{
1068 * R(t,U,\dot U)_i
1069 * &= \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t)
1070 * \frac{\partial U_j(t)}{\partial t}
1071 * + \sum_j \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
1072 * \varphi_j(\mathbf x, t) U_j(t)
1073 * - \int_\Omega \varphi_i f(\mathbf x, t)
1074 * \\ &=
1075 * \int_\Omega \varphi_i(\mathbf x, t) \frac{\partial u_h(\mathbf x, t)}{\partial t}
1076 * + \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
1077 * u_h(\mathbf x, t)
1078 * - \int_\Omega \varphi_i f(\mathbf x, t).
1079 * @f}
1080 * We can compute these integrals efficiently by breaking them up into
1081 * a sum over all cells and then applying quadrature. For the integrand,
1082 * we need to evaluate the solution and its gradient at the quadrature
1083 * points within each locally owned cell, and for this, we also need
1084 * access to degrees of freedom that may be owned by neighboring
1085 * processors. We therefore use the locally_relevant_solution and
1086 * locally_relevant_solution_dot vectors.
1087 *
1088 * @code
1089 *   residual = 0;
1090 *   for (const auto &cell : dof_handler.active_cell_iterators())
1091 *   if (cell->is_locally_owned())
1092 *   {
1093 *   fe_values.reinit(cell);
1094 *  
1095 *   fe_values.get_function_gradients(locally_relevant_solution,
1096 *   solution_gradients);
1097 *   fe_values.get_function_values(locally_relevant_solution_dot,
1098 *   solution_dot_values);
1099 *  
1100 *   cell->get_dof_indices(local_dof_indices);
1101 *  
1102 *   cell_residual = 0;
1103 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1104 *   for (const unsigned int i : fe_values.dof_indices())
1105 *   {
1106 *   cell_residual(i) +=
1107 *   (fe_values.shape_value(i, q) * // [phi_i(x_q) *
1108 *   solution_dot_values[q] // u(x_q)
1109 *   + // +
1110 *   fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1111 *   solution_gradients[q] // grad u(x_q)
1112 *   - // -
1113 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1114 *   right_hand_side_function.value(
1115 *   fe_values.quadrature_point(q))) // f(x_q)]
1116 *   * fe_values.JxW(q); // * dx
1117 *   }
1118 *   current_constraints.distribute_local_to_global(cell_residual,
1119 *   local_dof_indices,
1120 *   residual);
1121 *   }
1122 *   residual.compress(VectorOperation::add);
1123 *  
1124 * @endcode
1125 *
1126 * The end result of the operations above is a vector that contains the
1127 * residual vector, having taken into account the constraints due to
1128 * hanging nodes and Dirichlet boundary conditions (by virtue of having
1129 * used `current_constraints.distribute_local_to_global()` to add the
1130 * local contributions to the global vector). At the end of the day, the
1131 * residual vector @f$r@f$ will be used in the solution of linear systems
1132 * of the form @f$J z = r@f$ with the "Jacobian" matrix that we define
1133 * below. We want to make sure that the algebraic
1134 * components of @f$z@f$ have predictable values that achieve the purposes
1135 * discussed in the following. We do this by ensuring that the entries
1136 * corresponding to algebraic components in the residual @f$r@f$ have specific
1137 * values, and then we will do the same in the next function for the
1138 * matrix; for this, you will have to know that the rows and columns
1139 * of the matrix corresponding to constrained entries are zero with the
1140 * exception of the diagonal entries. We will manually set that diagonal
1141 * entry to one, and so @f$z_i=r_i@f$ for algebraic components.
1142 *
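 * Schematically, if one ordered the differential components of the solution
 * vector before the algebraic (constrained) ones -- an ordering used only for
 * this illustration, not in the program itself -- the linear systems set up
 * below would have the block structure
 * @f[
 * \begin{pmatrix} J_{\text{diff}} & 0 \\ 0 & I \end{pmatrix}
 * \begin{pmatrix} z_{\text{diff}} \\ z_{\text{alg}} \end{pmatrix}
 * =
 * \begin{pmatrix} r_{\text{diff}} \\ r_{\text{alg}} \end{pmatrix},
 * @f]
 * so that indeed @f$z_i=r_i@f$ for every algebraic component @f$i@f$.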
1143
1144 *
1145 * From the point of view of the residual vector, if the input `y`
1146 * vector does not contain the correct values on constrained degrees of
1147 * freedom (hanging nodes or boundary conditions), we need to communicate
1148 * this to the time stepper, and we do so by setting the residual to the
1149 * actual difference between the input `y` vector and our local copy of
1150 * it, in which we have applied the constraints (see the top of the
1151 * function where we called `current_constraints.distribute(tmp_solution)`
1152 * and a similar operation on the time derivative). Since we have made a
1153 * copy of the input vector for this purpose, we use it to compute the
1154 * residual value. However, there is a difference between hanging node
1155 * constraints and boundary conditions: we do not want to make hanging node
1156 * constraints actually depend on their dependent degrees of freedom, since
1157 * this would imply that we are actually solving for the dependent degrees
1158 * of freedom. This is not what we are actually doing, however, since
1159 * hanging nodes are not actually solved for. They are eliminated from the
1160 * system by the call to AffineConstraints::distribute_local_to_global()
1161 * above. From the point of view of the Jacobian matrix, we are effectively
1162 * setting hanging nodes to an artificial value (usually zero), and we
1163 * simply want to make sure that we solve for those degrees of freedom a
1164 * posteriori, by calling the function AffineConstraints::distribute().
1165 *
1166
1167 *
1168 * Here we therefore check that the residual is equal to the input value on
1169 * the constrained dofs corresponding to hanging nodes (i.e., those for
1170 * which the lines of the `current_constraints` contain at least one other
1171 * entry), and to the difference between the input vector and the actual
1172 * solution on those constraints that correspond to boundary conditions.
1173 *
1174 * @code
1175 *   for (const auto &c : current_constraints.get_lines())
1176 *   if (locally_owned_dofs.is_element(c.index))
1177 *   {
1178 *   if (c.entries.empty()) /* no dependencies -> a Dirichlet node */
1179 *   residual[c.index] = y[c.index] - tmp_solution[c.index];
1180 *   else /* has dependencies -> a hanging node */
1181 *   residual[c.index] = y[c.index];
1182 *   }
1183 *   residual.compress(VectorOperation::insert);
1184 *   }
1185 *  
1186 *  
1187 * @endcode
1188 *
1189 *
1190 * <a name="step_86-TheHeatEquationassemble_implicit_jacobianfunction"></a>
1191 * <h4>The HeatEquation::assemble_implicit_jacobian() function</h4>
1192 *
1193
1194 *
1195 * The next operation is to compute the "Jacobian", which PETSc TS defines
1196 * as the matrix
1197 * @f[
1198 * J_\alpha = \dfrac{\partial R}{\partial y} + \alpha \dfrac{\partial
1199 * R}{\partial \dot y}
1200 * @f]
1201 * which, since here @f$\partial R/\partial y = A@f$ and @f$\partial R/\partial \dot y = M@f$, is simply
1202 * @f[
1203 * J_\alpha = A + \alpha M
1204 * @f]
1205 * and which is in particular independent of time and the current solution
1206 * vectors @f$y@f$ and @f$\dot y@f$.
1207 *
1208
1209 *
1210 * Having seen the assembly of matrices before, there is little that should
1211 * surprise you in the actual assembly here:
1212 *
1213 * @code
1214 *   template <int dim>
1215 *   void HeatEquation<dim>::assemble_implicit_jacobian(
1216 *   const double /* time */,
1217 *   const PETScWrappers::MPI::Vector & /* y */,
1218 *   const PETScWrappers::MPI::Vector & /* y_dot */,
1219 *   const double alpha)
1220 *   {
1221 *   TimerOutput::Scope t(computing_timer, "assemble implicit Jacobian");
1222 *  
1223 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1224 *   FEValues<dim> fe_values(fe,
1225 *   quadrature_formula,
1226 *   update_values | update_gradients |
1227 *   update_quadrature_points | update_JxW_values);
1228 *  
1229 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1230 *  
1231 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1232 *  
1233 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1234 *  
1235 *   jacobian_matrix = 0;
1236 *   for (const auto &cell : dof_handler.active_cell_iterators())
1237 *   if (cell->is_locally_owned())
1238 *   {
1239 *   fe_values.reinit(cell);
1240 *  
1241 *   cell->get_dof_indices(local_dof_indices);
1242 *  
1243 *   cell_matrix = 0;
1244 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1245 *   for (const unsigned int i : fe_values.dof_indices())
1246 *   for (const unsigned int j : fe_values.dof_indices())
1247 *   {
1248 *   cell_matrix(i, j) +=
1249 *   (fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1250 *   fe_values.shape_grad(j, q) // grad phi_j(x_q)
1251 *   + alpha *
1252 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1253 *   fe_values.shape_value(j, q) // phi_j(x_q)
1254 *   ) *
1255 *   fe_values.JxW(q); // * dx
1256 *   }
1257 *   current_constraints.distribute_local_to_global(cell_matrix,
1258 *   local_dof_indices,
1259 *   jacobian_matrix);
1260 *   }
1261 *   jacobian_matrix.compress(VectorOperation::add);
1262 *  
1263 * @endcode
1264 *
1265 * The only interesting part is the following. Recall that we modified the
1266 * residual vector's entries corresponding to the algebraic components of
1267 * the solution in the previous function. The outcome of calling
1268 * `current_constraints.distribute_local_to_global()` a few lines
1269 * above is that the global matrix has zero rows and columns for the
1270 * algebraic (constrained) components of the solution; the function
1271 * puts a value on the diagonal that is nonzero and has about the
1272 * same size as the remaining diagonal entries of the matrix. What
1273 * this diagonal value is is unknown to us -- in other cases where
1274 * we call `current_constraints.distribute_local_to_global()` on
1275 * both the left side matrix and the right side vector, as in most
1276 * other tutorial programs, the matrix diagonal entries and right
1277 * hand side values are chosen in such a way that the result of
1278 * solving a linear system is what we want it to be, but the scaling
1279 * is done automatically.
1280 *
1281
1282 *
1283 * This is not good enough for us here, because we are building the
1284 * right hand side independently from the matrix in different functions.
1285 * Thus, for any constrained degree of freedom, we set the diagonal of the
1286 * Jacobian to be one. This leaves the Jacobian matrix invertible,
1287 * consistent with what the time stepper expects, and it also makes sure
1288 * that if we did not make a mistake in the residual and/or in the Jacobian
1289 * matrix, then asking the time stepper to check the Jacobian with a finite
1290 * difference method will produce the correct result. This can be activated
1291 * at run time via passing the `-snes_test_jacobian` option on the command
1292 * line.
1293 *
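 * For example, one might append that option to the usual invocation (a sketch,
 * using the executable and parameter file names that appear in the results
 * section below):
 * @code
 *   ./step-86 heat_equation.prm -snes_test_jacobian
 * @endcode
 *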
1294 * @code
1295 *   for (const auto &c : current_constraints.get_lines())
1296 *   jacobian_matrix.set(c.index, c.index, 1.0);
1297 *   jacobian_matrix.compress(VectorOperation::insert);
1298 *   }
1299 *  
1300 *  
1301 * @endcode
1302 *
1303 *
1304 * <a name="step_86-TheHeatEquationsolve_with_jacobianfunction"></a>
1305 * <h4>The HeatEquation::solve_with_jacobian() function</h4>
1306 *
1307
1308 *
1309 * This is the function that actually solves the linear system with the
1310 * Jacobian matrix we have previously built (in a call to the previous
1311 * function during the current time step or another earlier one -- time
1312 * steppers are quite sophisticated in determining internally whether it is
1313 * necessary to update the Jacobian matrix, or whether one can reuse it for
1314 * another time step without rebuilding it; this is similar to how one can
1315 * re-use the Newton matrix for several Newton steps, see for example the
1316 * discussion in @ref step_77 "step-77"). We could in principle not provide this function to
1317 * the time stepper, and instead select a specific solver on the command line
1318 * by using the `-ksp_*` options of PETSc. However, by providing this
1319 * function, we can use a specific solver and preconditioner for the linear
1320 * system, and still have the possibility to change them on the command line.
1321 *
1322
1323 *
1324 * Providing a specific solver is more in line with the way we usually do
1325 * things in other deal.II examples, while letting PETSc choose a generic
1326 * solver, and changing it on the command line via `-ksp_type` is more in line
1327 * with the way PETSc is usually used, and it can be a convenient approach
1328 * when we are experimenting to find an optimal solver for our problem. Both
1329 * options are available here, since we can still change both the solver and
1330 * the preconditioner on the command line via `-user_ksp_type` and
1331 * `-user_pc_type` options.
1332 *
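 * As a sketch of what this might look like (the KSP and PC type names here are
 * just standard PETSc choices picked for illustration):
 * @code
 *   ./step-86 heat_equation.prm -user_ksp_type gmres -user_pc_type jacobi
 * @endcode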
1333
1334 *
1335 * In any case, recall that the Jacobian we built in the previous function is
1336 * always of the form
1337 * @f[
1338 * J_\alpha = \alpha M + A
1339 * @f]
1340 * where @f$M@f$ is a mass matrix and @f$A@f$ a Laplace matrix. @f$M@f$ is symmetric and
1341 * positive definite; @f$A@f$ is symmetric and at least positive semidefinite;
1342 * @f$\alpha> 0@f$. As a consequence, the Jacobian matrix is a symmetric and
1343 * positive definite matrix, which we can efficiently solve with the Conjugate
1344 * Gradient method, along with either SSOR or (if available) the algebraic
1345 * multigrid implementation provided by PETSc (via the Hypre package) as
1346 * preconditioner. In practice, if you wanted to solve "real" problems, one
1347 * would spend some time finding which preconditioner is optimal, perhaps
1348 * using PETSc's ability to read solver and preconditioner choices from the
1349 * command line. But this is not the focus of this tutorial program, and so
1350 * we just go with the following:
1351 *
1352 * @code
1353 *   template <int dim>
1354 *   void
1355 *   HeatEquation<dim>::solve_with_jacobian(const PETScWrappers::MPI::Vector &src,
1356 *   PETScWrappers::MPI::Vector &dst)
1357 *   {
1358 *   TimerOutput::Scope t(computing_timer, "solve with Jacobian");
1359 *  
1360 *   #if defined(PETSC_HAVE_HYPRE)
1361 *   PETScWrappers::PreconditionBoomerAMG preconditioner;
1362 *   preconditioner.initialize(jacobian_matrix);
1363 *   #else
1364 *   PETScWrappers::PreconditionSSOR preconditioner;
1365 *   preconditioner.initialize(
1366 *   jacobian_matrix, PETScWrappers::PreconditionSSOR::AdditionalData(1.0));
1367 *   #endif
1368 *  
1369 *   SolverControl solver_control(1000, 1e-8 * src.l2_norm());
1370 *   PETScWrappers::SolverCG cg(solver_control);
1371 *   cg.set_prefix("user_");
1372 *  
1373 *   cg.solve(jacobian_matrix, dst, src, preconditioner);
1374 *  
1375 *   pcout << " " << solver_control.last_step() << " linear iterations."
1376 *   << std::endl;
1377 *   }
1378 *  
1379 *  
1380 * @endcode
1381 *
1382 *
1383 * <a name="step_86-TheHeatEquationprepare_for_coarsening_and_refinementfunction"></a>
1384 * <h4>The HeatEquation::prepare_for_coarsening_and_refinement() function</h4>
1385 *
1386
1387 *
1388 * The next block of functions deals with mesh refinement. We split this
1389 * process up into a "decide whether and what you want to refine" and a
1390 * "please transfer these vectors from old to new mesh" phase, where the first
1391 * also deals with marking cells for refinement. (The decision whether or
1392 * not to refine is done in the lambda function that calls the current
1393 * function.)
1394 *
1395
1396 *
1397 * Breaking things into a "mark cells" function and an "execute mesh
1398 * adaptation and transfer solution vectors" function is awkward, though
1399 * conceptually not difficult to understand. These two pieces of code
1400 * should really be part of the same function, as they are in @ref step_26 "step-26".
1401 * The issue is with what PETScWrappers::TimeStepper provides us with in these
1402 * callbacks. Specifically, the "decide whether and what you want to refine"
1403 * callback has access to the current solution, and so can evaluate
1404 * (spatial) error estimators to decide which cells to refine. The
1405 * second callback that transfers vectors from old to new mesh gets a bunch
1406 * of vectors, but without the semantic information on which of these is
1407 * the current solution vector. As a consequence, it cannot do the marking
1408 * of cells for refinement or coarsening, and we have to do that from the
1409 * first callback.
1410 *
1411
1412 *
1413 * In practice, however, the problem is minor. The first of these
1414 * two functions is essentially identical to the
1415 * first half of the corresponding function in @ref step_26 "step-26", with the only
1416 * difference that it uses the parallel::distributed::GridRefinement namespace
1417 * function instead of the serial one. Once again, we make sure that we never
1418 * fall below the minimum refinement level, nor exceed the maximum one, both of
1419 * which we can select from the parameter file.
1420 *
1421 * @code
1422 *   template <int dim>
1423 *   void HeatEquation<dim>::prepare_for_coarsening_and_refinement(
1424 *   const PETScWrappers::MPI::Vector &y)
1425 *   {
1426 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1427 *   locally_relevant_dofs,
1428 *   mpi_communicator);
1429 *   locally_relevant_solution = y;
1430 *  
1431 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1432 *   KellyErrorEstimator<dim>::estimate(dof_handler,
1433 *   QGauss<dim - 1>(fe.degree + 1),
1434 *   {},
1435 *   locally_relevant_solution,
1436 *   estimated_error_per_cell);
1437 *  
1438 *   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
1439 *   triangulation, estimated_error_per_cell, 0.6, 0.4);
1440 *  
1441 *   const unsigned int max_grid_level =
1442 *   initial_global_refinement + max_delta_refinement_level;
1443 *   const unsigned int min_grid_level = initial_global_refinement;
1444 *  
1445 *   if (triangulation.n_levels() > max_grid_level)
1446 *   for (const auto &cell :
1447 *   triangulation.active_cell_iterators_on_level(max_grid_level))
1448 *   cell->clear_refine_flag();
1449 *   for (const auto &cell :
1450 *   triangulation.active_cell_iterators_on_level(min_grid_level))
1451 *   cell->clear_coarsen_flag();
1452 *   }
1453 *  
1454 *  
1455 * @endcode
1456 *
1457 *
1458 * <a name="step_86-TheHeatEquationtransfer_solution_vectors_to_new_meshfunction"></a>
1459 * <h4>The HeatEquation::transfer_solution_vectors_to_new_mesh() function</h4>
1460 *
1461
1462 *
1463 * The following function then is the second half of the corresponding function
1464 * in @ref step_26 "step-26". It is called by the time stepper whenever it needs to
1465 * transfer the solution and any intermediate stage vectors to a new mesh. We
1466 * must make sure that all input vectors are transformed into ghosted vectors
1467 * before the actual transfer is executed, and that we distribute the hanging
1468 * node constraints on the output vectors as soon as we have interpolated the
1469 * vectors to the new mesh -- i.e., that all constraints are satisfied on
1470 * the vectors we transfer.
1471 *
1472
1473 *
1474 * We have no way to enforce boundary conditions at this stage, however. This
1475 * is because the various vectors may correspond to solutions at previous time
1476 * steps if the method used here is a multistep time integrator, and so may
1477 * correspond to different time points that we are not privy to.
1478 *
1479
1480 *
1481 * While this could be a problem if we used the values of the solution and of
1482 * the intermediate stages on the constrained degrees of freedom to compute
1483 * the errors, we do not do so. Instead, we compute the errors on the
1484 * differential equation, and ignore algebraic constraints; therefore we do not
1485 * need to guarantee that the boundary conditions are satisfied also in the
1486 * intermediate stages.
1487 *
1488
1489 *
1490 * We have at our disposal the hanging node constraints alone, though,
1491 * and therefore we enforce them on the output vectors, even if this is not
1492 * really needed.
1493 *
1494 * @code
1495 *   template <int dim>
1496 *   void HeatEquation<dim>::transfer_solution_vectors_to_new_mesh(
1497 *   const double time,
1498 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1499 *   std::vector<PETScWrappers::MPI::Vector> &all_out)
1500 *   {
1501 *   parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector>
1502 *   solution_trans(dof_handler);
1503 *  
1504 *   std::vector<PETScWrappers::MPI::Vector> all_in_ghosted(all_in.size());
1505 *   std::vector<const PETScWrappers::MPI::Vector *> all_in_ghosted_ptr(
1506 *   all_in.size());
1507 *   std::vector<PETScWrappers::MPI::Vector *> all_out_ptr(all_in.size());
1508 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1509 *   {
1510 *   all_in_ghosted[i].reinit(locally_owned_dofs,
1511 *   locally_relevant_dofs,
1512 *   mpi_communicator);
1513 *   all_in_ghosted[i] = all_in[i];
1514 *   all_in_ghosted_ptr[i] = &all_in_ghosted[i];
1515 *   }
1516 *  
1517 *   triangulation.prepare_coarsening_and_refinement();
1518 *   solution_trans.prepare_for_coarsening_and_refinement(all_in_ghosted_ptr);
1519 *   triangulation.execute_coarsening_and_refinement();
1520 *  
1521 *   setup_system(time);
1522 *  
1523 *   all_out.resize(all_in.size());
1524 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1525 *   {
1526 *   all_out[i].reinit(locally_owned_dofs, mpi_communicator);
1527 *   all_out_ptr[i] = &all_out[i];
1528 *   }
1529 *   solution_trans.interpolate(all_out_ptr);
1530 *  
1531 *   for (PETScWrappers::MPI::Vector &v : all_out)
1532 *   hanging_nodes_constraints.distribute(v);
1533 *   }
1534 *  
1535 *  
1536 * @endcode
1537 *
1538 *
1539 * <a name="step_86-TheHeatEquationupdate_current_constraintsfunction"></a>
1540 * <h4>The HeatEquation::update_current_constraints() function</h4>
1541 *
1542
1543 *
1544 * Since regenerating the constraints at each time step may be expensive, we
1545 * make sure that we only do so when the time changes. We track time changes by
1546 * checking whether the time of the boundary_values_function differs from the
1547 * time at the last call to this function. This will work most
1548 * of the time, but not the very first time we call this function, since the
1549 * time then may be zero and the time of the `boundary_values_function` is also
1550 * zero at construction time. We therefore also check the number of
1551 * constraints in `current_constraints`, and if there are none, we regenerate
1552 * the constraints regardless of the time variable.
1553 *
1554 * @code
1555 *   template <int dim>
1556 *   void HeatEquation<dim>::update_current_constraints(const double time)
1557 *   {
1558 *   if (current_constraints.n_constraints() == 0 ||
1559 *   time != boundary_values_function.get_time())
1560 *   {
1561 *   TimerOutput::Scope t(computing_timer, "update current constraints");
1562 *  
1563 *   boundary_values_function.set_time(time);
1564 *   current_constraints.clear();
1565 *   current_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
1566 *   current_constraints.merge(hanging_nodes_constraints);
1567 *   VectorTools::interpolate_boundary_values(dof_handler,
1568 *   0,
1569 *   boundary_values_function,
1570 *   current_constraints);
1571 *   current_constraints.make_consistent_in_parallel(locally_owned_dofs,
1572 *   locally_relevant_dofs,
1573 *   mpi_communicator);
1574 *   current_constraints.close();
1575 *   }
1576 *   }
1577 *  
1578 *  
1579 * @endcode
1580 *
1581 *
1582 * <a name="step_86-TheHeatEquationrunfunction"></a>
1583 * <h4>The HeatEquation::run() function</h4>
1584 *
1585
1586 *
1587 * We have finally arrived at the main function of the class. At the top, it
1588 * creates the mesh and sets up the variables that make up the linear system:
1589 *
1590 * @code
1591 *   template <int dim>
1592 *   void HeatEquation<dim>::run()
1593 *   {
1594 *   GridGenerator::hyper_L(triangulation);
1595 *   triangulation.refine_global(initial_global_refinement);
1596 *  
1597 *   setup_system(/* time */ 0);
1598 *  
1599 * @endcode
1600 *
1601 * We then set up the time stepping object and associate with it the matrix we
1602 * will build whenever requested, both as the Jacobian matrix (see the definition
1603 * above of what the "Jacobian" actually refers to) and as the matrix
1604 * from which a preconditioner for the Jacobian will be built.
1605 *
1606 * @code
1607 *   PETScWrappers::TimeStepper<PETScWrappers::MPI::Vector,
1608 *   PETScWrappers::MPI::SparseMatrix>
1609 *   petsc_ts(time_stepper_data);
1610 *  
1611 *   petsc_ts.set_matrices(jacobian_matrix, jacobian_matrix);
1612 *  
1613 *  
1614 * @endcode
1615 *
1616 * The real work setting up the time stepping object starts here. As
1617 * discussed in the introduction, the way the PETScWrappers::TimeStepper
1618 * class is used is by inverting control: At the end of this function, we
1619 * will call PETScWrappers::TimeStepper::solve() which internally will
1620 * run the loop over time steps, and at the appropriate places *call back*
1621 * into user code for whatever functionality is required. What we need to
1622 * do is to hook up these callback functions by assigning appropriate
1623 * lambda functions to member variables of the `petsc_ts` object.
1624 *
1625
1626 *
1627 * We start by creating lambda functions that provide information about
1628 * the "implicit function" (i.e., that part of the right hand side of the
1629 * ODE system that we want to treat implicitly -- which in our case is
1630 * the entire right hand side), a function that assembles the Jacobian
1631 * matrix, and a function that solves a linear system with the Jacobian.
1632 *
1633 * @code
1634 *   petsc_ts.implicit_function = [&](const double time,
1635 *   const PETScWrappers::MPI::Vector &y,
1636 *   const PETScWrappers::MPI::Vector &y_dot,
1637 *   PETScWrappers::MPI::Vector &res) {
1638 *   this->implicit_function(time, y, y_dot, res);
1639 *   };
1640 *  
1641 *   petsc_ts.setup_jacobian = [&](const double time,
1642 *   const PETScWrappers::MPI::Vector &y,
1643 *   const PETScWrappers::MPI::Vector &y_dot,
1644 *   const double alpha) {
1645 *   this->assemble_implicit_jacobian(time, y, y_dot, alpha);
1646 *   };
1647 *  
1648 *   petsc_ts.solve_with_jacobian = [&](const PETScWrappers::MPI::Vector &src,
1649 *   PETScWrappers::MPI::Vector &dst) {
1650 *   this->solve_with_jacobian(src, dst);
1651 *   };
1652 *  
1653 * @endcode
1654 *
1655 * The next two callbacks deal with identifying and setting variables
1656 * that are considered "algebraic" (rather than "differential"), i.e., for
1657 * which we know what values they are supposed to have rather than letting
1658 * their values be determined by the differential equation. We need to
1659 * instruct the time stepper to ignore these components when computing the
1660 * error in the residuals, and we do so by first providing a function that
1661 * returns an IndexSet with the indices of these algebraic components of the
1662 * solution vector (or rather, that subset of the locally-owned part of the
1663 * vector that is algebraic, in case we are running in parallel). This first
1664 * of the following two functions does that. Specifically, both nodes at
1665 * which Dirichlet boundary conditions are applied, and hanging nodes are
1666 * algebraically constrained. This function then returns a set of
1667 * indices that is initially empty (but knows about the size of the index
1668 * space) and which we then construct as the union of boundary and hanging
1669 * node indices.
1670 *
1671
1672 *
1673 * Following this, we then also need a function that, given a solution
1674 * vector `y` and the current time, sets the algebraic components of that
1675 * vector to their correct value. This comes down to ensuring that we have
1676 * up to date constraints in the `constraints` variable, and then applying
1677 * these constraints to the solution vector via
1678 * AffineConstraints::distribute(). (It is perhaps worth noting that we
1679 * *could* have achieved the same in `solve_with_jacobian()`. Whenever the
1680 * time stepper solves a linear system, it follows up the call to the solver
1681 * by calling the callback that sets the algebraic components to their correct values. We could
1682 * also have put the calls to `update_current_constraints()` and
1683 * `distribute()` into the `solve_with_jacobian` function, but by not doing
1684 * so, we can also replace the `solve_with_jacobian` function with a call to
1685 * a PETSc solver, and we would still have the current_constraints correctly
1686 * applied to the solution vector.)
1687 *
1688 * @code
1689 *   petsc_ts.algebraic_components = [&]() {
1690 *   IndexSet algebraic_set(dof_handler.n_dofs());
1691 *   algebraic_set.add_indices(DoFTools::extract_boundary_dofs(dof_handler));
1692 *   algebraic_set.add_indices(
1693 *   DoFTools::extract_hanging_node_dofs(dof_handler));
1694 *   return algebraic_set;
1695 *   };
1696 *  
1697 *   petsc_ts.update_constrained_components =
1698 *   [&](const double time, PETScWrappers::MPI::Vector &y) {
1699 *   TimerOutput::Scope t(computing_timer, "set algebraic components");
1700 *   update_current_constraints(time);
1701 *   current_constraints.distribute(y);
1702 *   };
1703 *  
1704 *  
1705 * @endcode
1706 *
1707 * The next two callbacks relate to mesh refinement. As discussed in the
1708 * introduction, PETScWrappers::TimeStepper knows how to deal with the
1709 * situation where we want to change the mesh. All we have to provide
1710 * is a callback that returns `true` if we are at a point where we want
1711 * to refine the mesh (and `false` otherwise) and that, if we want to
1712 * do mesh refinement, does some preparatory work for it in the form of
1713 * calling the `prepare_for_coarsening_and_refinement` function.
1714 *
1715
1716 *
1717 * If the first callback below returns `true`, then PETSc TS will
1718 * do some clean-up operations, and call the second of the
1719 * callback functions
1720 * (`petsc_ts.transfer_solution_vectors_to_new_mesh`) with a
1721 * collection of vectors that need to be interpolated from the old
1722 * to the new mesh. This may include the current solution, perhaps
1723 * the current time derivative of the solution, and in the case of
1724 * [multistep time
1725 * integrators](https://en.wikipedia.org/wiki/Linear_multistep_method) also
1726 * the solutions of some previous time steps. We hand all of these over to
1727 * the `transfer_solution_vectors_to_new_mesh()` member function of this class.
1728 *
1729 * @code
1730 *   petsc_ts.decide_and_prepare_for_remeshing =
1731 *   [&](const double /* time */,
1732 *   const unsigned int step_number,
1733 *   const PETScWrappers::MPI::Vector &y) -> bool {
1734 *   if (step_number > 0 && this->mesh_adaptation_frequency > 0 &&
1735 *   step_number % this->mesh_adaptation_frequency == 0)
1736 *   {
1737 *   pcout << std::endl << "Adapting the mesh..." << std::endl;
1738 *   this->prepare_for_coarsening_and_refinement(y);
1739 *   return true;
1740 *   }
1741 *   else
1742 *   return false;
1743 *   };
1744 *  
1745 *   petsc_ts.transfer_solution_vectors_to_new_mesh =
1746 *   [&](const double time,
1747 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1748 *   std::vector<PETScWrappers::MPI::Vector> &all_out) {
1749 *   this->transfer_solution_vectors_to_new_mesh(time, all_in, all_out);
1750 *   };
1751 *  
1752 * @endcode
1753 *
1754 * The final callback is a "monitor" that is called in each
1755 * time step. Here we use it to create a graphical output. Perhaps a better
1756 * scheme would output the solution at fixed time intervals, rather
1757 * than in every time step, but this is not the main point of
1758 * this program and so we go with the easy approach:
1759 *
1760 * @code
1761 *   petsc_ts.monitor = [&](const double time,
1762 *   const PETScWrappers::MPI::Vector &y,
1763 *   const unsigned int step_number) {
1764 *   pcout << "Time step " << step_number << " at t=" << time << std::endl;
1765 *   this->output_results(time, step_number, y);
1766 *   };
1767 *  
1768 *  
1769 * @endcode
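 *
 * If one instead wanted output only every so often, a small variation of the
 * lambda function above would do; the output interval of 0.25 and the
 * `last_output_time` variable in the following sketch are made up for
 * illustration:
 * @code
 *   double last_output_time = -1.; // so that the very first step is written
 *   petsc_ts.monitor = [&](const double time,
 *                          const PETScWrappers::MPI::Vector &y,
 *                          const unsigned int step_number) {
 *     if (time - last_output_time >= 0.25)
 *       {
 *         this->output_results(time, step_number, y);
 *         last_output_time = time;
 *       }
 *   };
 * @endcode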
1770 *
1771 * With all of this out of the way, the rest of the function is
1772 * anticlimactic: We just have to initialize the solution vector with
1773 * the initial conditions and call the function that does the time
1774 * stepping, and everything else will happen automatically:
1775 *
1776 * @code
1777 *   PETScWrappers::MPI::Vector solution(locally_owned_dofs, mpi_communicator);
1778 *   VectorTools::interpolate(dof_handler, initial_value_function, solution);
1779 *  
1780 *   petsc_ts.solve(solution);
1781 *   }
1782 *   } // namespace Step86
1783 *  
1784 *  
1785 * @endcode
1786 *
1787 *
1788 * <a name="step_86-Themainfunction"></a>
1789 * <h3>The main() function</h3>
1790 *
1791
1792 *
1793 * The rest of the program is as it always looks. We read run-time parameters
1794 * from an input file via the ParameterAcceptor class in the same way as
1795 * we showed in @ref step_60 "step-60" and @ref step_70 "step-70".
1796 *
1797 * @code
1798 *   int main(int argc, char **argv)
1799 *   {
1800 *   try
1801 *   {
1802 *   using namespace Step86;
1803 *  
1804 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1805 *   HeatEquation<2> heat_equation_solver(MPI_COMM_WORLD);
1806 *  
1807 *   const std::string input_filename =
1808 *   (argc > 1 ? argv[1] : "heat_equation.prm");
1809 *   ParameterAcceptor::initialize(input_filename, "heat_equation_used.prm");
1810 *   heat_equation_solver.run();
1811 *   }
1812 *   catch (std::exception &exc)
1813 *   {
1814 *   std::cerr << std::endl
1815 *   << std::endl
1816 *   << "----------------------------------------------------"
1817 *   << std::endl;
1818 *   std::cerr << "Exception on processing: " << std::endl
1819 *   << exc.what() << std::endl
1820 *   << "Aborting!" << std::endl
1821 *   << "----------------------------------------------------"
1822 *   << std::endl;
1823 *  
1824 *   return 1;
1825 *   }
1826 *   catch (...)
1827 *   {
1828 *   std::cerr << std::endl
1829 *   << std::endl
1830 *   << "----------------------------------------------------"
1831 *   << std::endl;
1832 *   std::cerr << "Unknown exception!" << std::endl
1833 *   << "Aborting!" << std::endl
1834 *   << "----------------------------------------------------"
1835 *   << std::endl;
1836 *   return 1;
1837 *   }
1838 *  
1839 *   return 0;
1840 *   }
1841 * @endcode
1842<a name="step_86-Results"></a><h1>Results</h1>
1843
1844
1845When you run this program with the input file as is, you get output as follows:
1846@code
1847
1848Number of active cells: 768
1849Number of degrees of freedom: 833
1850
1851Time step 0 at t=0
1852 5 linear iterations.
1853 8 linear iterations.
1854Time step 1 at t=0.025
1855 6 linear iterations.
1856Time step 2 at t=0.05
1857 5 linear iterations.
1858 8 linear iterations.
1859Time step 3 at t=0.075
1860 6 linear iterations.
1861Time step 4 at t=0.1
1862 6 linear iterations.
1863Time step 5 at t=0.125
1864 6 linear iterations.
1865Time step 6 at t=0.15
1866 6 linear iterations.
1867Time step 7 at t=0.175
1868 6 linear iterations.
1869Time step 8 at t=0.2
1870 6 linear iterations.
1871Time step 9 at t=0.225
1872 6 linear iterations.
1873
1874Adapting the mesh...
1875
1876Number of active cells: 1050
1877Number of degrees of freedom: 1155
1878
1879Time step 10 at t=0.25
1880 5 linear iterations.
1881 8 linear iterations.
1882Time step 11 at t=0.275
1883 5 linear iterations.
1884 7 linear iterations.
1885
1886[...]
1887
1888Time step 195 at t=4.875
1889 6 linear iterations.
1890Time step 196 at t=4.9
1891 6 linear iterations.
1892Time step 197 at t=4.925
1893 6 linear iterations.
1894Time step 198 at t=4.95
1895 6 linear iterations.
1896Time step 199 at t=4.975
1897 5 linear iterations.
1898
1899Adapting the mesh...
1900
1901Number of active cells: 1380
1902Number of degrees of freedom: 1547
1903
1904Time step 200 at t=5
1905
1906
1907+---------------------------------------------+------------+------------+
1908| Total wallclock time elapsed since start | 43.2s | |
1909| | | |
1910| Section | no. calls | wall time | % of total |
1911+---------------------------------+-----------+------------+------------+
1912| assemble implicit Jacobian | 226 | 9.93s | 23% |
1913| implicit function | 426 | 16.2s | 37% |
1914| output results | 201 | 9.74s | 23% |
1915| set algebraic components | 200 | 0.0496s | 0.11% |
1916| setup system | 21 | 0.799s | 1.8% |
1917| solve with Jacobian | 226 | 0.56s | 1.3% |
1918| update current constraints | 201 | 1.53s | 3.5% |
1919+---------------------------------+-----------+------------+------------+
1920
1921@endcode
1922
1923We can generate output for this in the form of a video:
1924@htmlonly
1925<p align="center">
1926 <iframe width="560" height="315" src="https://www.youtube.com/embed/fhJVkcdRksM"
1927 frameborder="0"
1928 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
1929 allowfullscreen></iframe>
1930 </p>
1931@endhtmlonly
1932The solution here is driven by boundary values (the initial conditions are zero,
1933and so is the right hand side of the equation). It takes a little bit of time
1934for the boundary values to diffuse into the domain, and so the temperature
1935(the solution of the heat equation) in the interior of the domain has a slight
1936lag compared to the temperature at the boundary.
1937
1938
1939The more interesting component of this program is how easy it is to play with
1940the details of the time stepping algorithm. Recall that the solution above
1941is controlled by the following parameters:
1942@code
1943 subsection Time stepper
1944 subsection Running parameters
1945 set final time = 5
1946 set initial step size = 0.025
1947 set initial time = 0
1948 set match final time = false
1949 set maximum number of steps = -1
1950 set options prefix =
1951 set solver type = beuler
1952 end
1953
1954 subsection Error control
1955 set absolute error tolerance = -1
1956 set adaptor type = none
1957 set ignore algebraic lte = true
1958 set maximum step size = -1
1959 set minimum step size = -1
1960 set relative error tolerance = -1
1961 end
1962 end
1963@endcode
1964Of particular interest for us here is to set the time stepping algorithm
1965and the adaptive time step control method. The latter is set to "none"
1966above, but there are
1967[several alternative choices](https://petsc.org/release/manualpages/TS/TSAdaptType/)
1968for this parameter. For example, we can set parameters as follows,
1969@code
1970 subsection Time stepper
1971 subsection Running parameters
1972 set final time = 5
1973 set initial step size = 0.025
1974 set initial time = 0
1975 set match final time = false
1976 set maximum number of steps = -1
1977 set options prefix =
1978 set solver type = bdf
1979 end
1980
1981 subsection Error control
1982 set absolute error tolerance = 1e-2
1983 set relative error tolerance = 1e-2
1984 set adaptor type = basic
1985 set ignore algebraic lte = true
1986 set maximum step size = 1
1987 set minimum step size = 0.01
1988 end
1989 end
1990@endcode
1991What we do here is set the initial time step size to 0.025, and choose relatively
1992large absolute and relative error tolerances of 0.01 for the time step size
1993adaptation algorithm (for which we choose "basic"). We ask PETSc TS to use a
1994[Backward Differentiation Formula (BDF)](https://en.wikipedia.org/wiki/Backward_differentiation_formula)
1995method, and we get the following as output:
1996@code
1997
1998===========================================
1999Number of active cells: 768
2000Number of degrees of freedom: 833
2001
2002Time step 0 at t=0
2003 5 linear iterations.
2004 5 linear iterations.
2005 5 linear iterations.
2006 4 linear iterations.
2007 6 linear iterations.
2008Time step 1 at t=0.01
2009 5 linear iterations.
2010 5 linear iterations.
2011Time step 2 at t=0.02
2012 5 linear iterations.
2013 5 linear iterations.
2014Time step 3 at t=0.03
2015 5 linear iterations.
2016 5 linear iterations.
2017Time step 4 at t=0.042574
2018 5 linear iterations.
2019 5 linear iterations.
2020Time step 5 at t=0.0588392
2021 5 linear iterations.
2022 5 linear iterations.
2023Time step 6 at t=0.0783573
2024 5 linear iterations.
2025 7 linear iterations.
2026 5 linear iterations.
2027 7 linear iterations.
2028Time step 7 at t=0.100456
2029 5 linear iterations.
2030 7 linear iterations.
2031 5 linear iterations.
2032 7 linear iterations.
2033Time step 8 at t=0.124982
2034 5 linear iterations.
2035 8 linear iterations.
2036 5 linear iterations.
2037 7 linear iterations.
2038Time step 9 at t=0.153156
2039 6 linear iterations.
2040 5 linear iterations.
2041
2042[...]
2043
2044Time step 206 at t=4.96911
2045 5 linear iterations.
2046 5 linear iterations.
2047Time step 207 at t=4.99398
2048 5 linear iterations.
2049 5 linear iterations.
2050Time step 208 at t=5.01723
2051
2052
2053+---------------------------------------------+------------+------------+
2054| Total wallclock time elapsed since start | 117s | |
2055| | | |
2056| Section | no. calls | wall time | % of total |
2057+---------------------------------+-----------+------------+------------+
2058| assemble implicit Jacobian | 593 | 35.6s | 31% |
2059| implicit function | 1101 | 56.3s | 48% |
2060| output results | 209 | 12.5s | 11% |
2061| set algebraic components | 508 | 0.156s | 0.13% |
2062| setup system | 21 | 1.11s | 0.95% |
2063| solve with Jacobian | 593 | 1.97s | 1.7% |
2064| update current constraints | 509 | 4.53s | 3.9% |
2065+---------------------------------+-----------+------------+------------+
2066@endcode
2067What is happening here is that apparently PETSc TS is not happy with our
2068choice of initial time step size, and after several linear solves has
2069reduced it to the minimum we allow, 0.01. The following time steps
2070then run at a time step size of 0.01 until it decides to make it slightly
2071larger again and (apparently) switches to a higher order method that
2072requires more linear solves per time step but allows for a larger time
2073step closer to our initial choice 0.025 again. It does not quite hit the
2074final time of @f$T=5@f$ with its time step choices, but we've got only
2075ourselves to blame for that by setting
2076@code
2077 set match final time = false
2078@endcode
2079in the input file. If hitting the end time exactly is important to us,
2080setting the flag to `true` resolves this issue.
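That is, one would simply change the corresponding line in the "Running
parameters" subsection of the input file shown above to
@code
    set match final time = true
@endcode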
2081
2082We can even reason why PETSc eventually chooses a time step of around
20830.025: The boundary values undergo a complete cosine cycle within 0.5
2084time units; we should expect that it takes around ten or twenty time
2085steps to resolve each period of a cycle to reasonable accuracy; 0.5
2086divided by 20 is 0.025, and this leads to the time step choice PETSc finds.
2087
2088Not all combinations of methods, time step adaptation algorithms, and
2089other parameters are valid, but the main messages from the experiment
2090above that you should take away are:
2091- It would undoubtedly be quite time consuming to implement many of the
2092 methods that PETSc TS offers for time stepping -- but with a program
2093 such as the one here, we don't need to: We can just select from the
2094 many methods PETSc TS already has.
2095- Adaptive time step control is difficult; adaptively choosing which
2096 method or order to use is perhaps even more difficult. None of the
2097 time dependent programs that came before the current one (say, @ref step_23 "step-23",
2098 @ref step_26 "step-26", @ref step_31 "step-31", @ref step_58 "step-58", and a good number of others) have either.
2099 Moreover, while deal.II is good at spatial adaptation of meshes, it
2100 is not a library written by experts in time step adaptation, and so will
2101 likely not gain this ability either. But, again, it doesn't
2102 have to: We can rely on a library written by experts in that area.
2103
2104
2105
2106<a name="step-86-extensions"></a>
2107<a name="step_86-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2108
2109
2110The program actually runs in parallel, even though we have not used
2111that above. Specifically, if you have configured deal.II to use MPI,
2112then you can do `mpirun -np 8 ./step-86 heat_equation.prm` to run the
2113program with 8 processes.
2114
2115For the program as currently written (and in debug mode), this makes
2116little difference: It will run about twice as fast, but take about 8
2117times as much CPU time. That is because the problem is just so small:
2118Generally between 1000 and 2000 degrees of freedom. A good rule of
2119thumb is that programs really only benefit from parallel computing if
2120you have somewhere in the range of 50,000 to 100,000 unknowns *per MPI
2121process*. But it is not difficult to adapt the program at hand here to
2122run with a much finer mesh, or perhaps in 3d, so that one is beyond
2123that limit and sees the benefits of parallel computing.
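
As a sketch of the kind of change involved (nothing the program above does out
of the box): switching to three space dimensions essentially only requires
changing the instantiation in `main()`,
@code
    HeatEquation<3> heat_equation_solver(MPI_COMM_WORLD);
@endcode
and then selecting a finer starting mesh via the "Initial global refinement"
parameter in the input file.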
2124
2125 *
2126 *
2127<a name="step_86-PlainProg"></a>
2128<h1> The plain program</h1>
2129@include "step-86.cc"
2130*/