Reference documentation for deal.II version 9.6.0
step-86.h
 795 *   0.0,
796 *   /* end time */ 1.0,
797 *   /* initial time step */ 0.025)
798 *   , initial_global_refinement(5)
799 *   , max_delta_refinement_level(2)
800 *   , mesh_adaptation_frequency(0)
801 *   , right_hand_side_function("/Heat Equation/Right hand side")
802 *   , initial_value_function("/Heat Equation/Initial value")
803 *   , boundary_values_function("/Heat Equation/Boundary values")
804 *   {
805 *   enter_subsection("Time stepper");
806 *   {
807 *   enter_my_subsection(this->prm);
808 *   {
809 *   time_stepper_data.add_parameters(this->prm);
810 *   }
811 *   leave_my_subsection(this->prm);
812 *   }
813 *   leave_subsection();
814 *  
815 *   add_parameter("Initial global refinement",
816 *   initial_global_refinement,
817 *   "Number of times the mesh is refined globally before "
818 *   "starting the time stepping.");
819 *   add_parameter("Maximum delta refinement level",
820 *   max_delta_refinement_level,
821 *   "Maximum number of local refinement levels.");
822 *   add_parameter("Mesh adaptation frequency",
823 *   mesh_adaptation_frequency,
824 *   "When to adapt the mesh.");
825 *   }
826 *  
827 *  
828 *  
829 * @endcode
830 *
831 *
832 * <a name="step_86-TheHeatEquationsetup_systemfunction"></a>
833 * <h4>The HeatEquation::setup_system() function</h4>
834 *
835
836 *
837 * This function is not very different from what we do in many other programs
838 * (including @ref step_26 "step-26"). We enumerate degrees of freedom, output some
 839 * information about them, build constraint objects (recalling that we
840 * put hanging node constraints into their separate object), and then
 841 * also build an AffineConstraints object that contains both the hanging
842 * node constraints as well as constraints corresponding to zero Dirichlet
843 * boundary conditions. This last object is needed since we impose the
844 * constraints through algebraic equations. While technically it would be
845 * possible to use the time derivative of the boundary function as a boundary
 846 * condition for the time derivative of the solution, this is not done here.
847 * Instead, we impose the boundary conditions through algebraic equations, and
848 * therefore the time derivative of the boundary conditions is not part of
849 * the algebraic system, and we need zero boundary conditions on the time
850 * derivative of the solution when computing the residual. We use the
851 * `homogeneous_constraints` object for this purpose.
852 *
853
854 *
855 * Finally, we create the actual non-homogeneous `current_constraints` by
 856 * calling `update_current_constraints()`. These are also used during the
857 * assembly and during the residual evaluation.
858 *
859 * @code
860 *   template <int dim>
861 *   void HeatEquation<dim>::setup_system(const double time)
862 *   {
863 *   TimerOutput::Scope t(computing_timer, "setup system");
864 *  
865 *   dof_handler.distribute_dofs(fe);
866 *   pcout << std::endl
867 *   << "Number of active cells: " << triangulation.n_active_cells()
868 *   << std::endl
869 *   << "Number of degrees of freedom: " << dof_handler.n_dofs()
870 *   << std::endl
871 *   << std::endl;
872 *  
873 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
 874 *   locally_relevant_dofs =
 875 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
876 *  
877 *  
878 *   hanging_nodes_constraints.clear();
 879 *   hanging_nodes_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
 880 *   DoFTools::make_hanging_node_constraints(dof_handler,
 881 *   hanging_nodes_constraints);
882 *   hanging_nodes_constraints.make_consistent_in_parallel(locally_owned_dofs,
883 *   locally_relevant_dofs,
884 *   mpi_communicator);
885 *   hanging_nodes_constraints.close();
886 *  
887 *  
888 *   homogeneous_constraints.clear();
889 *   homogeneous_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
 890 *   homogeneous_constraints.merge(hanging_nodes_constraints);
 891 *   VectorTools::interpolate_boundary_values(dof_handler,
 892 *   0,
 893 *   Functions::ZeroFunction<dim>(),
 894 *   homogeneous_constraints);
895 *   homogeneous_constraints.make_consistent_in_parallel(locally_owned_dofs,
896 *   locally_relevant_dofs,
897 *   mpi_communicator);
898 *   homogeneous_constraints.close();
899 *  
900 *  
901 *   update_current_constraints(time);
902 *  
903 *  
904 * @endcode
905 *
906 * The final block of code resets and initializes the matrix object with
907 * the appropriate sparsity pattern. Recall that we do not store solution
908 * vectors in this class (the time integrator object does that internally)
909 * and so do not have to resize and initialize them either.
910 *
911 * @code
912 *   DynamicSparsityPattern dsp(locally_relevant_dofs);
913 *   DoFTools::make_sparsity_pattern(dof_handler,
914 *   dsp,
915 *   homogeneous_constraints,
 916 *   false);
 917 *   SparsityTools::distribute_sparsity_pattern(dsp,
 918 *   locally_owned_dofs,
919 *   mpi_communicator,
920 *   locally_relevant_dofs);
921 *  
922 *   jacobian_matrix.reinit(locally_owned_dofs,
923 *   locally_owned_dofs,
924 *   dsp,
925 *   mpi_communicator);
926 *   }
927 *  
928 *  
929 * @endcode
930 *
931 *
932 * <a name="step_86-TheHeatEquationoutput_resultsfunction"></a>
933 * <h4>The HeatEquation::output_results() function</h4>
934 *
935
936 *
 937 * This function is called from the "monitor" function that is called in turn
938 * by the time stepper in each time step. We use it to write the solution to a
 939 * file, and provide graphical output through ParaView or VisIt. We also write
940 * a pvd file, which groups all metadata about the `.vtu` files into a single
 941 * file that can be used to load the full time dependent solution in ParaView.
942 *
943 * @code
944 *   template <int dim>
945 *   void HeatEquation<dim>::output_results(const double time,
946 *   const unsigned int timestep_number,
947 *   const PETScWrappers::MPI::Vector &y)
948 *   {
949 *   TimerOutput::Scope t(computing_timer, "output results");
950 *  
951 *   DataOut<dim> data_out;
952 *   data_out.attach_dof_handler(dof_handler);
953 *   data_out.add_data_vector(y, "U");
954 *   data_out.build_patches();
955 *  
956 *   data_out.set_flags(DataOutBase::VtkFlags(time, timestep_number));
957 *  
958 *   const std::string filename =
959 *   "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtu";
960 *   data_out.write_vtu_in_parallel(filename, mpi_communicator);
961 *  
962 *   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
963 *   {
964 *   static std::vector<std::pair<double, std::string>> times_and_names;
965 *   times_and_names.emplace_back(time, filename);
966 *  
967 *   std::ofstream pvd_output("solution.pvd");
968 *   DataOutBase::write_pvd_record(pvd_output, times_and_names);
969 *   }
970 *   }
971 *  
972 *  
973 *  
974 * @endcode
975 *
976 *
977 * <a name="step_86-TheHeatEquationimplicit_functionfunction"></a>
978 * <h4>The HeatEquation::implicit_function() function</h4>
979 *
980
981 *
982 * As discussed in the introduction, we describe the ODE system to the time
983 * stepper via its residual,
984 * @f[
985 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
986 * @f]
987 * The following function computes it, given vectors for @f$U,\dot U@f$ that
988 * we will denote by `y` and `y_dot` because that's how they are called
989 * in the documentation of the PETScWrappers::TimeStepper class.
990 *
991
992 *
993 * At the top of the function, we do the usual set up when computing
994 * integrals. We face two minor difficulties here: the first is that
995 * the `y` and `y_dot` vectors we get as input are read only, but we
996 * need to make sure they satisfy the correct boundary conditions and so
997 * have to set elements in these vectors. The second is that we need to
998 * compute the residual, and therefore in general we need to evaluate solution
999 * values and gradients inside locally owned cells, and for this need access
1000 * to degrees of freedom which may be owned by neighboring processors. To
1001 * address these issues, we create (non-ghosted) writable copies of the input
 1002 * vectors, apply boundary conditions and hanging node constraints;
1003 * and then copy these vectors to ghosted vectors before we can do anything
1004 * sensible with them.
1005 *
1006 * @code
1007 *   template <int dim>
1008 *   void
1009 *   HeatEquation<dim>::implicit_function(const double time,
1010 *   const PETScWrappers::MPI::Vector &y,
1011 *   const PETScWrappers::MPI::Vector &y_dot,
1012 *   PETScWrappers::MPI::Vector &residual)
1013 *   {
1014 *   TimerOutput::Scope t(computing_timer, "implicit function");
1015 *  
1016 *   PETScWrappers::MPI::Vector tmp_solution(locally_owned_dofs,
1017 *   mpi_communicator);
1018 *   PETScWrappers::MPI::Vector tmp_solution_dot(locally_owned_dofs,
1019 *   mpi_communicator);
1020 *   tmp_solution = y;
1021 *   tmp_solution_dot = y_dot;
1022 *  
1023 *   update_current_constraints(time);
1024 *   current_constraints.distribute(tmp_solution);
1025 *   homogeneous_constraints.distribute(tmp_solution_dot);
1026 *  
1027 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1028 *   locally_relevant_dofs,
1029 *   mpi_communicator);
1030 *   PETScWrappers::MPI::Vector locally_relevant_solution_dot(
1031 *   locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
1032 *   locally_relevant_solution = tmp_solution;
1033 *   locally_relevant_solution_dot = tmp_solution_dot;
1034 *  
1035 *  
1036 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1037 *   FEValues<dim> fe_values(fe,
1038 *   quadrature_formula,
1039 *   update_values | update_gradients |
1040 *   update_quadrature_points | update_JxW_values);
1041 *  
1042 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1043 *   const unsigned int n_q_points = quadrature_formula.size();
1044 *  
1045 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1046 *  
1047 *   std::vector<Tensor<1, dim>> solution_gradients(n_q_points);
1048 *   std::vector<double> solution_dot_values(n_q_points);
1049 *  
1050 *   Vector<double> cell_residual(dofs_per_cell);
1051 *  
1052 *   right_hand_side_function.set_time(time);
1053 *  
1054 * @endcode
1055 *
 1056 * Now for computing the actual residual. Recall that we want to compute the
1057 * vector
1058 * @f[
1059 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
1060 * @f]
1061 * We could do that by actually forming the matrices @f$M@f$ and @f$A@f$, but this
1062 * is not efficient. Instead, recall (by writing out how the elements of
1063 * @f$M@f$ and @f$A@f$ are defined, and exchanging integrals and sums) that the
1064 * @f$i@f$th element of the residual vector is given by
1065 * @f{align*}{
1066 * R(t,U,\dot U)_i
1067 * &= \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t)
 1068 * \frac{\partial U_j(t)}{\partial t}
 1069 * + \sum_j \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
 1070 * \varphi_j(\mathbf x, t) U_j(t)
 1071 * - \int_\Omega \varphi_i f(\mathbf x, t)
 1072 * \\ &=
 1073 * \int_\Omega \varphi_i(\mathbf x, t) \frac{\partial u_h(\mathbf x, t)}{\partial t}
1074 * + \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
1075 * u_h(\mathbf x, t)
1076 * - \int_\Omega \varphi_i f(\mathbf x, t).
1077 * @f}
1078 * We can compute these integrals efficiently by breaking them up into
1079 * a sum over all cells and then applying quadrature. For the integrand,
1080 * we need to evaluate the solution and its gradient at the quadrature
 1081 * points within each locally owned cell, and for this, we also need
 1082 * access to degrees of freedom that may be owned by neighboring
 1083 * processors. We therefore use the locally_relevant_solution and
1084 * locally_relevant_solution_dot vectors.
1085 *
1086 * @code
1087 *   residual = 0;
1088 *   for (const auto &cell : dof_handler.active_cell_iterators())
1089 *   if (cell->is_locally_owned())
1090 *   {
1091 *   fe_values.reinit(cell);
1092 *  
1093 *   fe_values.get_function_gradients(locally_relevant_solution,
1094 *   solution_gradients);
1095 *   fe_values.get_function_values(locally_relevant_solution_dot,
1096 *   solution_dot_values);
1097 *  
1098 *   cell->get_dof_indices(local_dof_indices);
1099 *  
1100 *   cell_residual = 0;
1101 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1102 *   for (const unsigned int i : fe_values.dof_indices())
1103 *   {
1104 *   cell_residual(i) +=
1105 *   (fe_values.shape_value(i, q) * // [phi_i(x_q) *
1106 *   solution_dot_values[q] // u(x_q)
1107 *   + // +
1108 *   fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1109 *   solution_gradients[q] // grad u(x_q)
1110 *   - // -
1111 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1112 *   right_hand_side_function.value(
1113 *   fe_values.quadrature_point(q))) // f(x_q)]
1114 *   * fe_values.JxW(q); // * dx
1115 *   }
1116 *   current_constraints.distribute_local_to_global(cell_residual,
1117 *   local_dof_indices,
1118 *   residual);
1119 *   }
1120 *   residual.compress(VectorOperation::add);
1121 *  
1122 * @endcode
1123 *
1124 * The end result of the operations above is a vector that contains the
1125 * residual vector, having taken into account the constraints due to
1126 * hanging nodes and Dirichlet boundary conditions (by virtue of having
1127 * used `current_constraints.distribute_local_to_global()` to add the
 1128 * local contributions to the global vector). At the end of the day, the
1129 * residual vector @f$r@f$ will be used in the solution of linear systems
1130 * of the form @f$J z = r@f$ with the "Jacobian" matrix that we define
 1131 * below. We want to ensure that the algebraic
 1132 * components of @f$z@f$ have predictable values that achieve the purposes
 1133 * discussed in the following. We do this by ensuring that the entries
1134 * corresponding to algebraic components in the residual @f$r@f$ have specific
1135 * values, and then we will do the same in the next function for the
1136 * matrix; for this, you will have to know that the rows and columns
1137 * of the matrix corresponding to constrained entries are zero with the
1138 * exception of the diagonal entries. We will manually set that diagonal
1139 * entry to one, and so @f$z_i=r_i@f$ for algebraic components.
1140 *
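 * To make this concrete, here is a sketch (in our own block notation, not
 * anything that appears in the program): if one collects the differential
 * components and the algebraic (constrained) components of the solution into
 * separate blocks, the linear systems the time stepper solves have the form
 * @f[
 *   \begin{pmatrix} J_{dd} & 0 \\ 0 & I \end{pmatrix}
 *   \begin{pmatrix} z_d \\ z_a \end{pmatrix}
 *   =
 *   \begin{pmatrix} r_d \\ r_a \end{pmatrix},
 * @f]
 * so that indeed @f$z_a = r_a@f$, whatever values we choose to put into the
 * algebraic part @f$r_a@f$ of the residual.
 *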
1141
1142 *
1143 * From the point of view of the residual vector, if the input `y`
1144 * vector does not contain the correct values on constrained degrees of
1145 * freedom (hanging nodes or boundary conditions), we need to communicate
1146 * this to the time stepper, and we do so by setting the residual to the
 1147 * actual difference between the input `y` vector and our local copy of
1148 * it, in which we have applied the constraints (see the top of the
1149 * function where we called `current_constraints.distribute(tmp_solution)`
1150 * and a similar operation on the time derivative). Since we have made a
1151 * copy of the input vector for this purpose, we use it to compute the
 1152 * residual value. However, there is a difference between hanging node
1153 * constraints and boundary conditions: we do not want to make hanging node
1154 * constraints actually depend on their dependent degrees of freedom, since
1155 * this would imply that we are actually solving for the dependent degrees
1156 * of freedom. This is not what we are actually doing, however, since
1157 * hanging nodes are not actually solved for. They are eliminated from the
1158 * system by the call to AffineConstraints::distribute_local_to_global()
1159 * above. From the point of view of the Jacobian matrix, we are effectively
1160 * setting hanging nodes to an artificial value (usually zero), and we
1161 * simply want to make sure that we solve for those degrees of freedom a
1162 * posteriori, by calling the function AffineConstraints::distribute().
1163 *
1164
1165 *
 1166 * Here we therefore set the residual equal to the input value on
1167 * the constrained dofs corresponding to hanging nodes (i.e., those for
1168 * which the lines of the `current_constraints` contain at least one other
1169 * entry), and to the difference between the input vector and the actual
1170 * solution on those constraints that correspond to boundary conditions.
1171 *
1172 * @code
1173 *   for (const auto &c : current_constraints.get_lines())
1174 *   if (locally_owned_dofs.is_element(c.index))
1175 *   {
1176 *   if (c.entries.empty()) /* no dependencies -> a Dirichlet node */
1177 *   residual[c.index] = y[c.index] - tmp_solution[c.index];
1178 *   else /* has dependencies -> a hanging node */
1179 *   residual[c.index] = y[c.index];
1180 *   }
1181 *   residual.compress(VectorOperation::insert);
1182 *   }
1183 *  
1184 *  
1185 * @endcode
1186 *
1187 *
1188 * <a name="step_86-TheHeatEquationassemble_implicit_jacobianfunction"></a>
1189 * <h4>The HeatEquation::assemble_implicit_jacobian() function</h4>
1190 *
1191
1192 *
1193 * The next operation is to compute the "Jacobian", which PETSc TS defines
1194 * as the matrix
1195 * @f[
1196 * J_\alpha = \dfrac{\partial R}{\partial y} + \alpha \dfrac{\partial
1197 * R}{\partial \dot y}
1198 * @f]
1199 * which, for the current linear problem, is simply
1200 * @f[
1201 * J_\alpha = A + \alpha M
1202 * @f]
1203 * and which is in particular independent of time and the current solution
1204 * vectors @f$y@f$ and @f$\dot y@f$.
1205 *
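 * To get a sense of where the factor @f$\alpha@f$ comes from, consider as an
 * example the backward Euler method (the default "beuler" choice in the
 * parameter file shown in the results section): there, the time derivative is
 * approximated by
 * @f[
 *   \dot y^n \approx \frac{y^n - y^{n-1}}{\Delta t},
 * @f]
 * and differentiating @f$R\left(t_n, y^n, \frac{y^n - y^{n-1}}{\Delta t}\right)@f$
 * with respect to @f$y^n@f$ yields @f$\alpha = \frac{1}{\Delta t}@f$, so that in
 * our case @f$J_\alpha = A + \frac{1}{\Delta t} M@f$.
 *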
1206
1207 *
1208 * Having seen the assembly of matrices before, there is little that should
1209 * surprise you in the actual assembly here:
1210 *
1211 * @code
1212 *   template <int dim>
1213 *   void HeatEquation<dim>::assemble_implicit_jacobian(
1214 *   const double /* time */,
1215 *   const PETScWrappers::MPI::Vector & /* y */,
1216 *   const PETScWrappers::MPI::Vector & /* y_dot */,
1217 *   const double alpha)
1218 *   {
1219 *   TimerOutput::Scope t(computing_timer, "assemble implicit Jacobian");
1220 *  
1221 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1222 *   FEValues<dim> fe_values(fe,
1223 *   quadrature_formula,
1224 *   update_values | update_gradients |
1225 *   update_quadrature_points | update_JxW_values);
1226 *  
1227 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1228 *  
1229 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1230 *  
1231 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1232 *  
1233 *   jacobian_matrix = 0;
1234 *   for (const auto &cell : dof_handler.active_cell_iterators())
1235 *   if (cell->is_locally_owned())
1236 *   {
1237 *   fe_values.reinit(cell);
1238 *  
1239 *   cell->get_dof_indices(local_dof_indices);
1240 *  
1241 *   cell_matrix = 0;
1242 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1243 *   for (const unsigned int i : fe_values.dof_indices())
1244 *   for (const unsigned int j : fe_values.dof_indices())
1245 *   {
1246 *   cell_matrix(i, j) +=
1247 *   (fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1248 *   fe_values.shape_grad(j, q) // grad phi_j(x_q)
1249 *   + alpha *
1250 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1251 *   fe_values.shape_value(j, q) // phi_j(x_q)
1252 *   ) *
1253 *   fe_values.JxW(q); // * dx
1254 *   }
1255 *   current_constraints.distribute_local_to_global(cell_matrix,
1256 *   local_dof_indices,
1257 *   jacobian_matrix);
1258 *   }
1259 *   jacobian_matrix.compress(VectorOperation::add);
1260 *  
1261 * @endcode
1262 *
1263 * The only interesting part is the following. Recall that we modified the
1264 * residual vector's entries corresponding to the algebraic components of
1265 * the solution in the previous function. The outcome of calling
1266 * `current_constraints.distribute_local_to_global()` a few lines
1267 * above is that the global matrix has zero rows and columns for the
1268 * algebraic (constrained) components of the solution; the function
1269 * puts a value on the diagonal that is nonzero and has about the
1270 * same size as the remaining diagonal entries of the matrix. What
1271 * this diagonal value is is unknown to us -- in other cases where
1272 * we call `current_constraints.distribute_local_to_global()` on
1273 * both the left side matrix and the right side vector, as in most
1274 * other tutorial programs, the matrix diagonal entries and right
1275 * hand side values are chosen in such a way that the result of
1276 * solving a linear system is what we want it to be, but the scaling
1277 * is done automatically.
1278 *
1279
1280 *
1281 * This is not good enough for us here, because we are building the
1282 * right hand side independently from the matrix in different functions.
1283 * Thus, for any constrained degree of freedom, we set the diagonal of the
1284 * Jacobian to be one. This leaves the Jacobian matrix invertible,
1285 * consistent with what the time stepper expects, and it also makes sure
 1286 * that if we did not make a mistake in the residual and/or in the Jacobian
1287 * matrix, then asking the time stepper to check the Jacobian with a finite
1288 * difference method will produce the correct result. This can be activated
1289 * at run time via passing the `-snes_test_jacobian` option on the command
1290 * line.
1291 *
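 * For example (the command line below is just an illustration; PETSc picks up
 * such options because Utilities::MPI::MPI_InitFinalize initializes PETSc with
 * the command line arguments), one would run
 * @code
 *   ./step-86 heat_equation.prm -snes_test_jacobian
 * @endcode
 * and PETSc then compares the Jacobian we assemble against a finite difference
 * approximation of it and reports the difference.
 *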
1292 * @code
1293 *   for (const auto &c : current_constraints.get_lines())
1294 *   jacobian_matrix.set(c.index, c.index, 1.0);
1295 *   jacobian_matrix.compress(VectorOperation::insert);
1296 *   }
1297 *  
1298 *  
1299 * @endcode
1300 *
1301 *
1302 * <a name="step_86-TheHeatEquationsolve_with_jacobianfunction"></a>
1303 * <h4>The HeatEquation::solve_with_jacobian() function</h4>
1304 *
1305
1306 *
1307 * This is the function that actually solves the linear system with the
1308 * Jacobian matrix we have previously built (in a call to the previous
1309 * function during the current time step or another earlier one -- time
1310 * steppers are quite sophisticated in determining internally whether it is
1311 * necessary to update the Jacobian matrix, or whether one can reuse it for
1312 * another time step without rebuilding it; this is similar to how one can
1313 * re-use the Newton matrix for several Newton steps, see for example the
1314 * discussion in @ref step_77 "step-77"). We could in principle not provide this function to
1315 * the time stepper, and instead select a specific solver on the command line
1316 * by using the `-ksp_*` options of PETSc. However, by providing this
1317 * function, we can use a specific solver and preconditioner for the linear
1318 * system, and still have the possibility to change them on the command line.
1319 *
1320
1321 *
1322 * Providing a specific solver is more in line with the way we usually do
1323 * things in other deal.II examples, while letting PETSc choose a generic
1324 * solver, and changing it on the command line via `-ksp_type` is more in line
1325 * with the way PETSc is usually used, and it can be a convenient approach
1326 * when we are experimenting to find an optimal solver for our problem. Both
1327 * options are available here, since we can still change both the solver and
1328 * the preconditioner on the command line via `-user_ksp_type` and
1329 * `-user_pc_type` options.
1330 *
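 * As an illustration of the second approach (the concrete choices below are
 * examples, not recommendations), one could run the program as
 * @code
 *   ./step-86 heat_equation.prm -user_ksp_type gmres -user_pc_type jacobi
 * @endcode
 * which replaces the CG solver and AMG/SSOR preconditioner selected in the code
 * below by GMRES with a point-Jacobi preconditioner, without recompiling.
 *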
1331
1332 *
1333 * In any case, recall that the Jacobian we built in the previous function is
1334 * always of the form
1335 * @f[
1336 * J_\alpha = \alpha M + A
1337 * @f]
1338 * where @f$M@f$ is a mass matrix and @f$A@f$ a Laplace matrix. @f$M@f$ is symmetric and
1339 * positive definite; @f$A@f$ is symmetric and at least positive semidefinite;
1340 * @f$\alpha> 0@f$. As a consequence, the Jacobian matrix is a symmetric and
1341 * positive definite matrix, which we can efficiently solve with the Conjugate
1342 * Gradient method, along with either SSOR or (if available) the algebraic
1343 * multigrid implementation provided by PETSc (via the Hypre package) as
1344 * preconditioner. In practice, if you wanted to solve "real" problems, one
1345 * would spend some time finding which preconditioner is optimal, perhaps
1346 * using PETSc's ability to read solver and preconditioner choices from the
1347 * command line. But this is not the focus of this tutorial program, and so
1348 * we just go with the following:
1349 *
1350 * @code
1351 *   template <int dim>
1352 *   void
1353 *   HeatEquation<dim>::solve_with_jacobian(const PETScWrappers::MPI::Vector &src,
1354 *   PETScWrappers::MPI::Vector &dst)
1355 *   {
1356 *   TimerOutput::Scope t(computing_timer, "solve with Jacobian");
1357 *  
1358 *   #if defined(PETSC_HAVE_HYPRE)
1359 *   PETScWrappers::PreconditionBoomerAMG preconditioner;
1360 *   preconditioner.initialize(jacobian_matrix);
1361 *   #else
1362 *   PETScWrappers::PreconditionSSOR preconditioner;
1363 *   preconditioner.initialize(
1364 *   jacobian_matrix, PETScWrappers::PreconditionSSOR::AdditionalData(1.0));
1365 *   #endif
1366 *  
1367 *   SolverControl solver_control(1000, 1e-8 * src.l2_norm());
1368 *   PETScWrappers::SolverCG cg(solver_control);
1369 *   cg.set_prefix("user_");
1370 *  
1371 *   cg.solve(jacobian_matrix, dst, src, preconditioner);
1372 *  
1373 *   pcout << " " << solver_control.last_step() << " linear iterations."
1374 *   << std::endl;
1375 *   }
1376 *  
1377 *  
1378 * @endcode
1379 *
1380 *
1381 * <a name="step_86-TheHeatEquationprepare_for_coarsening_and_refinementfunction"></a>
1382 * <h4>The HeatEquation::prepare_for_coarsening_and_refinement() function</h4>
1383 *
1384
1385 *
1386 * The next block of functions deals with mesh refinement. We split this
1387 * process up into a "decide whether and what you want to refine" and a
1388 * "please transfer these vectors from old to new mesh" phase, where the first
1389 * also deals with marking cells for refinement. (The decision whether or
1390 * not to refine is done in the lambda function that calls the current
1391 * function.)
1392 *
1393
1394 *
 1395 * Breaking things into a "mark cells" function and into an "execute mesh
1396 * adaptation and transfer solution vectors" function is awkward, though
1397 * conceptually not difficult to understand. These two pieces of code
1398 * should really be part of the same function, as they are in @ref step_26 "step-26".
1399 * The issue is with what PETScWrappers::TimeStepper provides us with in these
1400 * callbacks. Specifically, the "decide whether and what you want to refine"
1401 * callback has access to the current solution, and so can evaluate
1402 * (spatial) error estimators to decide which cells to refine. The
1403 * second callback that transfers vectors from old to new mesh gets a bunch
1404 * of vectors, but without the semantic information on which of these is
1405 * the current solution vector. As a consequence, it cannot do the marking
1406 * of cells for refinement or coarsening, and we have to do that from the
1407 * first callback.
1408 *
1409
1410 *
1411 * In practice, however, the problem is minor. The first of these
1412 * two functions is essentially identical to the
1413 * first half of the corresponding function in @ref step_26 "step-26", with the only
1414 * difference that it uses the parallel::distributed::GridRefinement namespace
1415 * function instead of the serial one. Once again, we make sure that we never
 1416 * fall below the minimum refinement level, nor go above the maximum one, both of
 1417 * which we can select from the parameter file.
1418 *
1419 * @code
1420 *   template <int dim>
1421 *   void HeatEquation<dim>::prepare_for_coarsening_and_refinement(
1422 *   const PETScWrappers::MPI::Vector &y)
1423 *   {
1424 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1425 *   locally_relevant_dofs,
1426 *   mpi_communicator);
1427 *   locally_relevant_solution = y;
1428 *  
1429 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1430 *   KellyErrorEstimator<dim>::estimate(dof_handler,
1431 *   QGauss<dim - 1>(fe.degree + 1),
1432 *   {},
1433 *   locally_relevant_solution,
1434 *   estimated_error_per_cell);
1435 *  
1437 *   triangulation, estimated_error_per_cell, 0.6, 0.4);
1438 *  
1439 *   const unsigned int max_grid_level =
1440 *   initial_global_refinement + max_delta_refinement_level;
1441 *   const unsigned int min_grid_level = initial_global_refinement;
1442 *  
1443 *   if (triangulation.n_levels() > max_grid_level)
1444 *   for (const auto &cell :
1445 *   triangulation.active_cell_iterators_on_level(max_grid_level))
1446 *   cell->clear_refine_flag();
1447 *   for (const auto &cell :
1448 *   triangulation.active_cell_iterators_on_level(min_grid_level))
1449 *   cell->clear_coarsen_flag();
1450 *   }
1451 *  
1452 *  
1453 * @endcode
1454 *
1455 *
1456 * <a name="step_86-TheHeatEquationtransfer_solution_vectors_to_new_meshfunction"></a>
1457 * <h4>The HeatEquation::transfer_solution_vectors_to_new_mesh() function</h4>
1458 *
1459
1460 *
 1461 * The following function then is the second half of the corresponding function
 1462 * in @ref step_26 "step-26". It is called by the time stepper whenever it needs to
1463 * transfer the solution and any intermediate stage vectors to a new mesh. We
1464 * must make sure that all input vectors are transformed into ghosted vectors
1465 * before the actual transfer is executed, and that we distribute the hanging
1466 * node constraints on the output vectors as soon as we have interpolated the
1467 * vectors to the new mesh -- i.e., that all constraints are satisfied on
1468 * the vectors we transfer.
1469 *
1470
1471 *
1472 * We have no way to enforce boundary conditions at this stage, however. This
1473 * is because the various vectors may correspond to solutions at previous time
1474 * steps if the method used here is a multistep time integrator, and so may
1475 * correspond to different time points that we are not privy to.
1476 *
1477
1478 *
1479 * While this could be a problem if we used the values of the solution and of
1480 * the intermediate stages on the constrained degrees of freedom to compute
1481 * the errors, we do not do so. Instead, we compute the errors on the
 1482 * differential equation, and ignore algebraic constraints; therefore we do not
1483 * need to guarantee that the boundary conditions are satisfied also in the
1484 * intermediate stages.
1485 *
1486
1487 *
 1488 * We have at our disposal the `hanging_nodes_constraints` object alone, though,
1489 * and therefore we enforce them on the output vectors, even if this is not
1490 * really needed.
1491 *
1492 * @code
1493 *   template <int dim>
1494 *   void HeatEquation<dim>::transfer_solution_vectors_to_new_mesh(
1495 *   const double time,
1496 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1497 *   std::vector<PETScWrappers::MPI::Vector> &all_out)
 1498 *   {
 1499 *   parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector>
 1500 *   solution_trans(dof_handler);
1501 *  
1502 *   std::vector<PETScWrappers::MPI::Vector> all_in_ghosted(all_in.size());
1503 *   std::vector<const PETScWrappers::MPI::Vector *> all_in_ghosted_ptr(
1504 *   all_in.size());
1505 *   std::vector<PETScWrappers::MPI::Vector *> all_out_ptr(all_in.size());
1506 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1507 *   {
1508 *   all_in_ghosted[i].reinit(locally_owned_dofs,
1509 *   locally_relevant_dofs,
1510 *   mpi_communicator);
1511 *   all_in_ghosted[i] = all_in[i];
1512 *   all_in_ghosted_ptr[i] = &all_in_ghosted[i];
1513 *   }
1514 *  
 1515 *   triangulation.prepare_coarsening_and_refinement();
 1516 *   solution_trans.prepare_for_coarsening_and_refinement(all_in_ghosted_ptr);
 1517 *   triangulation.execute_coarsening_and_refinement();
 1518 *  
1519 *   setup_system(time);
1520 *  
1521 *   all_out.resize(all_in.size());
1522 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1523 *   {
1524 *   all_out[i].reinit(locally_owned_dofs, mpi_communicator);
1525 *   all_out_ptr[i] = &all_out[i];
1526 *   }
1527 *   solution_trans.interpolate(all_out_ptr);
1528 *  
1529 *   for (PETScWrappers::MPI::Vector &v : all_out)
1530 *   hanging_nodes_constraints.distribute(v);
1531 *   }
1532 *  
1533 *  
1534 * @endcode
1535 *
1536 *
1537 * <a name="step_86-TheHeatEquationupdate_current_constraintsfunction"></a>
1538 * <h4>The HeatEquation::update_current_constraints() function</h4>
1539 *
1540
1541 *
1542 * Since regenerating the constraints at each time step may be expensive, we
1543 * make sure that we only do so when the time changes. We track time change by
 1544 * checking whether the time of the boundary_values_function has changed with
 1545 * respect to the time of the last call to this function. This will work most
 1546 * of the time, but not the very first time we call this function, since the
1547 * time then may be zero and the time of the `boundary_values_function` is
 1548 * zero at construction time. We therefore also check the number of
 1549 * constraints in `current_constraints`, and if these are empty, we regenerate
1550 * the constraints regardless of the time variable.
1551 *
1552 * @code
1553 *   template <int dim>
1554 *   void HeatEquation<dim>::update_current_constraints(const double time)
1555 *   {
1556 *   if (current_constraints.n_constraints() == 0 ||
1557 *   time != boundary_values_function.get_time())
1558 *   {
1559 *   TimerOutput::Scope t(computing_timer, "update current constraints");
1560 *  
1561 *   boundary_values_function.set_time(time);
1562 *   current_constraints.clear();
1563 *   current_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
 1564 *   current_constraints.merge(hanging_nodes_constraints);
 1565 *   VectorTools::interpolate_boundary_values(dof_handler,
 1566 *   0,
1567 *   boundary_values_function,
1568 *   current_constraints);
1569 *   current_constraints.make_consistent_in_parallel(locally_owned_dofs,
1570 *   locally_relevant_dofs,
1571 *   mpi_communicator);
1572 *   current_constraints.close();
1573 *   }
1574 *   }
1575 *  
1576 *  
1577 * @endcode
1578 *
1579 *
1580 * <a name="step_86-TheHeatEquationrunfunction"></a>
1581 * <h4>The HeatEquation::run() function</h4>
1582 *
1583
1584 *
1585 * We have finally arrived at the main function of the class. At the top, it
1586 * creates the mesh and sets up the variables that make up the linear system:
1587 *
1588 * @code
1589 *   template <int dim>
1590 *   void HeatEquation<dim>::run()
 1591 *   {
 1592 *   GridGenerator::hyper_L(triangulation);
 1593 *   triangulation.refine_global(initial_global_refinement);
1594 *  
1595 *   setup_system(/* time */ 0);
1596 *  
1597 * @endcode
1598 *
 1599 * We then set up the time stepping object and associate with it the matrix we
 1600 * will build whenever requested, to be used both as the Jacobian matrix (see the
 1601 * definition above of what the "Jacobian" actually refers to) and as the matrix
 1602 * that will be used as a preconditioner for the Jacobian.
1603 *
 1604 * @code
 1605 *   PETScWrappers::TimeStepper<PETScWrappers::MPI::Vector,
 1606 *   PETScWrappers::MPI::SparseMatrix>
 1607 *   petsc_ts(time_stepper_data);
1608 *  
1609 *   petsc_ts.set_matrices(jacobian_matrix, jacobian_matrix);
1610 *  
1611 *  
1612 * @endcode
1613 *
1614 * The real work setting up the time stepping object starts here. As
1615 * discussed in the introduction, the way the PETScWrappers::TimeStepper
1616 * class is used is by inverting control: At the end of this function, we
1617 * will call PETScWrappers::TimeStepper::solve() which internally will
1618 * run the loop over time steps, and at the appropriate places *call back*
1619 * into user code for whatever functionality is required. What we need to
1620 * do is to hook up these callback functions by assigning appropriate
1621 * lambda functions to member variables of the `petsc_ts` object.
1622 *
1623
1624 *
1625 * We start by creating lambda functions that provide information about
1626 * the "implicit function" (i.e., that part of the right hand side of the
1627 * ODE system that we want to treat implicitly -- which in our case is
1628 * the entire right hand side), a function that assembles the Jacobian
1629 * matrix, and a function that solves a linear system with the Jacobian.
1630 *
1631 * @code
1632 *   petsc_ts.implicit_function = [&](const double time,
1633 *   const PETScWrappers::MPI::Vector &y,
1634 *   const PETScWrappers::MPI::Vector &y_dot,
1635 *   PETScWrappers::MPI::Vector &res) {
1636 *   this->implicit_function(time, y, y_dot, res);
1637 *   };
1638 *  
1639 *   petsc_ts.setup_jacobian = [&](const double time,
1640 *   const PETScWrappers::MPI::Vector &y,
1641 *   const PETScWrappers::MPI::Vector &y_dot,
1642 *   const double alpha) {
1643 *   this->assemble_implicit_jacobian(time, y, y_dot, alpha);
1644 *   };
1645 *  
1646 *   petsc_ts.solve_with_jacobian = [&](const PETScWrappers::MPI::Vector &src,
1647 *   PETScWrappers::MPI::Vector &dst) {
1648 *   this->solve_with_jacobian(src, dst);
1649 *   };
1650 *  
1651 * @endcode
1652 *
1653 * The next two callbacks deal with identifying and setting variables
1654 * that are considered "algebraic" (rather than "differential"), i.e., for
1655 * which we know what values they are supposed to have rather than letting
1656 * their values be determined by the differential equation. We need to
1657 * instruct the time stepper to ignore these components when computing the
1658 * error in the residuals, and we do so by first providing a function that
1659 * returns an IndexSet with the indices of these algebraic components of the
1660 * solution vector (or rather, that subset of the locally-owned part of the
1661 * vector that is algebraic, in case we are running in parallel). This first
 1662 * of the following two functions does that. Specifically, both the nodes at
 1663 * which Dirichlet boundary conditions are applied and the hanging nodes are
 1664 * algebraically constrained. This function then returns a set of
1665 * indices that is initially empty (but knows about the size of the index
1666 * space) and which we then construct as the union of boundary and hanging
1667 * node indices.
1668 *
1669
1670 *
1671 * Following this, we then also need a function that, given a solution
1672 * vector `y` and the current time, sets the algebraic components of that
1673 * vector to their correct value. This comes down to ensuring that we have
 1674 * up to date constraints in the `current_constraints` variable, and then applying
1675 * these constraints to the solution vector via
1676 * AffineConstraints::distribute(). (It is perhaps worth noting that we
1677 * *could* have achieved the same in `solve_with_jacobian()`. Whenever the
1678 * time stepper solves a linear system, it follows up the call to the solver
 1679 * by calling the callback that sets the algebraic components correctly. We could
1680 * also have put the calls to `update_current_constraints()` and
1681 * `distribute()` into the `solve_with_jacobian` function, but by not doing
1682 * so, we can also replace the `solve_with_jacobian` function with a call to
1683 * a PETSc solver, and we would still have the current_constraints correctly
1684 * applied to the solution vector.)
1685 *
1686 * @code
1687 *   petsc_ts.algebraic_components = [&]() {
1688 *   IndexSet algebraic_set(dof_handler.n_dofs());
1689 *   algebraic_set.add_indices(DoFTools::extract_boundary_dofs(dof_handler));
1690 *   algebraic_set.add_indices(
1691 *   DoFTools::extract_hanging_node_dofs(dof_handler));
1692 *   return algebraic_set;
1693 *   };
1694 *  
1695 *   petsc_ts.update_constrained_components =
1696 *   [&](const double time, PETScWrappers::MPI::Vector &y) {
1697 *   TimerOutput::Scope t(computing_timer, "set algebraic components");
1698 *   update_current_constraints(time);
1699 *   current_constraints.distribute(y);
1700 *   };
1701 *  
1702 *  
1703 * @endcode
1704 *
1705 * The next two callbacks relate to mesh refinement. As discussed in the
1706 * introduction, PETScWrappers::TimeStepper knows how to deal with the
1707 * situation where we want to change the mesh. All we have to provide
1708 * is a callback that returns `true` if we are at a point where we want
1709 * to refine the mesh (and `false` otherwise) and that if we want to
1710 * do mesh refinement does some prep work for that in the form of
1711 * calling the `prepare_for_coarsening_and_refinement` function.
1712 *
1713
1714 *
1715 * If the first callback below returns `true`, then PETSc TS will
1716 * do some clean-up operations, and call the second of the
1717 * callback functions
1718 * (`petsc_ts.transfer_solution_vectors_to_new_mesh`) with a
1719 * collection of vectors that need to be interpolated from the old
1720 * to the new mesh. This may include the current solution, perhaps
1721 * the current time derivative of the solution, and in the case of
1722 * [multistep time
1723 * integrators](https://en.wikipedia.org/wiki/Linear_multistep_method) also
1724 * the solutions of some previous time steps. We hand all of these over to
1725 * the `interpolate()` member function of this class.
1726 *
1727 * @code
1728 *   petsc_ts.decide_and_prepare_for_remeshing =
1729 *   [&](const double /* time */,
1730 *   const unsigned int step_number,
1731 *   const PETScWrappers::MPI::Vector &y) -> bool {
1732 *   if (step_number > 0 && this->mesh_adaptation_frequency > 0 &&
1733 *   step_number % this->mesh_adaptation_frequency == 0)
1734 *   {
1735 *   pcout << std::endl << "Adapting the mesh..." << std::endl;
1736 *   this->prepare_for_coarsening_and_refinement(y);
1737 *   return true;
1738 *   }
1739 *   else
1740 *   return false;
1741 *   };
1742 *  
1743 *   petsc_ts.transfer_solution_vectors_to_new_mesh =
1744 *   [&](const double time,
1745 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1746 *   std::vector<PETScWrappers::MPI::Vector> &all_out) {
1747 *   this->transfer_solution_vectors_to_new_mesh(time, all_in, all_out);
1748 *   };
1749 *  
1750 * @endcode
1751 *
1752 * The final callback is a "monitor" that is called in each
 1753 * time step. Here we use it to create graphical output. Perhaps a better
1754 * scheme would output the solution at fixed time intervals, rather
1755 * than in every time step, but this is not the main point of
1756 * this program and so we go with the easy approach:
1757 *
1758 * @code
1759 *   petsc_ts.monitor = [&](const double time,
1760 *   const PETScWrappers::MPI::Vector &y,
1761 *   const unsigned int step_number) {
1762 *   pcout << "Time step " << step_number << " at t=" << time << std::endl;
1763 *   this->output_results(time, step_number, y);
1764 *   };
1765 *  
1766 *  
1767 * @endcode
1768 *
1769 * With all of this out of the way, the rest of the function is
1770 * anticlimactic: We just have to initialize the solution vector with
1771 * the initial conditions and call the function that does the time
1772 * stepping, and everything else will happen automatically:
1773 *
1774 * @code
1775 *   PETScWrappers::MPI::Vector solution(locally_owned_dofs, mpi_communicator);
1776 *   VectorTools::interpolate(dof_handler, initial_value_function, solution);
1777 *  
1778 *   petsc_ts.solve(solution);
1779 *   }
1780 *   } // namespace Step86
1781 *  
1782 *  
1783 * @endcode
1784 *
1785 *
1786 * <a name="step_86-Themainfunction"></a>
1787 * <h3>The main() function</h3>
1788 *
1789
1790 *
1791 * The rest of the program is as it always looks. We read run-time parameters
1792 * from an input file via the ParameterAcceptor class in the same way as
1793 * we showed in @ref step_60 "step-60" and @ref step_70 "step-70".
1794 *
1795 * @code
1796 *   int main(int argc, char **argv)
1797 *   {
1798 *   try
1799 *   {
1800 *   using namespace Step86;
1801 *  
1802 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1803 *   HeatEquation<2> heat_equation_solver(MPI_COMM_WORLD);
1804 *  
1805 *   const std::string input_filename =
1806 *   (argc > 1 ? argv[1] : "heat_equation.prm");
1807 *   ParameterAcceptor::initialize(input_filename, "heat_equation_used.prm");
1808 *   heat_equation_solver.run();
1809 *   }
1810 *   catch (std::exception &exc)
1811 *   {
1812 *   std::cerr << std::endl
1813 *   << std::endl
1814 *   << "----------------------------------------------------"
1815 *   << std::endl;
1816 *   std::cerr << "Exception on processing: " << std::endl
1817 *   << exc.what() << std::endl
1818 *   << "Aborting!" << std::endl
1819 *   << "----------------------------------------------------"
1820 *   << std::endl;
1821 *  
1822 *   return 1;
1823 *   }
1824 *   catch (...)
1825 *   {
1826 *   std::cerr << std::endl
1827 *   << std::endl
1828 *   << "----------------------------------------------------"
1829 *   << std::endl;
1830 *   std::cerr << "Unknown exception!" << std::endl
1831 *   << "Aborting!" << std::endl
1832 *   << "----------------------------------------------------"
1833 *   << std::endl;
1834 *   return 1;
1835 *   }
1836 *  
1837 *   return 0;
1838 *   }
1839 * @endcode
1840<a name="step_86-Results"></a><h1>Results</h1>
1841
1842
1843When you run this program with the input file as is, you get output as follows:
1844@code
1845
1846Number of active cells: 768
1847Number of degrees of freedom: 833
1848
1849Time step 0 at t=0
1850 5 linear iterations.
1851 8 linear iterations.
1852Time step 1 at t=0.025
1853 6 linear iterations.
1854Time step 2 at t=0.05
1855 5 linear iterations.
1856 8 linear iterations.
1857Time step 3 at t=0.075
1858 6 linear iterations.
1859Time step 4 at t=0.1
1860 6 linear iterations.
1861Time step 5 at t=0.125
1862 6 linear iterations.
1863Time step 6 at t=0.15
1864 6 linear iterations.
1865Time step 7 at t=0.175
1866 6 linear iterations.
1867Time step 8 at t=0.2
1868 6 linear iterations.
1869Time step 9 at t=0.225
1870 6 linear iterations.
1871
1872Adapting the mesh...
1873
1874Number of active cells: 1050
1875Number of degrees of freedom: 1155
1876
1877Time step 10 at t=0.25
1878 5 linear iterations.
1879 8 linear iterations.
1880Time step 11 at t=0.275
1881 5 linear iterations.
1882 7 linear iterations.
1883
1884[...]
1885
1886Time step 195 at t=4.875
1887 6 linear iterations.
1888Time step 196 at t=4.9
1889 6 linear iterations.
1890Time step 197 at t=4.925
1891 6 linear iterations.
1892Time step 198 at t=4.95
1893 6 linear iterations.
1894Time step 199 at t=4.975
1895 5 linear iterations.
1896
1897Adapting the mesh...
1898
1899Number of active cells: 1380
1900Number of degrees of freedom: 1547
1901
1902Time step 200 at t=5
1903
1904
1905+---------------------------------------------+------------+------------+
1906| Total wallclock time elapsed since start | 43.2s | |
1907| | | |
1908| Section | no. calls | wall time | % of total |
1909+---------------------------------+-----------+------------+------------+
1910| assemble implicit Jacobian | 226 | 9.93s | 23% |
1911| implicit function | 426 | 16.2s | 37% |
1912| output results | 201 | 9.74s | 23% |
1913| set algebraic components | 200 | 0.0496s | 0.11% |
1914| setup system | 21 | 0.799s | 1.8% |
1915| solve with Jacobian | 226 | 0.56s | 1.3% |
1916| update current constraints | 201 | 1.53s | 3.5% |
1917+---------------------------------+-----------+------------+------------+
1918
1919@endcode
1920
1921We can generate output for this in the form of a video:
1922@htmlonly
1923<p align="center">
1924 <iframe width="560" height="315" src="https://www.youtube.com/embed/fhJVkcdRksM"
1925 frameborder="0"
1926 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
1927 allowfullscreen></iframe>
1928 </p>
1929@endhtmlonly
1930The solution here is driven by boundary values (the initial conditions are zero,
1931and so is the right hand side of the equation). It takes a little bit of time
1932for the boundary values to diffuse into the domain, and so the temperature
1933(the solution of the heat equation) in the interior of the domain has a slight
1934lag compared to the temperature at the boundary.
1935
1936
1937The more interesting component of this program is how easy it is to play with
1938the details of the time stepping algorithm. Recall that the solution above
1939is controlled by the following parameters:
1940@code
1941 subsection Time stepper
1942 subsection Running parameters
1943 set final time = 5
1944 set initial step size = 0.025
1945 set initial time = 0
1946 set match final time = false
1947 set maximum number of steps = -1
1948 set options prefix =
1949 set solver type = beuler
1950 end
1951
1952 subsection Error control
1953 set absolute error tolerance = -1
1954 set adaptor type = none
1955 set ignore algebraic lte = true
1956 set maximum step size = -1
1957 set minimum step size = -1
1958 set relative error tolerance = -1
1959 end
1960 end
1961@endcode
1962Of particular interest for us here is to set the time stepping algorithm
1963and the adaptive time step control method. The latter is set to "none"
1964above, but there are
1965[several alternative choices](https://petsc.org/release/manualpages/TS/TSAdaptType/)
1966for this parameter. For example, we can set parameters as follows,
1967@code
1968 subsection Time stepper
1969 subsection Running parameters
1970 set final time = 5
1971 set initial step size = 0.025
1972 set initial time = 0
1973 set match final time = false
1974 set maximum number of steps = -1
1975 set options prefix =
1976 set solver type = bdf
1977 end
1978
1979 subsection Error control
1980 set absolute error tolerance = 1e-2
1981 set relative error tolerance = 1e-2
1982 set adaptor type = basic
1983 set ignore algebraic lte = true
1984 set maximum step size = 1
1985 set minimum step size = 0.01
1986 end
1987 end
1988@endcode
1989What we do here is set the initial time step size to 0.025, and choose relatively
1990large absolute and relative error tolerances of 0.01 for the time step size
1991adaptation algorithm (for which we choose "basic"). We ask PETSc TS to use a
1992[Backward Differentiation Formula (BDF)](https://en.wikipedia.org/wiki/Backward_differentiation_formula)
1993method, and we get the following as output:
1994@code
1995
1996===========================================
1997Number of active cells: 768
1998Number of degrees of freedom: 833
1999
2000Time step 0 at t=0
2001 5 linear iterations.
2002 5 linear iterations.
2003 5 linear iterations.
2004 4 linear iterations.
2005 6 linear iterations.
2006Time step 1 at t=0.01
2007 5 linear iterations.
2008 5 linear iterations.
2009Time step 2 at t=0.02
2010 5 linear iterations.
2011 5 linear iterations.
2012Time step 3 at t=0.03
2013 5 linear iterations.
2014 5 linear iterations.
2015Time step 4 at t=0.042574
2016 5 linear iterations.
2017 5 linear iterations.
2018Time step 5 at t=0.0588392
2019 5 linear iterations.
2020 5 linear iterations.
2021Time step 6 at t=0.0783573
2022 5 linear iterations.
2023 7 linear iterations.
2024 5 linear iterations.
2025 7 linear iterations.
2026Time step 7 at t=0.100456
2027 5 linear iterations.
2028 7 linear iterations.
2029 5 linear iterations.
2030 7 linear iterations.
2031Time step 8 at t=0.124982
2032 5 linear iterations.
2033 8 linear iterations.
2034 5 linear iterations.
2035 7 linear iterations.
2036Time step 9 at t=0.153156
2037 6 linear iterations.
2038 5 linear iterations.
2039
2040[...]
2041
2042Time step 206 at t=4.96911
2043 5 linear iterations.
2044 5 linear iterations.
2045Time step 207 at t=4.99398
2046 5 linear iterations.
2047 5 linear iterations.
2048Time step 208 at t=5.01723
2049
2050
2051+---------------------------------------------+------------+------------+
2052| Total wallclock time elapsed since start | 117s | |
2053| | | |
2054| Section | no. calls | wall time | % of total |
2055+---------------------------------+-----------+------------+------------+
2056| assemble implicit Jacobian | 593 | 35.6s | 31% |
2057| implicit function | 1101 | 56.3s | 48% |
2058| output results | 209 | 12.5s | 11% |
2059| set algebraic components | 508 | 0.156s | 0.13% |
2060| setup system | 21 | 1.11s | 0.95% |
2061| solve with Jacobian | 593 | 1.97s | 1.7% |
2062| update current constraints | 509 | 4.53s | 3.9% |
2063+---------------------------------+-----------+------------+------------+
2064@endcode
2065What is happening here is that apparently PETSc TS is not happy with our
2066choice of initial time step size, and after several linear solves has
 2067reduced it to the minimum we allow, 0.01. The following time steps
2068then run at a time step size of 0.01 until it decides to make it slightly
2069larger again and (apparently) switches to a higher order method that
2070requires more linear solves per time step but allows for a larger time
 2071step closer to our initial choice of 0.025 again. It does not quite hit the
2072final time of @f$T=5@f$ with its time step choices, but we've got only
2073ourselves to blame for that by setting
2074@code
2075 set match final time = false
2076@endcode
2077in the input file.
2078
2079Not all combinations of methods, time step adaptation algorithms, and
2080other parameters are valid, but the main messages from the experiment
2081above that you should take away are:
2082- It would undoubtedly be quite time consuming to implement many of the
2083 methods that PETSc TS offers for time stepping -- but with a program
2084 such as the one here, we don't need to: We can just select from the
2085 many methods PETSc TS already has.
2086- Adaptive time step control is difficult; adaptive choice of which
2087 method or order to choose is perhaps even more difficult. None of the
2088 time dependent programs that came before the current one (say, @ref step_23 "step-23",
2089 @ref step_26 "step-26", @ref step_31 "step-31", @ref step_58 "step-58", and a good number of others) have either.
2090 Moreover, while deal.II is good at spatial adaptation of meshes, it
2091 is not a library written by experts in time step adaptation, and so will
2092 likely not gain this ability either. But, again, it doesn't
2093 have to: We can rely on a library written by experts in that area.
2094
2095
2096
2097<a name="step-86-extensions"></a>
2098<a name="step_86-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2099
2100
2101The program actually runs in parallel, even though we have not used
2102that above. Specifically, if you have configured deal.II to use MPI,
2103then you can do `mpirun -np 8 ./step-86 heat_equation.prm` to run the
2104program with 8 processes.
2105
2106For the program as currently written (and in debug mode), this makes
2107little difference: It will run about twice as fast, but take about 8
2108times as much CPU time. That is because the problem is just so small:
2109Generally between 1000 and 2000 degrees of freedom. A good rule of
2110thumb is that programs really only benefit from parallel computing if
2111you have somewhere in the range of 50,000 to 100,000 unknowns *per MPI
2112process*. But it is not difficult to adapt the program at hand here to
2113run with a much finer mesh, or perhaps in 3d, so that one is beyond
2114that limit and sees the benefits of parallel computing.
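As a sketch of what that would take (the changes below are illustrative, not part
of the program as distributed): switching to three space dimensions only requires
changing the template argument with which the `HeatEquation` class is instantiated
in `main()`,
@code
      HeatEquation<3> heat_equation_solver(MPI_COMM_WORLD);
@endcode
and a finer mesh is obtained simply by increasing the "Initial global refinement"
parameter in the input file (one may also have to adjust the expressions for the
initial, boundary, and right hand side functions in the parameter file to the new
number of space dimensions). Everything else in the program is written in a
dimension independent way.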
2115
2116 *
2117 *
2118<a name="step_86-PlainProg"></a>
2119<h1> The plain program</h1>
2120@include "step-86.cc"
2121*/