step-86.h
 796 *   /* start time */ 0.0,
797 *   /* end time */ 1.0,
798 *   /* initial time step */ 0.025)
799 *   , initial_global_refinement(5)
800 *   , max_delta_refinement_level(2)
801 *   , mesh_adaptation_frequency(0)
802 *   , right_hand_side_function("/Heat Equation/Right hand side")
803 *   , initial_value_function("/Heat Equation/Initial value")
804 *   , boundary_values_function("/Heat Equation/Boundary values")
805 *   {
806 *   enter_subsection("Time stepper");
807 *   {
808 *   enter_my_subsection(this->prm);
809 *   {
810 *   time_stepper_data.add_parameters(this->prm);
811 *   }
812 *   leave_my_subsection(this->prm);
813 *   }
814 *   leave_subsection();
815 *  
816 *   add_parameter("Initial global refinement",
817 *   initial_global_refinement,
818 *   "Number of times the mesh is refined globally before "
819 *   "starting the time stepping.");
820 *   add_parameter("Maximum delta refinement level",
821 *   max_delta_refinement_level,
822 *   "Maximum number of local refinement levels.");
823 *   add_parameter("Mesh adaptation frequency",
824 *   mesh_adaptation_frequency,
825 *   "When to adapt the mesh.");
826 *   }
827 *  
828 *  
829 *  
830 * @endcode
831 *
832 *
833 * <a name="step_86-TheHeatEquationsetup_systemfunction"></a>
834 * <h4>The HeatEquation::setup_system() function</h4>
835 *
836
837 *
838 * This function is not very different from what we do in many other programs
839 * (including @ref step_26 "step-26"). We enumerate degrees of freedom, output some
 840 * information about them, build constraint objects (recalling that we
 841 * put hanging node constraints into their own separate object), and then
 842 * also build an AffineConstraints object that contains both the hanging
843 * node constraints as well as constraints corresponding to zero Dirichlet
844 * boundary conditions. This last object is needed since we impose the
845 * constraints through algebraic equations. While technically it would be
846 * possible to use the time derivative of the boundary function as a boundary
 847 * condition for the time derivative of the solution, this is not done here.
848 * Instead, we impose the boundary conditions through algebraic equations, and
849 * therefore the time derivative of the boundary conditions is not part of
850 * the algebraic system, and we need zero boundary conditions on the time
851 * derivative of the solution when computing the residual. We use the
852 * `homogeneous_constraints` object for this purpose.
853 *
854
855 *
856 * Finally, we create the actual non-homogeneous `current_constraints` by
 857 * calling `update_current_constraints()`. These are also used during the
858 * assembly and during the residual evaluation.
859 *
860 * @code
861 *   template <int dim>
862 *   void HeatEquation<dim>::setup_system(const double time)
863 *   {
864 *   TimerOutput::Scope t(computing_timer, "setup system");
865 *  
866 *   dof_handler.distribute_dofs(fe);
867 *   pcout << std::endl
868 *   << "Number of active cells: " << triangulation.n_active_cells()
869 *   << std::endl
870 *   << "Number of degrees of freedom: " << dof_handler.n_dofs()
871 *   << std::endl
872 *   << std::endl;
873 *  
874 *   locally_owned_dofs = dof_handler.locally_owned_dofs();
875 *   locally_relevant_dofs =
 876 *   DoFTools::extract_locally_relevant_dofs(dof_handler);
 877 *  
878 *  
879 *   hanging_nodes_constraints.clear();
880 *   hanging_nodes_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
 881 *   DoFTools::make_hanging_node_constraints(dof_handler,
 882 *   hanging_nodes_constraints);
883 *   hanging_nodes_constraints.make_consistent_in_parallel(locally_owned_dofs,
884 *   locally_relevant_dofs,
885 *   mpi_communicator);
886 *   hanging_nodes_constraints.close();
887 *  
888 *  
889 *   homogeneous_constraints.clear();
890 *   homogeneous_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
891 *   homogeneous_constraints.merge(hanging_nodes_constraints);
 892 *   VectorTools::interpolate_boundary_values(dof_handler,
 893 *   0,
 894 *   Functions::ZeroFunction<dim>(),
 895 *   homogeneous_constraints);
896 *   homogeneous_constraints.make_consistent_in_parallel(locally_owned_dofs,
897 *   locally_relevant_dofs,
898 *   mpi_communicator);
899 *   homogeneous_constraints.close();
900 *  
901 *  
902 *   update_current_constraints(time);
903 *  
904 *  
905 * @endcode
906 *
907 * The final block of code resets and initializes the matrix object with
908 * the appropriate sparsity pattern. Recall that we do not store solution
909 * vectors in this class (the time integrator object does that internally)
910 * and so do not have to resize and initialize them either.
911 *
912 * @code
913 *   DynamicSparsityPattern dsp(locally_relevant_dofs);
914 *   DoFTools::make_sparsity_pattern(dof_handler,
915 *   dsp,
916 *   homogeneous_constraints,
917 *   false);
 918 *   SparsityTools::distribute_sparsity_pattern(dsp,
 919 *   locally_owned_dofs,
920 *   mpi_communicator,
921 *   locally_relevant_dofs);
922 *  
923 *   jacobian_matrix.reinit(locally_owned_dofs,
924 *   locally_owned_dofs,
925 *   dsp,
926 *   mpi_communicator);
927 *   }
928 *  
929 *  
930 * @endcode
931 *
932 *
933 * <a name="step_86-TheHeatEquationoutput_resultsfunction"></a>
934 * <h4>The HeatEquation::output_results() function</h4>
935 *
936
937 *
 938 * This function is called from the "monitor" callback that the time stepper
 939 * calls in each time step. We use it to write the solution to a
 940 * file, and provide graphical output through ParaView or VisIt. We also write
 941 * a pvd file, which groups all metadata about the `.vtu` files into a single
 942 * file that can be used to load the full time dependent solution in ParaView.
943 *
944 * @code
945 *   template <int dim>
946 *   void HeatEquation<dim>::output_results(const double time,
947 *   const unsigned int timestep_number,
948 *   const PETScWrappers::MPI::Vector &y)
949 *   {
950 *   TimerOutput::Scope t(computing_timer, "output results");
951 *  
952 *   DataOut<dim> data_out;
953 *   data_out.attach_dof_handler(dof_handler);
954 *   data_out.add_data_vector(y, "U");
955 *   data_out.build_patches();
956 *  
957 *   data_out.set_flags(DataOutBase::VtkFlags(time, timestep_number));
958 *  
959 *   const std::string filename =
960 *   "solution-" + Utilities::int_to_string(timestep_number, 3) + ".vtu";
961 *   data_out.write_vtu_in_parallel(filename, mpi_communicator);
962 *  
963 *   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
964 *   {
965 *   static std::vector<std::pair<double, std::string>> times_and_names;
966 *   times_and_names.emplace_back(time, filename);
967 *  
968 *   std::ofstream pvd_output("solution.pvd");
969 *   DataOutBase::write_pvd_record(pvd_output, times_and_names);
970 *   }
971 *   }
972 *  
973 *  
974 *  
975 * @endcode
976 *
977 *
978 * <a name="step_86-TheHeatEquationimplicit_functionfunction"></a>
979 * <h4>The HeatEquation::implicit_function() function</h4>
980 *
981
982 *
983 * As discussed in the introduction, we describe the ODE system to the time
984 * stepper via its residual,
985 * @f[
986 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
987 * @f]
988 * The following function computes it, given vectors for @f$U,\dot U@f$ that
989 * we will denote by `y` and `y_dot` because that's how they are called
990 * in the documentation of the PETScWrappers::TimeStepper class.
991 *
992
993 *
 994 * At the top of the function, we do the usual setup for computing
995 * integrals. We face two minor difficulties here: the first is that
996 * the `y` and `y_dot` vectors we get as input are read only, but we
997 * need to make sure they satisfy the correct boundary conditions and so
998 * have to set elements in these vectors. The second is that we need to
999 * compute the residual, and therefore in general we need to evaluate solution
1000 * values and gradients inside locally owned cells, and for this need access
1001 * to degrees of freedom which may be owned by neighboring processors. To
1002 * address these issues, we create (non-ghosted) writable copies of the input
 1003 * vectors, apply boundary conditions and hanging node constraints to them;
1004 * and then copy these vectors to ghosted vectors before we can do anything
1005 * sensible with them.
1006 *
1007 * @code
1008 *   template <int dim>
1009 *   void
1010 *   HeatEquation<dim>::implicit_function(const double time,
1011 *   const PETScWrappers::MPI::Vector &y,
1012 *   const PETScWrappers::MPI::Vector &y_dot,
1013 *   PETScWrappers::MPI::Vector &residual)
1014 *   {
1015 *   TimerOutput::Scope t(computing_timer, "implicit function");
1016 *  
1017 *   PETScWrappers::MPI::Vector tmp_solution(locally_owned_dofs,
1018 *   mpi_communicator);
1019 *   PETScWrappers::MPI::Vector tmp_solution_dot(locally_owned_dofs,
1020 *   mpi_communicator);
1021 *   tmp_solution = y;
1022 *   tmp_solution_dot = y_dot;
1023 *  
1024 *   update_current_constraints(time);
1025 *   current_constraints.distribute(tmp_solution);
1026 *   homogeneous_constraints.distribute(tmp_solution_dot);
1027 *  
1028 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1029 *   locally_relevant_dofs,
1030 *   mpi_communicator);
1031 *   PETScWrappers::MPI::Vector locally_relevant_solution_dot(
1032 *   locally_owned_dofs, locally_relevant_dofs, mpi_communicator);
1033 *   locally_relevant_solution = tmp_solution;
1034 *   locally_relevant_solution_dot = tmp_solution_dot;
1035 *  
1036 *  
1037 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1038 *   FEValues<dim> fe_values(fe,
1039 *   quadrature_formula,
1040 *   update_values | update_gradients |
1041 *   update_quadrature_points | update_JxW_values);
1042 *  
1043 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1044 *   const unsigned int n_q_points = quadrature_formula.size();
1045 *  
1046 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1047 *  
1048 *   std::vector<Tensor<1, dim>> solution_gradients(n_q_points);
1049 *   std::vector<double> solution_dot_values(n_q_points);
1050 *  
1051 *   Vector<double> cell_residual(dofs_per_cell);
1052 *  
1053 *   right_hand_side_function.set_time(time);
1054 *  
1055 * @endcode
1056 *
 1057 * Now for computing the actual residual. Recall that we want to compute the
1058 * vector
1059 * @f[
1060 * R(t,U,\dot U) = M \frac{\partial U(t)}{\partial t} + AU(t) - F(t).
1061 * @f]
1062 * We could do that by actually forming the matrices @f$M@f$ and @f$A@f$, but this
1063 * is not efficient. Instead, recall (by writing out how the elements of
1064 * @f$M@f$ and @f$A@f$ are defined, and exchanging integrals and sums) that the
1065 * @f$i@f$th element of the residual vector is given by
 1066 * @f{align*}{
 1067 * R(t,U,\dot U)_i
 1068 * &= \sum_j \int_\Omega \varphi_i(\mathbf x, t) \varphi_j(\mathbf x, t)
 1069 * \frac{\partial U_j(t)}{\partial t}
 1070 * + \sum_j \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
 1071 * \varphi_j(\mathbf x, t) U_j(t)
 1072 * - \int_\Omega \varphi_i f(\mathbf x, t)
 1073 * \\ &=
 1074 * \int_\Omega \varphi_i(\mathbf x, t) \frac{\partial u_h(\mathbf x, t)}{\partial t}
 1075 * + \int_\Omega \nabla \varphi_i(\mathbf x, t) \cdot \nabla
 1076 * u_h(\mathbf x, t)
 1077 * - \int_\Omega \varphi_i f(\mathbf x, t).
 1078 * @f}
1079 * We can compute these integrals efficiently by breaking them up into
1080 * a sum over all cells and then applying quadrature. For the integrand,
1081 * we need to evaluate the solution and its gradient at the quadrature
 1082 * points within each locally owned cell, and for this, we also need
 1083 * access to degrees of freedom that may be owned by neighboring
 1084 * processors. We therefore use the locally_relevant_solution and
 1085 * locally_relevant_solution_dot vectors.
1086 *
1087 * @code
1088 *   residual = 0;
1089 *   for (const auto &cell : dof_handler.active_cell_iterators())
1090 *   if (cell->is_locally_owned())
1091 *   {
1092 *   fe_values.reinit(cell);
1093 *  
1094 *   fe_values.get_function_gradients(locally_relevant_solution,
1095 *   solution_gradients);
1096 *   fe_values.get_function_values(locally_relevant_solution_dot,
1097 *   solution_dot_values);
1098 *  
1099 *   cell->get_dof_indices(local_dof_indices);
1100 *  
1101 *   cell_residual = 0;
1102 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1103 *   for (const unsigned int i : fe_values.dof_indices())
1104 *   {
1105 *   cell_residual(i) +=
1106 *   (fe_values.shape_value(i, q) * // [phi_i(x_q) *
 1107 *   solution_dot_values[q] // du/dt(x_q)
1108 *   + // +
1109 *   fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1110 *   solution_gradients[q] // grad u(x_q)
1111 *   - // -
1112 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1113 *   right_hand_side_function.value(
1114 *   fe_values.quadrature_point(q))) // f(x_q)]
1115 *   * fe_values.JxW(q); // * dx
1116 *   }
1117 *   current_constraints.distribute_local_to_global(cell_residual,
1118 *   local_dof_indices,
1119 *   residual);
1120 *   }
1121 *   residual.compress(VectorOperation::add);
1122 *  
1123 * @endcode
1124 *
 1125 * The end result of the operations above is the residual vector, which
 1126 * takes into account the constraints due to
 1127 * hanging nodes and Dirichlet boundary conditions (by virtue of having
 1128 * used `current_constraints.distribute_local_to_global()` to add the
 1129 * local contributions to the global vector). At the end of the day, the
 1130 * residual vector @f$r@f$ will be used in the solution of linear systems
 1131 * of the form @f$J z = r@f$ with the "Jacobian" matrix that we define
 1132 * below. We want the algebraic components of @f$z@f$ to have
 1133 * predictable values that achieve the purposes
 1134 * discussed in the following. We do this by ensuring that the entries
1135 * corresponding to algebraic components in the residual @f$r@f$ have specific
1136 * values, and then we will do the same in the next function for the
1137 * matrix; for this, you will have to know that the rows and columns
1138 * of the matrix corresponding to constrained entries are zero with the
1139 * exception of the diagonal entries. We will manually set that diagonal
1140 * entry to one, and so @f$z_i=r_i@f$ for algebraic components.
1141 *
1142
1143 *
1144 * From the point of view of the residual vector, if the input `y`
1145 * vector does not contain the correct values on constrained degrees of
1146 * freedom (hanging nodes or boundary conditions), we need to communicate
1147 * this to the time stepper, and we do so by setting the residual to the
 1148 * actual difference between the input `y` vector and our local copy of
1149 * it, in which we have applied the constraints (see the top of the
1150 * function where we called `current_constraints.distribute(tmp_solution)`
1151 * and a similar operation on the time derivative). Since we have made a
1152 * copy of the input vector for this purpose, we use it to compute the
1153 * residual value. However, there is a difference between hanging nodes
1154 * constraints and boundary conditions: we do not want to make hanging node
1155 * constraints actually depend on their dependent degrees of freedom, since
1156 * this would imply that we are actually solving for the dependent degrees
1157 * of freedom. This is not what we are actually doing, however, since
1158 * hanging nodes are not actually solved for. They are eliminated from the
1159 * system by the call to AffineConstraints::distribute_local_to_global()
1160 * above. From the point of view of the Jacobian matrix, we are effectively
1161 * setting hanging nodes to an artificial value (usually zero), and we
1162 * simply want to make sure that we solve for those degrees of freedom a
1163 * posteriori, by calling the function AffineConstraints::distribute().
1164 *
1165
1166 *
 1167 * Here we therefore set the residual equal to the input value on
 1168 * the constrained dofs corresponding to hanging nodes (i.e., those for
 1169 * which the lines of the `current_constraints` contain at least one other
 1170 * entry), and to the difference between the input vector and the actual
 1171 * solution on those constraints that correspond to boundary conditions.
1172 *
1173 * @code
1174 *   for (const auto &c : current_constraints.get_lines())
1175 *   if (locally_owned_dofs.is_element(c.index))
1176 *   {
1177 *   if (c.entries.empty()) /* no dependencies -> a Dirichlet node */
1178 *   residual[c.index] = y[c.index] - tmp_solution[c.index];
1179 *   else /* has dependencies -> a hanging node */
1180 *   residual[c.index] = y[c.index];
1181 *   }
1182 *   residual.compress(VectorOperation::insert);
1183 *   }
1184 *  
1185 *  
1186 * @endcode
1187 *
1188 *
1189 * <a name="step_86-TheHeatEquationassemble_implicit_jacobianfunction"></a>
1190 * <h4>The HeatEquation::assemble_implicit_jacobian() function</h4>
1191 *
1192
1193 *
1194 * The next operation is to compute the "Jacobian", which PETSc TS defines
1195 * as the matrix
1196 * @f[
1197 * J_\alpha = \dfrac{\partial R}{\partial y} + \alpha \dfrac{\partial
1198 * R}{\partial \dot y}
1199 * @f]
1200 * which, for the current linear problem, is simply
1201 * @f[
1202 * J_\alpha = A + \alpha M
1203 * @f]
1204 * and which is in particular independent of time and the current solution
1205 * vectors @f$y@f$ and @f$\dot y@f$.
1206 *
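 * To see where the second equality comes from, one can simply differentiate
 * the residual @f$R(t,y,\dot y) = M \dot y + A y - F(t)@f$ defined above:
 * @f[
 * \frac{\partial R}{\partial y} = A,
 * \qquad
 * \frac{\partial R}{\partial \dot y} = M,
 * @f]
 * and substituting these into the definition of @f$J_\alpha@f$ indeed gives
 * @f$J_\alpha = A + \alpha M@f$.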
1207
1208 *
1209 * Having seen the assembly of matrices before, there is little that should
1210 * surprise you in the actual assembly here:
1211 *
1212 * @code
1213 *   template <int dim>
1214 *   void HeatEquation<dim>::assemble_implicit_jacobian(
1215 *   const double /* time */,
1216 *   const PETScWrappers::MPI::Vector & /* y */,
1217 *   const PETScWrappers::MPI::Vector & /* y_dot */,
1218 *   const double alpha)
1219 *   {
1220 *   TimerOutput::Scope t(computing_timer, "assemble implicit Jacobian");
1221 *  
1222 *   const QGauss<dim> quadrature_formula(fe.degree + 1);
1223 *   FEValues<dim> fe_values(fe,
1224 *   quadrature_formula,
1225 *   update_values | update_gradients |
1226 *   update_quadrature_points | update_JxW_values);
1227 *  
1228 *   const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
1229 *  
1230 *   std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
1231 *  
1232 *   FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
1233 *  
1234 *   jacobian_matrix = 0;
1235 *   for (const auto &cell : dof_handler.active_cell_iterators())
1236 *   if (cell->is_locally_owned())
1237 *   {
1238 *   fe_values.reinit(cell);
1239 *  
1240 *   cell->get_dof_indices(local_dof_indices);
1241 *  
1242 *   cell_matrix = 0;
1243 *   for (const unsigned int q : fe_values.quadrature_point_indices())
1244 *   for (const unsigned int i : fe_values.dof_indices())
1245 *   for (const unsigned int j : fe_values.dof_indices())
1246 *   {
1247 *   cell_matrix(i, j) +=
1248 *   (fe_values.shape_grad(i, q) * // grad phi_i(x_q) *
1249 *   fe_values.shape_grad(j, q) // grad phi_j(x_q)
1250 *   + alpha *
1251 *   fe_values.shape_value(i, q) * // phi_i(x_q) *
1252 *   fe_values.shape_value(j, q) // phi_j(x_q)
1253 *   ) *
1254 *   fe_values.JxW(q); // * dx
1255 *   }
1256 *   current_constraints.distribute_local_to_global(cell_matrix,
1257 *   local_dof_indices,
1258 *   jacobian_matrix);
1259 *   }
1260 *   jacobian_matrix.compress(VectorOperation::add);
1261 *  
1262 * @endcode
1263 *
1264 * The only interesting part is the following. Recall that we modified the
1265 * residual vector's entries corresponding to the algebraic components of
1266 * the solution in the previous function. The outcome of calling
1267 * `current_constraints.distribute_local_to_global()` a few lines
1268 * above is that the global matrix has zero rows and columns for the
1269 * algebraic (constrained) components of the solution; the function
1270 * puts a value on the diagonal that is nonzero and has about the
 1271 * same size as the remaining diagonal entries of the matrix. What exactly
 1272 * this diagonal value is remains unknown to us -- in other cases where
 1273 * we call `current_constraints.distribute_local_to_global()` on
 1274 * both the left-hand side matrix and the right-hand side vector, as in most
1275 * other tutorial programs, the matrix diagonal entries and right
1276 * hand side values are chosen in such a way that the result of
1277 * solving a linear system is what we want it to be, but the scaling
1278 * is done automatically.
1279 *
1280
1281 *
1282 * This is not good enough for us here, because we are building the
1283 * right hand side independently from the matrix in different functions.
1284 * Thus, for any constrained degree of freedom, we set the diagonal of the
1285 * Jacobian to be one. This leaves the Jacobian matrix invertible,
1286 * consistent with what the time stepper expects, and it also makes sure
 1287 * that if we did not make a mistake in the residual and/or in the Jacobian
1288 * matrix, then asking the time stepper to check the Jacobian with a finite
1289 * difference method will produce the correct result. This can be activated
1290 * at run time via passing the `-snes_test_jacobian` option on the command
1291 * line.
1292 *
1293 * @code
1294 *   for (const auto &c : current_constraints.get_lines())
1295 *   jacobian_matrix.set(c.index, c.index, 1.0);
1296 *   jacobian_matrix.compress(VectorOperation::insert);
1297 *   }
1298 *  
1299 *  
1300 * @endcode
1301 *
1302 *
1303 * <a name="step_86-TheHeatEquationsolve_with_jacobianfunction"></a>
1304 * <h4>The HeatEquation::solve_with_jacobian() function</h4>
1305 *
1306
1307 *
1308 * This is the function that actually solves the linear system with the
1309 * Jacobian matrix we have previously built (in a call to the previous
1310 * function during the current time step or another earlier one -- time
1311 * steppers are quite sophisticated in determining internally whether it is
1312 * necessary to update the Jacobian matrix, or whether one can reuse it for
1313 * another time step without rebuilding it; this is similar to how one can
1314 * re-use the Newton matrix for several Newton steps, see for example the
 1315 * discussion in @ref step_77 "step-77"). We could, in principle, choose not to provide this function to
1316 * the time stepper, and instead select a specific solver on the command line
1317 * by using the `-ksp_*` options of PETSc. However, by providing this
1318 * function, we can use a specific solver and preconditioner for the linear
1319 * system, and still have the possibility to change them on the command line.
1320 *
1321
1322 *
1323 * Providing a specific solver is more in line with the way we usually do
1324 * things in other deal.II examples, while letting PETSc choose a generic
1325 * solver, and changing it on the command line via `-ksp_type` is more in line
1326 * with the way PETSc is usually used, and it can be a convenient approach
1327 * when we are experimenting to find an optimal solver for our problem. Both
1328 * options are available here, since we can still change both the solver and
1329 * the preconditioner on the command line via `-user_ksp_type` and
1330 * `-user_pc_type` options.
1331 *
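 * As a concrete illustration (the solver and preconditioner names below are
 * merely examples of valid PETSc options, not recommendations for this
 * particular problem), one could run the program as
 * @code
 * ./step-86 heat_equation.prm -user_ksp_type gmres -user_pc_type jacobi
 * @endcode
 * where the parameter file has to come first because `main()` below treats
 * the first command line argument as the name of the input file. The
 * `-snes_test_jacobian` option mentioned earlier can be appended in the
 * same way.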
1332
1333 *
1334 * In any case, recall that the Jacobian we built in the previous function is
1335 * always of the form
1336 * @f[
1337 * J_\alpha = \alpha M + A
1338 * @f]
1339 * where @f$M@f$ is a mass matrix and @f$A@f$ a Laplace matrix. @f$M@f$ is symmetric and
1340 * positive definite; @f$A@f$ is symmetric and at least positive semidefinite;
1341 * @f$\alpha> 0@f$. As a consequence, the Jacobian matrix is a symmetric and
1342 * positive definite matrix, which we can efficiently solve with the Conjugate
1343 * Gradient method, along with either SSOR or (if available) the algebraic
1344 * multigrid implementation provided by PETSc (via the Hypre package) as
 1345 * preconditioner. In practice, if one wanted to solve "real" problems, one
1346 * would spend some time finding which preconditioner is optimal, perhaps
1347 * using PETSc's ability to read solver and preconditioner choices from the
1348 * command line. But this is not the focus of this tutorial program, and so
1349 * we just go with the following:
1350 *
1351 * @code
1352 *   template <int dim>
1353 *   void
1354 *   HeatEquation<dim>::solve_with_jacobian(const PETScWrappers::MPI::Vector &src,
1355 *   PETScWrappers::MPI::Vector &dst)
1356 *   {
1357 *   TimerOutput::Scope t(computing_timer, "solve with Jacobian");
1358 *  
1359 *   #if defined(PETSC_HAVE_HYPRE)
1360 *   PETScWrappers::PreconditionBoomerAMG preconditioner;
1361 *   preconditioner.initialize(jacobian_matrix);
1362 *   #else
1363 *   PETScWrappers::PreconditionSSOR preconditioner;
1364 *   preconditioner.initialize(
1365 *   jacobian_matrix, PETScWrappers::PreconditionSSOR::AdditionalData(1.0));
1366 *   #endif
1367 *  
1368 *   SolverControl solver_control(1000, 1e-8 * src.l2_norm());
1369 *   PETScWrappers::SolverCG cg(solver_control);
1370 *   cg.set_prefix("user_");
1371 *  
1372 *   cg.solve(jacobian_matrix, dst, src, preconditioner);
1373 *  
1374 *   pcout << " " << solver_control.last_step() << " linear iterations."
1375 *   << std::endl;
1376 *   }
1377 *  
1378 *  
1379 * @endcode
1380 *
1381 *
1382 * <a name="step_86-TheHeatEquationprepare_for_coarsening_and_refinementfunction"></a>
1383 * <h4>The HeatEquation::prepare_for_coarsening_and_refinement() function</h4>
1384 *
1385
1386 *
1387 * The next block of functions deals with mesh refinement. We split this
1388 * process up into a "decide whether and what you want to refine" and a
1389 * "please transfer these vectors from old to new mesh" phase, where the first
1390 * also deals with marking cells for refinement. (The decision whether or
1391 * not to refine is done in the lambda function that calls the current
1392 * function.)
1393 *
1394
1395 *
 1396 * Breaking things into a "mark cells" function and an "execute mesh
1397 * adaptation and transfer solution vectors" function is awkward, though
1398 * conceptually not difficult to understand. These two pieces of code
1399 * should really be part of the same function, as they are in @ref step_26 "step-26".
1400 * The issue is with what PETScWrappers::TimeStepper provides us with in these
1401 * callbacks. Specifically, the "decide whether and what you want to refine"
1402 * callback has access to the current solution, and so can evaluate
1403 * (spatial) error estimators to decide which cells to refine. The
1404 * second callback that transfers vectors from old to new mesh gets a bunch
1405 * of vectors, but without the semantic information on which of these is
1406 * the current solution vector. As a consequence, it cannot do the marking
1407 * of cells for refinement or coarsening, and we have to do that from the
1408 * first callback.
1409 *
1410
1411 *
1412 * In practice, however, the problem is minor. The first of these
1413 * two functions is essentially identical to the
1414 * first half of the corresponding function in @ref step_26 "step-26", with the only
1415 * difference that it uses the parallel::distributed::GridRefinement namespace
 1416 * function instead of the serial one. Once again, we make sure that we never
 1417 * fall below the minimum refinement level, nor rise above the maximum one,
 1418 * both of which we can select from the parameter file.
1419 *
1420 * @code
1421 *   template <int dim>
1422 *   void HeatEquation<dim>::prepare_for_coarsening_and_refinement(
1423 *   const PETScWrappers::MPI::Vector &y)
1424 *   {
1425 *   PETScWrappers::MPI::Vector locally_relevant_solution(locally_owned_dofs,
1426 *   locally_relevant_dofs,
1427 *   mpi_communicator);
1428 *   locally_relevant_solution = y;
1429 *  
1430 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
1431 *   KellyErrorEstimator<dim>::estimate(dof_handler,
1432 *   QGauss<dim - 1>(fe.degree + 1),
1433 *   {},
1434 *   locally_relevant_solution,
1435 *   estimated_error_per_cell);
1436 *  
 1437 *   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
 1438 *   triangulation, estimated_error_per_cell, 0.6, 0.4);
1439 *  
1440 *   const unsigned int max_grid_level =
1441 *   initial_global_refinement + max_delta_refinement_level;
1442 *   const unsigned int min_grid_level = initial_global_refinement;
1443 *  
1444 *   if (triangulation.n_levels() > max_grid_level)
1445 *   for (const auto &cell :
1446 *   triangulation.active_cell_iterators_on_level(max_grid_level))
1447 *   cell->clear_refine_flag();
1448 *   for (const auto &cell :
1449 *   triangulation.active_cell_iterators_on_level(min_grid_level))
1450 *   cell->clear_coarsen_flag();
1451 *   }
1452 *  
1453 *  
1454 * @endcode
1455 *
1456 *
1457 * <a name="step_86-TheHeatEquationtransfer_solution_vectors_to_new_meshfunction"></a>
1458 * <h4>The HeatEquation::transfer_solution_vectors_to_new_mesh() function</h4>
1459 *
1460
1461 *
 1462 * The following function then is the second half of the corresponding function
 1463 * in @ref step_26 "step-26". It is called by the time stepper whenever it needs to
1464 * transfer the solution and any intermediate stage vectors to a new mesh. We
1465 * must make sure that all input vectors are transformed into ghosted vectors
1466 * before the actual transfer is executed, and that we distribute the hanging
1467 * node constraints on the output vectors as soon as we have interpolated the
1468 * vectors to the new mesh -- i.e., that all constraints are satisfied on
1469 * the vectors we transfer.
1470 *
1471
1472 *
1473 * We have no way to enforce boundary conditions at this stage, however. This
1474 * is because the various vectors may correspond to solutions at previous time
1475 * steps if the method used here is a multistep time integrator, and so may
1476 * correspond to different time points that we are not privy to.
1477 *
1478
1479 *
1480 * While this could be a problem if we used the values of the solution and of
1481 * the intermediate stages on the constrained degrees of freedom to compute
1482 * the errors, we do not do so. Instead, we compute the errors on the
 1483 * differential equation, and ignore algebraic constraints; therefore we do not
1484 * need to guarantee that the boundary conditions are satisfied also in the
1485 * intermediate stages.
1486 *
1487
1488 *
 1489 * We do have the hanging node constraints at our disposal, though,
 1490 * and therefore we enforce them on the output vectors, even if this is not
 1491 * strictly needed.
1492 *
1493 * @code
1494 *   template <int dim>
1495 *   void HeatEquation<dim>::transfer_solution_vectors_to_new_mesh(
1496 *   const double time,
1497 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1498 *   std::vector<PETScWrappers::MPI::Vector> &all_out)
1499 *   {
 1500 *   parallel::distributed::SolutionTransfer<dim, PETScWrappers::MPI::Vector>
 1501 *   solution_trans(dof_handler);
1502 *  
1503 *   std::vector<PETScWrappers::MPI::Vector> all_in_ghosted(all_in.size());
1504 *   std::vector<const PETScWrappers::MPI::Vector *> all_in_ghosted_ptr(
1505 *   all_in.size());
1506 *   std::vector<PETScWrappers::MPI::Vector *> all_out_ptr(all_in.size());
1507 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1508 *   {
1509 *   all_in_ghosted[i].reinit(locally_owned_dofs,
1510 *   locally_relevant_dofs,
1511 *   mpi_communicator);
1512 *   all_in_ghosted[i] = all_in[i];
1513 *   all_in_ghosted_ptr[i] = &all_in_ghosted[i];
1514 *   }
1515 *  
1516 *   triangulation.prepare_coarsening_and_refinement();
1517 *   solution_trans.prepare_for_coarsening_and_refinement(all_in_ghosted_ptr);
1518 *   triangulation.execute_coarsening_and_refinement();
1519 *  
1520 *   setup_system(time);
1521 *  
1522 *   all_out.resize(all_in.size());
1523 *   for (unsigned int i = 0; i < all_in.size(); ++i)
1524 *   {
1525 *   all_out[i].reinit(locally_owned_dofs, mpi_communicator);
1526 *   all_out_ptr[i] = &all_out[i];
1527 *   }
1528 *   solution_trans.interpolate(all_out_ptr);
1529 *  
1530 *   for (PETScWrappers::MPI::Vector &v : all_out)
1531 *   hanging_nodes_constraints.distribute(v);
1532 *   }
1533 *  
1534 *  
1535 * @endcode
1536 *
1537 *
1538 * <a name="step_86-TheHeatEquationupdate_current_constraintsfunction"></a>
1539 * <h4>The HeatEquation::update_current_constraints() function</h4>
1540 *
1541
1542 *
1543 * Since regenerating the constraints at each time step may be expensive, we
 1544 * make sure that we only do so when the time changes. We detect a change in
 1545 * time by checking whether the time of the `boundary_values_function` differs
 1546 * from the time of the last call to this function. This will work most
 1547 * of the time, but not the very first time we call this function, since the
 1548 * time then may be zero and the time of the `boundary_values_function` is
 1549 * zero at construction time. We therefore also check the number of
 1550 * constraints in `current_constraints`, and if it is zero, we regenerate
 1551 * the constraints regardless of the time variable.
1552 *
1553 * @code
1554 *   template <int dim>
1555 *   void HeatEquation<dim>::update_current_constraints(const double time)
1556 *   {
1557 *   if (current_constraints.n_constraints() == 0 ||
1558 *   time != boundary_values_function.get_time())
1559 *   {
1560 *   TimerOutput::Scope t(computing_timer, "update current constraints");
1561 *  
1562 *   boundary_values_function.set_time(time);
1563 *   current_constraints.clear();
1564 *   current_constraints.reinit(locally_owned_dofs, locally_relevant_dofs);
1565 *   current_constraints.merge(hanging_nodes_constraints);
 1566 *   VectorTools::interpolate_boundary_values(dof_handler,
 1567 *   0,
1568 *   boundary_values_function,
1569 *   current_constraints);
1570 *   current_constraints.make_consistent_in_parallel(locally_owned_dofs,
1571 *   locally_relevant_dofs,
1572 *   mpi_communicator);
1573 *   current_constraints.close();
1574 *   }
1575 *   }
1576 *  
1577 *  
1578 * @endcode
1579 *
1580 *
1581 * <a name="step_86-TheHeatEquationrunfunction"></a>
1582 * <h4>The HeatEquation::run() function</h4>
1583 *
1584
1585 *
1586 * We have finally arrived at the main function of the class. At the top, it
1587 * creates the mesh and sets up the variables that make up the linear system:
1588 *
1589 * @code
1590 *   template <int dim>
1591 *   void HeatEquation<dim>::run()
1592 *   {
 1593 *   GridGenerator::hyper_L(triangulation);
 1594 *   triangulation.refine_global(initial_global_refinement);
1595 *  
1596 *   setup_system(/* time */ 0);
1597 *  
1598 * @endcode
1599 *
 1600 * We then set up the time stepping object and associate with it the matrix we
 1601 * will build whenever requested, used both as the Jacobian matrix (see the
 1602 * definition above of what the "Jacobian" actually refers to) and as the matrix
 1603 * from which a preconditioner for the Jacobian will be built.
1604 *
1605 * @code
 1606 *   PETScWrappers::TimeStepper<PETScWrappers::MPI::Vector,
 1607 *   PETScWrappers::MPI::SparseMatrix>
 1608 *   petsc_ts(time_stepper_data);
1609 *  
1610 *   petsc_ts.set_matrices(jacobian_matrix, jacobian_matrix);
1611 *  
1612 *  
1613 * @endcode
1614 *
1615 * The real work setting up the time stepping object starts here. As
1616 * discussed in the introduction, the way the PETScWrappers::TimeStepper
1617 * class is used is by inverting control: At the end of this function, we
1618 * will call PETScWrappers::TimeStepper::solve() which internally will
1619 * run the loop over time steps, and at the appropriate places *call back*
1620 * into user code for whatever functionality is required. What we need to
1621 * do is to hook up these callback functions by assigning appropriate
1622 * lambda functions to member variables of the `petsc_ts` object.
1623 *
1624
1625 *
1626 * We start by creating lambda functions that provide information about
1627 * the "implicit function" (i.e., that part of the right hand side of the
1628 * ODE system that we want to treat implicitly -- which in our case is
1629 * the entire right hand side), a function that assembles the Jacobian
1630 * matrix, and a function that solves a linear system with the Jacobian.
1631 *
1632 * @code
1633 *   petsc_ts.implicit_function = [&](const double time,
1634 *   const PETScWrappers::MPI::Vector &y,
1635 *   const PETScWrappers::MPI::Vector &y_dot,
1636 *   PETScWrappers::MPI::Vector &res) {
1637 *   this->implicit_function(time, y, y_dot, res);
1638 *   };
1639 *  
1640 *   petsc_ts.setup_jacobian = [&](const double time,
1641 *   const PETScWrappers::MPI::Vector &y,
1642 *   const PETScWrappers::MPI::Vector &y_dot,
1643 *   const double alpha) {
1644 *   this->assemble_implicit_jacobian(time, y, y_dot, alpha);
1645 *   };
1646 *  
1647 *   petsc_ts.solve_with_jacobian = [&](const PETScWrappers::MPI::Vector &src,
1648 *   PETScWrappers::MPI::Vector &dst) {
1649 *   this->solve_with_jacobian(src, dst);
1650 *   };
1651 *  
1652 * @endcode
1653 *
1654 * The next two callbacks deal with identifying and setting variables
1655 * that are considered "algebraic" (rather than "differential"), i.e., for
1656 * which we know what values they are supposed to have rather than letting
1657 * their values be determined by the differential equation. We need to
1658 * instruct the time stepper to ignore these components when computing the
1659 * error in the residuals, and we do so by first providing a function that
1660 * returns an IndexSet with the indices of these algebraic components of the
1661 * solution vector (or rather, that subset of the locally-owned part of the
 1662 * vector that is algebraic, in case we are running in parallel). The first
 1663 * of the following two functions does that. Specifically, both the nodes at
 1664 * which Dirichlet boundary conditions are applied and the hanging nodes are
 1665 * algebraically constrained. The function then returns a set of
1666 * indices that is initially empty (but knows about the size of the index
1667 * space) and which we then construct as the union of boundary and hanging
1668 * node indices.
1669 *
1670
1671 *
1672 * Following this, we then also need a function that, given a solution
1673 * vector `y` and the current time, sets the algebraic components of that
1674 * vector to their correct value. This comes down to ensuring that we have
 1675 * up to date constraints in the `current_constraints` variable, and then applying
1676 * these constraints to the solution vector via
1677 * AffineConstraints::distribute(). (It is perhaps worth noting that we
1678 * *could* have achieved the same in `solve_with_jacobian()`. Whenever the
1679 * time stepper solves a linear system, it follows up the call to the solver
 1680 * by calling the callback that sets the algebraic components to their correct values. We could
1681 * also have put the calls to `update_current_constraints()` and
1682 * `distribute()` into the `solve_with_jacobian` function, but by not doing
1683 * so, we can also replace the `solve_with_jacobian` function with a call to
1684 * a PETSc solver, and we would still have the current_constraints correctly
1685 * applied to the solution vector.)
1686 *
1687 * @code
1688 *   petsc_ts.algebraic_components = [&]() {
1689 *   IndexSet algebraic_set(dof_handler.n_dofs());
1690 *   algebraic_set.add_indices(DoFTools::extract_boundary_dofs(dof_handler));
1691 *   algebraic_set.add_indices(
1692 *   DoFTools::extract_hanging_node_dofs(dof_handler));
1693 *   return algebraic_set;
1694 *   };
1695 *  
1696 *   petsc_ts.update_constrained_components =
1697 *   [&](const double time, PETScWrappers::MPI::Vector &y) {
1698 *   TimerOutput::Scope t(computing_timer, "set algebraic components");
1699 *   update_current_constraints(time);
1700 *   current_constraints.distribute(y);
1701 *   };
1702 *  
1703 *  
1704 * @endcode
1705 *
1706 * The next two callbacks relate to mesh refinement. As discussed in the
1707 * introduction, PETScWrappers::TimeStepper knows how to deal with the
1708 * situation where we want to change the mesh. All we have to provide
 1709 * is a callback that returns `true` if we are at a point where we want
 1710 * to refine the mesh (and `false` otherwise) and that, if mesh refinement
 1711 * is indeed wanted, does some preparatory work for it by
 1712 * calling the `prepare_for_coarsening_and_refinement` function.
1713 *
1714
1715 *
1716 * If the first callback below returns `true`, then PETSc TS will
1717 * do some clean-up operations, and call the second of the
1718 * callback functions
1719 * (`petsc_ts.transfer_solution_vectors_to_new_mesh`) with a
1720 * collection of vectors that need to be interpolated from the old
1721 * to the new mesh. This may include the current solution, perhaps
1722 * the current time derivative of the solution, and in the case of
1723 * [multistep time
1724 * integrators](https://en.wikipedia.org/wiki/Linear_multistep_method) also
 1725 * the solutions of some previous time steps. We hand all of these over to
 1726 * the `transfer_solution_vectors_to_new_mesh()` member function of this class.
1727 *
1728 * @code
1729 *   petsc_ts.decide_and_prepare_for_remeshing =
1730 *   [&](const double /* time */,
1731 *   const unsigned int step_number,
1732 *   const PETScWrappers::MPI::Vector &y) -> bool {
1733 *   if (step_number > 0 && this->mesh_adaptation_frequency > 0 &&
1734 *   step_number % this->mesh_adaptation_frequency == 0)
1735 *   {
1736 *   pcout << std::endl << "Adapting the mesh..." << std::endl;
1737 *   this->prepare_for_coarsening_and_refinement(y);
1738 *   return true;
1739 *   }
1740 *   else
1741 *   return false;
1742 *   };
1743 *  
1744 *   petsc_ts.transfer_solution_vectors_to_new_mesh =
1745 *   [&](const double time,
1746 *   const std::vector<PETScWrappers::MPI::Vector> &all_in,
1747 *   std::vector<PETScWrappers::MPI::Vector> &all_out) {
1748 *   this->transfer_solution_vectors_to_new_mesh(time, all_in, all_out);
1749 *   };
1750 *  
1751 * @endcode
1752 *
1753 * The final callback is a "monitor" that is called in each
1754 * time step. Here we use it to create a graphical output. Perhaps a better
1755 * scheme would output the solution at fixed time intervals, rather
1756 * than in every time step, but this is not the main point of
1757 * this program and so we go with the easy approach:
1758 *
1759 * @code
1760 *   petsc_ts.monitor = [&](const double time,
1761 *   const PETScWrappers::MPI::Vector &y,
1762 *   const unsigned int step_number) {
1763 *   pcout << "Time step " << step_number << " at t=" << time << std::endl;
1764 *   this->output_results(time, step_number, y);
1765 *   };
1766 *  
1767 *  
1768 * @endcode
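 * For illustration, a sketch of the fixed-interval variant alluded to above
 * might look as follows; the interval of 0.1 time units is an arbitrary value
 * chosen for this example, and one would also have to include the `<limits>`
 * header for `std::numeric_limits`:
 * @code
 *   double       last_output_time = std::numeric_limits<double>::lowest();
 *   const double output_interval  = 0.1; // arbitrary example value
 *  
 *   petsc_ts.monitor = [&](const double                      time,
 *                          const PETScWrappers::MPI::Vector &y,
 *                          const unsigned int                step_number) {
 *     pcout << "Time step " << step_number << " at t=" << time << std::endl;
 *  
 *     // Only produce graphical output if at least `output_interval` time
 *     // units have passed since the last time we wrote a file:
 *     if (time - last_output_time >= output_interval)
 *       {
 *         this->output_results(time, step_number, y);
 *         last_output_time = time;
 *       }
 *   };
 * @endcode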
1769 *
1770 * With all of this out of the way, the rest of the function is
1771 * anticlimactic: We just have to initialize the solution vector with
1772 * the initial conditions and call the function that does the time
1773 * stepping, and everything else will happen automatically:
1774 *
1775 * @code
1776 *   PETScWrappers::MPI::Vector solution(locally_owned_dofs, mpi_communicator);
1777 *   VectorTools::interpolate(dof_handler, initial_value_function, solution);
1778 *  
1779 *   petsc_ts.solve(solution);
1780 *   }
1781 *   } // namespace Step86
1782 *  
1783 *  
1784 * @endcode
1785 *
1786 *
1787 * <a name="step_86-Themainfunction"></a>
1788 * <h3>The main() function</h3>
1789 *
1790
1791 *
1792 * The rest of the program is as it always looks. We read run-time parameters
1793 * from an input file via the ParameterAcceptor class in the same way as
1794 * we showed in @ref step_60 "step-60" and @ref step_70 "step-70".
1795 *
1796 * @code
1797 *   int main(int argc, char **argv)
1798 *   {
1799 *   try
1800 *   {
1801 *   using namespace Step86;
1802 *  
1803 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1804 *   HeatEquation<2> heat_equation_solver(MPI_COMM_WORLD);
1805 *  
1806 *   const std::string input_filename =
1807 *   (argc > 1 ? argv[1] : "heat_equation.prm");
1808 *   ParameterAcceptor::initialize(input_filename, "heat_equation_used.prm");
1809 *   heat_equation_solver.run();
1810 *   }
1811 *   catch (std::exception &exc)
1812 *   {
1813 *   std::cerr << std::endl
1814 *   << std::endl
1815 *   << "----------------------------------------------------"
1816 *   << std::endl;
1817 *   std::cerr << "Exception on processing: " << std::endl
1818 *   << exc.what() << std::endl
1819 *   << "Aborting!" << std::endl
1820 *   << "----------------------------------------------------"
1821 *   << std::endl;
1822 *  
1823 *   return 1;
1824 *   }
1825 *   catch (...)
1826 *   {
1827 *   std::cerr << std::endl
1828 *   << std::endl
1829 *   << "----------------------------------------------------"
1830 *   << std::endl;
1831 *   std::cerr << "Unknown exception!" << std::endl
1832 *   << "Aborting!" << std::endl
1833 *   << "----------------------------------------------------"
1834 *   << std::endl;
1835 *   return 1;
1836 *   }
1837 *  
1838 *   return 0;
1839 *   }
1840 * @endcode
1841<a name="step_86-Results"></a><h1>Results</h1>
1842
1843
1844When you run this program with the input file as is, you get output as follows:
1845@code
1846
1847Number of active cells: 768
1848Number of degrees of freedom: 833
1849
1850Time step 0 at t=0
1851 5 linear iterations.
1852 8 linear iterations.
1853Time step 1 at t=0.025
1854 6 linear iterations.
1855Time step 2 at t=0.05
1856 5 linear iterations.
1857 8 linear iterations.
1858Time step 3 at t=0.075
1859 6 linear iterations.
1860Time step 4 at t=0.1
1861 6 linear iterations.
1862Time step 5 at t=0.125
1863 6 linear iterations.
1864Time step 6 at t=0.15
1865 6 linear iterations.
1866Time step 7 at t=0.175
1867 6 linear iterations.
1868Time step 8 at t=0.2
1869 6 linear iterations.
1870Time step 9 at t=0.225
1871 6 linear iterations.
1872
1873Adapting the mesh...
1874
1875Number of active cells: 1050
1876Number of degrees of freedom: 1155
1877
1878Time step 10 at t=0.25
1879 5 linear iterations.
1880 8 linear iterations.
1881Time step 11 at t=0.275
1882 5 linear iterations.
1883 7 linear iterations.
1884
1885[...]
1886
1887Time step 195 at t=4.875
1888 6 linear iterations.
1889Time step 196 at t=4.9
1890 6 linear iterations.
1891Time step 197 at t=4.925
1892 6 linear iterations.
1893Time step 198 at t=4.95
1894 6 linear iterations.
1895Time step 199 at t=4.975
1896 5 linear iterations.
1897
1898Adapting the mesh...
1899
1900Number of active cells: 1380
1901Number of degrees of freedom: 1547
1902
1903Time step 200 at t=5
1904
1905
1906+---------------------------------------------+------------+------------+
1907| Total wallclock time elapsed since start | 43.2s | |
1908| | | |
1909| Section | no. calls | wall time | % of total |
1910+---------------------------------+-----------+------------+------------+
1911| assemble implicit Jacobian | 226 | 9.93s | 23% |
1912| implicit function | 426 | 16.2s | 37% |
1913| output results | 201 | 9.74s | 23% |
1914| set algebraic components | 200 | 0.0496s | 0.11% |
1915| setup system | 21 | 0.799s | 1.8% |
1916| solve with Jacobian | 226 | 0.56s | 1.3% |
1917| update current constraints | 201 | 1.53s | 3.5% |
1918+---------------------------------+-----------+------------+------------+
1919
1920@endcode
1921
1922We can generate output for this in the form of a video:
1923@htmlonly
1924<p align="center">
1925 <iframe width="560" height="315" src="https://www.youtube.com/embed/fhJVkcdRksM"
1926 frameborder="0"
1927 allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture"
1928 allowfullscreen></iframe>
1929 </p>
1930@endhtmlonly
1931The solution here is driven by boundary values (the initial conditions are zero,
1932and so is the right hand side of the equation). It takes a little bit of time
1933for the boundary values to diffuse into the domain, and so the temperature
1934(the solution of the heat equation) in the interior of the domain has a slight
1935lag compared to the temperature at the boundary.
1936
1937
1938The more interesting component of this program is how easy it is to play with
1939the details of the time stepping algorithm. Recall that the solution above
1940is controlled by the following parameters:
1941@code
1942 subsection Time stepper
1943 subsection Running parameters
1944 set final time = 5
1945 set initial step size = 0.025
1946 set initial time = 0
1947 set match final time = false
1948 set maximum number of steps = -1
1949 set options prefix =
1950 set solver type = beuler
1951 end
1952
1953 subsection Error control
1954 set absolute error tolerance = -1
1955 set adaptor type = none
1956 set ignore algebraic lte = true
1957 set maximum step size = -1
1958 set minimum step size = -1
1959 set relative error tolerance = -1
1960 end
1961 end
1962@endcode
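For completeness: the mesh-related parameters that the constructor declares via
`add_parameter()` live in a separate section of the same input file. A sketch of
that section (the values shown are the defaults declared in the code above; the
input file actually used for the run shown here evidently sets a nonzero mesh
adaptation frequency, and the right hand side, initial value, and boundary value
functions have further subsections of their own that are not reproduced here):
@code
  subsection Heat Equation
    set Initial global refinement      = 5
    set Maximum delta refinement level = 2
    set Mesh adaptation frequency      = 0
  end
@endcode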
1963Of particular interest for us here is to set the time stepping algorithm
1964and the adaptive time step control method. The latter is set to "none"
1965above, but there are
1966[several alternative choices](https://petsc.org/release/manualpages/TS/TSAdaptType/)
1967for this parameter. For example, we can set parameters as follows,
1968@code
1969 subsection Time stepper
1970 subsection Running parameters
1971 set final time = 5
1972 set initial step size = 0.025
1973 set initial time = 0
1974 set match final time = false
1975 set maximum number of steps = -1
1976 set options prefix =
1977 set solver type = bdf
1978 end
1979
1980 subsection Error control
1981 set absolute error tolerance = 1e-2
1982 set relative error tolerance = 1e-2
1983 set adaptor type = basic
1984 set ignore algebraic lte = true
1985 set maximum step size = 1
1986 set minimum step size = 0.01
1987 end
1988 end
1989@endcode
1990What we do here is set the initial time step size to 0.025, and choose relatively
1991large absolute and relative error tolerances of 0.01 for the time step size
1992adaptation algorithm (for which we choose "basic"). We ask PETSc TS to use a
1993[Backward Differentiation Formula (BDF)](https://en.wikipedia.org/wiki/Backward_differentiation_formula)
1994method, and we get the following as output:
1995@code
1996
1997===========================================
1998Number of active cells: 768
1999Number of degrees of freedom: 833
2000
2001Time step 0 at t=0
2002 5 linear iterations.
2003 5 linear iterations.
2004 5 linear iterations.
2005 4 linear iterations.
2006 6 linear iterations.
2007Time step 1 at t=0.01
2008 5 linear iterations.
2009 5 linear iterations.
2010Time step 2 at t=0.02
2011 5 linear iterations.
2012 5 linear iterations.
2013Time step 3 at t=0.03
2014 5 linear iterations.
2015 5 linear iterations.
2016Time step 4 at t=0.042574
2017 5 linear iterations.
2018 5 linear iterations.
2019Time step 5 at t=0.0588392
2020 5 linear iterations.
2021 5 linear iterations.
2022Time step 6 at t=0.0783573
2023 5 linear iterations.
2024 7 linear iterations.
2025 5 linear iterations.
2026 7 linear iterations.
2027Time step 7 at t=0.100456
2028 5 linear iterations.
2029 7 linear iterations.
2030 5 linear iterations.
2031 7 linear iterations.
2032Time step 8 at t=0.124982
2033 5 linear iterations.
2034 8 linear iterations.
2035 5 linear iterations.
2036 7 linear iterations.
2037Time step 9 at t=0.153156
2038 6 linear iterations.
2039 5 linear iterations.
2040
2041[...]
2042
2043Time step 206 at t=4.96911
2044 5 linear iterations.
2045 5 linear iterations.
2046Time step 207 at t=4.99398
2047 5 linear iterations.
2048 5 linear iterations.
2049Time step 208 at t=5.01723
2050
2051
2052+---------------------------------------------+------------+------------+
2053| Total wallclock time elapsed since start | 117s | |
2054| | | |
2055| Section | no. calls | wall time | % of total |
2056+---------------------------------+-----------+------------+------------+
2057| assemble implicit Jacobian | 593 | 35.6s | 31% |
2058| implicit function | 1101 | 56.3s | 48% |
2059| output results | 209 | 12.5s | 11% |
2060| set algebraic components | 508 | 0.156s | 0.13% |
2061| setup system | 21 | 1.11s | 0.95% |
2062| solve with Jacobian | 593 | 1.97s | 1.7% |
2063| update current constraints | 509 | 4.53s | 3.9% |
2064+---------------------------------+-----------+------------+------------+
2065@endcode
2066What is happening here is that apparently PETSc TS is not happy with our
2067choice of initial time step size, and after several linear solves has
2068reduced it to the minimum we allow, 0.01. The following time steps
2069then run at a time step size of 0.01 until it decides to make it slightly
2070larger again and (apparently) switches to a higher order method that
2071requires more linear solves per time step but allows for a larger time
2072step closer to our initial choice 0.025 again. It does not quite hit the
2073final time of @f$T=5@f$ with its time step choices, but we've got only
2074ourselves to blame for that by setting
2075@code
2076 set match final time = false
2077@endcode
2078in the input file. If hitting the end time exactly is important to us,
2079setting the flag to `true` resolves this issue.
2080
2081We can even reason why PETSc eventually chooses a time step of around
20820.025: The boundary values undergo a complete cosine cycle within 0.5
2083time units; we should expect that it takes around ten or twenty time
2084steps to resolve each period of a cycle to reasonable accuracy (and indeed,
20850.5/20 = 0.025), and this is the time step size PETSc eventually settles on.
2086
2087Not all combinations of methods, time step adaptation algorithms, and
2088other parameters are valid, but the main messages from the experiment
2089above that you should take away are:
2090- It would undoubtedly be quite time consuming to implement many of the
2091 methods that PETSc TS offers for time stepping -- but with a program
2092 such as the one here, we don't need to: We can just select from the
2093 many methods PETSc TS already has.
2094- Adaptive time step control is difficult; adaptive choice of which
2095 method or order to choose is perhaps even more difficult. None of the
2096 time dependent programs that came before the current one (say, @ref step_23 "step-23",
2097 @ref step_26 "step-26", @ref step_31 "step-31", @ref step_58 "step-58", and a good number of others) have either.
2098 Moreover, while deal.II is good at spatial adaptation of meshes, it
2099 is not a library written by experts in time step adaptation, and so will
2100 likely not gain this ability either. But, again, it doesn't
2101 have to: We can rely on a library written by experts in that area.
2102
2103
2104
2105<a name="step-86-extensions"></a>
2106<a name="step_86-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
2107
2108
2109The program actually runs in parallel, even though we have not used
2110that above. Specifically, if you have configured deal.II to use MPI,
2111then you can do `mpirun -np 8 ./step-86 heat_equation.prm` to run the
2112program with 8 processes.
2113
2114For the program as currently written (and in debug mode), this makes
2115little difference: It will run about twice as fast, but take about 8
2116times as much CPU time. That is because the problem is just so small:
2117Generally between 1000 and 2000 degrees of freedom. A good rule of
2118thumb is that programs really only benefit from parallel computing if
2119you have somewhere in the range of 50,000 to 100,000 unknowns *per MPI
2120process*. But it is not difficult to adapt the program at hand here to
2121run with a much finer mesh, or perhaps in 3d, so that one is beyond
2122that limit and sees the benefits of parallel computing.
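As a minimal sketch of that direction: switching to 3d only requires
instantiating the `HeatEquation` class for a different dimension in `main()`,
@code
  HeatEquation<3> heat_equation_solver(MPI_COMM_WORLD);
@endcode
assuming one also adjusts the function expressions in the parameter file so
that they make sense in 3d; and a finer mesh is obtained simply by increasing
the "Initial global refinement" parameter in the input file.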
2123
2124 *
2125 *
2126<a name="step_86-PlainProg"></a>
2127<h1> The plain program</h1>
2128@include "step-86.cc"
2129*/