step-40.h
860 *   #endif
861 *   LA::MPI::PreconditionAMG preconditioner;
862 *   preconditioner.initialize(system_matrix, data);
863 *  
864 *   solver.solve(system_matrix,
865 *   completely_distributed_solution,
866 *   system_rhs,
867 *   preconditioner);
868 *  
869 *   pcout << " Solved in " << solver_control.last_step() << " iterations."
870 *   << std::endl;
871 *  
872 *   constraints.distribute(completely_distributed_solution);
873 *  
874 *   locally_relevant_solution = completely_distributed_solution;
875 *   }
876 *  
877 *  
878 *  
879 * @endcode
880 *
881 *
882 * <a name="step_40-LaplaceProblemrefine_grid"></a>
883 * <h4>LaplaceProblem::refine_grid</h4>
884 *
885
886 *
887 * The function that estimates the error and refines the grid is again
888 * almost exactly like the one in @ref step_6 "step-6". The only difference is that the
889 * function that flags cells to be refined is now in namespace
890 * parallel::distributed::GridRefinement -- a namespace that has functions
891 * that can communicate between all involved processors and determine
892 * global thresholds to use when deciding which cells to refine and which to coarsen.
893 *
894
895 *
896 * Note that we didn't have to do anything special about the
897 * KellyErrorEstimator class: we just give it a vector with as many elements
898 * as the local triangulation has cells (locally owned cells, ghost cells,
899 * and artificial ones), but it only fills those entries that correspond to
900 * cells that are locally owned.
901 *
902 * @code
903 *   template <int dim>
904 *   void LaplaceProblem<dim>::refine_grid()
905 *   {
906 *   TimerOutput::Scope t(computing_timer, "refine");
907 *  
908 *   Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
909 *   KellyErrorEstimator<dim>::estimate(
910 *   dof_handler,
911 *   QGauss<dim - 1>(fe.degree + 1),
912 *   std::map<types::boundary_id, const Function<dim> *>(),
913 *   locally_relevant_solution,
914 *   estimated_error_per_cell);
915 *   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
916 *   triangulation, estimated_error_per_cell, 0.3, 0.03);
917 *   triangulation.execute_coarsening_and_refinement();
918 *   }
919 *  
920 *  
921 *  
922 * @endcode
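 *
 * As an aside, the 0.3 and 0.03 arguments above are fractions of the
 * <em>number of cells</em> to refine and coarsen. If one would rather
 * control the fraction of the total estimated <em>error</em> that gets
 * refined away, the same namespace offers a fixed-fraction variant. A
 * minimal sketch of that alternative (not used in this program; the
 * 0.6/0.03 values are purely illustrative) could look like this:
 *
 * @code
 *   // Hypothetical alternative to the call above: flag cells so that
 *   // roughly 60% of the total estimated error is refined and 3% is
 *   // coarsened; the thresholds are again determined globally across
 *   // all MPI processes.
 *   parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
 *     triangulation, estimated_error_per_cell, 0.6, 0.03);
 *   triangulation.execute_coarsening_and_refinement();
 * @endcode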
923 *
924 *
925 * <a name="step_40-LaplaceProblemoutput_results"></a>
926 * <h4>LaplaceProblem::output_results</h4>
927 *
928
929 *
930 * Compared to the corresponding function in @ref step_6 "step-6", the one here is
931 * a tad more complicated. There are two reasons: the first one is
932 * that we do not just want to output the solution but also for each
933 * cell which processor owns it (i.e. which "subdomain" it is
934 * in). Secondly, as discussed at length in @ref step_17 "step-17" and @ref step_18 "step-18",
935 * generating graphical data can be a bottleneck in
936 * parallelizing. In those two programs, we simply generate one
937 * output file per process. That worked because the
938 * parallel::shared::Triangulation cannot be used with large numbers
939 * of MPI processes anyway. But this doesn't scale: creating one
940 * file per processor will overwhelm the filesystem when run on a
941 * large number of processors.
942 *
943
944 *
945 * Here, we instead use a single function call that employs
946 * high-performance, parallel IO routines using MPI I/O to write to
947 * a small, fixed number of visualization files (here 8). We also
948 * generate a .pvtu record referencing these .vtu files, which can
949 * be read by visualization programs such as ParaView and VisIt.
950 *
951
952 *
953 * To start, the top of the function looks like it usually does. In addition
954 * to attaching the solution vector (the one that has entries for all locally
955 * relevant, not only the locally owned, elements), we attach a data vector
956 * that stores, for each cell, the subdomain the cell belongs to. This is
957 * slightly tricky, because of course not every processor knows about every
958 * cell. The vector we attach therefore has an entry for every cell that the
959 * current processor has in its mesh (locally owned ones, ghost cells, and
960 * artificial cells), but the DataOut class will ignore all entries that
961 * correspond to cells that are not owned by the current processor. As a
962 * consequence, it doesn't actually matter what values we write into these
963 * vector entries: we simply fill the entire vector with the number of the
964 * current MPI process (i.e. the subdomain_id of the current process); this
965 * correctly sets the values we care for, i.e. the entries that correspond
966 * to locally owned cells, while providing the wrong value for all other
967 * elements -- but these are then ignored anyway.
968 *
969 * @code
970 *   template <int dim>
971 *   void LaplaceProblem<dim>::output_results(const unsigned int cycle)
972 *   {
973 *   TimerOutput::Scope t(computing_timer, "output");
974 *  
975 *   DataOut<dim> data_out;
976 *   data_out.attach_dof_handler(dof_handler);
977 *   data_out.add_data_vector(locally_relevant_solution, "u");
978 *  
979 *   Vector<float> subdomain(triangulation.n_active_cells());
980 *   for (unsigned int i = 0; i < subdomain.size(); ++i)
981 *   subdomain(i) = triangulation.locally_owned_subdomain();
982 *   data_out.add_data_vector(subdomain, "subdomain");
983 *  
984 *   data_out.build_patches();
985 *  
986 * @endcode
987 *
988 * The final step is to write this data to disk. We write up to 8 VTU files
989 * in parallel with the help of MPI-IO. Additionally a PVTU record is
990 * generated, which groups the written VTU files.
991 *
992 * @code
993 *   data_out.write_vtu_with_pvtu_record(
994 *   "./", "solution", cycle, mpi_communicator, 2, 8);
995 *   }
996 *  
997 *  
998 *  
999 * @endcode
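 *
 * For comparison, the one-file-per-process strategy of @ref step_17 "step-17" and
 * @ref step_18 "step-18" that was discussed (and dismissed) above would look roughly
 * like the following sketch. It works fine for small runs, but it produces
 * one file per MPI rank and cycle -- exactly the behavior that does not
 * scale to thousands of processes:
 *
 * @code
 *   // Illustration only -- not what this program does: every rank
 *   // writes its own .vtu file, named after the cycle and the rank.
 *   const unsigned int rank =
 *     Utilities::MPI::this_mpi_process(mpi_communicator);
 *   std::ofstream output("solution-" + Utilities::int_to_string(cycle, 2) +
 *                        "." + Utilities::int_to_string(rank, 4) + ".vtu");
 *   data_out.write_vtu(output);
 * @endcode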
1000 *
1001 *
1002 * <a name="step_40-LaplaceProblemrun"></a>
1003 * <h4>LaplaceProblem::run</h4>
1004 *
1005
1006 *
1007 * The function that controls the overall behavior of the program is again
1008 * like the one in @ref step_6 "step-6". The minor difference is the use of
1009 * <code>pcout</code> instead of <code>std::cout</code> for output to the
1010 * console (see also @ref step_17 "step-17").
1011 *
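 * As a reminder, <code>pcout</code> is a ConditionalOStream. In a program
 * like this one it is typically set up in the constructor along the
 * following lines (a sketch, not a verbatim copy of the class above):
 *
 * @code
 *   // Only the process with MPI rank 0 actually prints; every other
 *   // rank silently discards whatever is streamed into pcout.
 *   ConditionalOStream pcout(std::cout,
 *                            Utilities::MPI::this_mpi_process(
 *                              mpi_communicator) == 0);
 * @endcode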
1012
1013 *
1014 * A functional difference to @ref step_6 "step-6" is the use of a square domain and that
1015 * we start with a slightly finer mesh (5 global refinement cycles) -- there
1016 * just isn't much of a point showing a massively parallel program starting
1017 * on 4 cells (although admittedly the point is only slightly stronger
1018 * starting on 1024).
1019 *
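 * (For reference: five global refinements of the unit square produce
 * @f$4^5 = 1024@f$ cells, which is exactly the number of active cells
 * reported for cycle 0 in the results section below.)
 *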
1020 * @code
1021 *   template <int dim>
1022 *   void LaplaceProblem<dim>::run()
1023 *   {
1024 *   pcout << "Running with "
1026 *   << "PETSc"
1027 *   #else
1028 *   << "Trilinos"
1029 *   #endif
1030 *   << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
1031 *   << " MPI rank(s)..." << std::endl;
1032 *  
1033 *   const unsigned int n_cycles = 8;
1034 *   for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
1035 *   {
1036 *   pcout << "Cycle " << cycle << ':' << std::endl;
1037 *  
1038 *   if (cycle == 0)
1039 *   {
1040 *   GridGenerator::hyper_cube(triangulation);
1041 *   triangulation.refine_global(5);
1042 *   }
1043 *   else
1044 *   refine_grid();
1045 *  
1046 *   setup_system();
1047 *   assemble_system();
1048 *   solve();
1049 *   output_results(cycle);
1050 *  
1051 *   computing_timer.print_summary();
1052 *   computing_timer.reset();
1053 *  
1054 *   pcout << std::endl;
1055 *   }
1056 *   }
1057 *   } // namespace Step40
1058 *  
1059 *  
1060 *  
1061 * @endcode
1062 *
1063 *
1064 * <a name="step_40-main"></a>
1065 * <h4>main()</h4>
1066 *
1067
1068 *
1069 * The final function, <code>main()</code>, again has the same structure as in
1070 * all other programs, in particular @ref step_6 "step-6". Like the other programs that use
1071 * MPI, we have to initialize and finalize MPI, which is done using the helper
1072 * class Utilities::MPI::MPI_InitFinalize. The constructor of that class also
1073 * initializes libraries that depend on MPI, such as p4est, PETSc, SLEPc, and
1074 * Zoltan (though the last two are not used in this tutorial). The order here
1075 * is important: we cannot use any of these libraries until they are
1076 * initialized, so it does not make sense to do anything before creating an
1077 * instance of Utilities::MPI::MPI_InitFinalize.
1078 *
1079
1080 *
1081 * When the program is done, the destructor of the
1082 * Utilities::MPI::MPI_InitFinalize object created in main() runs. In
1083 * essence, this destructor calls PetscFinalize (as well as the
1084 * finalization functions of other
1085 * libraries), which will delete any in-use PETSc objects. This must be done
1086 * after we destruct the Laplace solver to avoid double deletion
1087 * errors. Fortunately, due to the order of destructor call rules of C++, we
1088 * do not need to worry about any of this: everything happens in the correct
1089 * order (i.e., the reverse of the order of construction). The last function
1090 * called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
1091 * MPI_Finalize: i.e., once this object is destructed, the program must
1092 * exit since MPI is no longer available.
1093 *
1094 * @code
1095 *   int main(int argc, char *argv[])
1096 *   {
1097 *   try
1098 *   {
1099 *   using namespace dealii;
1100 *   using namespace Step40;
1101 *  
1102 *   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1103 *  
1104 *   LaplaceProblem<2> laplace_problem_2d;
1105 *   laplace_problem_2d.run();
1106 *   }
1107 *   catch (std::exception &exc)
1108 *   {
1109 *   std::cerr << std::endl
1110 *   << std::endl
1111 *   << "----------------------------------------------------"
1112 *   << std::endl;
1113 *   std::cerr << "Exception on processing: " << std::endl
1114 *   << exc.what() << std::endl
1115 *   << "Aborting!" << std::endl
1116 *   << "----------------------------------------------------"
1117 *   << std::endl;
1118 *  
1119 *   return 1;
1120 *   }
1121 *   catch (...)
1122 *   {
1123 *   std::cerr << std::endl
1124 *   << std::endl
1125 *   << "----------------------------------------------------"
1126 *   << std::endl;
1127 *   std::cerr << "Unknown exception!" << std::endl
1128 *   << "Aborting!" << std::endl
1129 *   << "----------------------------------------------------"
1130 *   << std::endl;
1131 *   return 1;
1132 *   }
1133 *  
1134 *   return 0;
1135 *   }
1136 * @endcode
<a name="step_40-Results"></a><h1>Results</h1>


When you run the program, on a single processor or with your local MPI
installation on a few, you should get output like this:
@code
Running with PETSc on 1 MPI rank(s)...
Cycle 0:
   Number of active cells:       1024
   Number of degrees of freedom: 4225
   Solved in 7 iterations.


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |     0.548s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assembly                        |         1 |     0.242s |        44% |
| output                          |         1 |    0.0495s |         9% |
| setup                           |         1 |     0.102s |        19% |
| solve                           |         1 |    0.0283s |       5.2% |
+---------------------------------+-----------+------------+------------+


Cycle 1:
   Number of active cells:       1963
   Number of degrees of freedom: 8437
   Solved in 7 iterations.


+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      1.19s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assembly                        |         1 |     0.469s |        40% |
| output                          |         1 |    0.0899s |       7.6% |
| refine                          |         1 |     0.429s |        36% |
| setup                           |         1 |     0.177s |        15% |
| solve                           |         1 |    0.0204s |       1.7% |
+---------------------------------+-----------+------------+------------+


Cycle 2:
   Number of active cells:       3670
   Number of degrees of freedom: 16175
   Solved in 7 iterations.

...
@endcode

The exact numbers differ, depending on how many processors you use;
this is due to the fact that the preconditioner depends on the
partitioning of the problem, the solution then differs in the last few
digits, and consequently the adaptively refined meshes also differ
slightly. The important observation, however, is that the number of
solver iterations does not grow with the size of the problem.

When run on a sufficiently large number of machines (say a few
thousand), this program can solve problems whose size approaches a
billion unknowns. Problems of that size, however, can no longer be
reasonably visualized, so below we instead show the mesh of a much
smaller run (colored by the processor that owns each cell) together
with the corresponding solution:

<table width="100%">
<tr>
<td>
  <img src="https://www.dealii.org/images/steps/developer/step-40.mesh.png" alt="">
</td>
<td>
  <img src="https://www.dealii.org/images/steps/developer/step-40.solution.png" alt="">
</td>
</tr>
</table>

The mesh on the left has a mere 7,069 cells. This is of course a
problem we could easily have solved already on a single
processor using @ref step_6 "step-6", but the point of the program was to show how
to write a program that scales to many more machines. For example,
here are two graphs that show how the run time of a large number of parts
of the program scales when solving problems of a fixed (and rather large)
size on more and more processors (these and the following graphs are taken
from an earlier version of the
@ref distributed_paper "Distributed Computing paper"; updated graphs showing
data of runs on even larger numbers of processors, along with a more
detailed interpretation, can be found in the final version of the paper):

<table width="100%">
<tr>
<td>
  <img src="https://www.dealii.org/images/steps/developer/step-40.strong2.png" alt="">
</td>
<td>
  <img src="https://www.dealii.org/images/steps/developer/step-40.strong.png" alt="">
</td>
</tr>
</table>

As can clearly be seen, the program scales nicely to very large
numbers of processors.
(For a discussion of what we consider "scalable" programs, see
@ref GlossParallelScaling "this glossary entry".)
The curves, in particular the linear solver, become a
bit wobbly at the right end of the graphs: there, each processor has too
little work left to offset the cost of communication (the part of the
whole problem each
processor has to solve in the above two examples is only 13,000 and 90,000
degrees of freedom when 4,096 processors are used; a good rule of thumb is that
parallel programs work well if each processor has at least 100,000 unknowns).

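To put that rule of thumb into perspective: with 4,096 processes, the two
examples above correspond to overall problem sizes of roughly
@f$13{,}000\times 4{,}096\approx 53@f$ million and
@f$90{,}000\times 4{,}096\approx 369@f$ million unknowns, whereas the
guideline of 100,000 unknowns per process is only met starting at about
@f$4{,}096\times 100{,}000\approx 410@f$ million unknowns.
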
While the strong scaling graphs above show that we can solve a problem of
fixed size faster and faster by adding more and more processors, the perhaps
more interesting question is how large the problems can become so that a
machine of a given size can still solve them in a reasonable amount of time.
We show
this in the following two graphs for 256 and 4096 processors:

<table width="100%">
<tr>
<td>
  <img src="https://www.dealii.org/images/steps/developer/step-40.256.png" alt="">
</td>
<td>
  <img src="https://www.dealii.org/images/steps/developer/step-40.4096.png" alt="">
</td>
</tr>
</table>

What these graphs show is that all parts of the program scale linearly with
the number of degrees of freedom. This time, lines are wobbly at the left as
the problems there are too small for the given number of processors. For a
more detailed discussion of these results
we refer to the @ref distributed_paper "Distributed Computing paper".

So how large are the largest problems one can solve with a program like
this? At the time these runs were made, the limiting factor was that the
program uses the BoomerAMG algebraic
multigrid method from the <a
href="http://acts.nersc.gov/hypre/" target="_top">Hypre package</a> as
a preconditioner, which unfortunately uses signed 32-bit integers to
index the elements of a %distributed matrix. This limits the size of
problems to @f$2^{31}-1=2,147,483,647@f$ degrees of freedom. From the graphs
above it is clear that it is this limit, rather than the scalability of the
program itself, that caps the problem sizes reachable with this particular
preconditioner.

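As an aside, a program that relies on this preconditioner could guard
against silently exceeding that limit with a check along the following
lines (purely illustrative -- step-40 itself contains no such check):

@code
  // Illustration only: refuse to continue if the global number of
  // unknowns exceeds what a signed 32-bit index can address.
  AssertThrow(dof_handler.n_dofs() <=
                static_cast<types::global_dof_index>(
                  std::numeric_limits<int>::max()),
              ExcMessage("Problem size exceeds the 32-bit index range "
                         "of the BoomerAMG preconditioner."));
@endcode
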
On the other hand, this does not mean that deal.II cannot solve bigger
problems. Indeed, @ref step_37 "step-37" shows how one can solve problems that are not
just a little, but very substantially larger than anything we have shown
here.



<a name="step-40-extensions"></a>
<a name="step_40-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>


In a sense, this program is the ultimate solver for the Laplace
equation: it can essentially solve the equation to whatever accuracy you
want, if only you have enough processors available. Since the Laplace
equation by itself is not terribly interesting at this level of accuracy,
the more interesting possibilities for extension therefore concern not so
much this program but what comes beyond it. For example, several of the
other programs in this tutorial have significant run times, especially in
3d. It would therefore be interesting to use the techniques explained here
to extend other programs to support parallel distributed computations. We
have done
this for @ref step_31 "step-31" in the @ref step_32 "step-32" tutorial program, but the same would
apply to, for example, @ref step_23 "step-23" and @ref step_25 "step-25" for hyperbolic time
dependent problems, @ref step_33 "step-33" for gas dynamics, or @ref step_35 "step-35" for the
incompressible Navier-Stokes equations.

Maybe equally interesting is the question of postprocessing. As
mentioned above, we only show pictures of the solution and the mesh
for a relatively small run: the truly large computations discussed above
would produce graphical output on the order of several 10
GB, an amount of data that is difficult to visualize unless the
visualization tool itself runs in parallel on a comparable number of
processors. There are, however, approaches in which the visualization
program communicates directly with the solver running on each processor,
with each visualization process rendering the part of the scene computed
by the
solver on this processor. Implementing such an interface would allow
one to quickly visualize things that are otherwise not amenable to
graphical output.
1323 *
1324 *
<a name="step_40-PlainProg"></a>
<h1> The plain program</h1>
@include "step-40.cc"
*/