 *     LA::MPI::PreconditionAMG preconditioner;
 *     preconditioner.initialize(system_matrix, data);
 *
 *     solver.solve(system_matrix,
 *                  completely_distributed_solution,
 *                  system_rhs,
 *                  preconditioner);
 *
 *     pcout << "   Solved in " << solver_control.last_step() << " iterations."
 *           << std::endl;
 *
 *     constraints.distribute(completely_distributed_solution);
 *
 *     locally_relevant_solution = completely_distributed_solution;
 *   }
 *
 * <a name="step_40-LaplaceProblemrefine_grid"></a>
 * <h4>LaplaceProblem::refine_grid</h4>
 *
 * The function that estimates the error and refines the grid is again
 * almost exactly like the one in @ref step_6 "step-6". The only difference is that the
 * function that flags cells to be refined is now in namespace
 * parallel::distributed::GridRefinement -- a namespace that has functions
 * that can communicate between all involved processors and determine global
 * thresholds to use in deciding which cells to refine and which to
 * coarsen.
 *
 * Note that we didn't have to do anything special about the
 * KellyErrorEstimator class: we just give it a vector with as many elements
 * as the local triangulation has cells (locally owned cells, ghost cells,
 * and artificial ones), but it only fills those entries that correspond to
 * cells that are locally owned.
 *
 *   template <int dim>
 *   void LaplaceProblem<dim>::refine_grid()
 *   {
 *     TimerOutput::Scope t(computing_timer, "refine");
 *
 *     Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
 *     KellyErrorEstimator<dim>::estimate(
 *       dof_handler,
 *       QGauss<dim - 1>(fe.degree + 1),
 *       std::map<types::boundary_id, const Function<dim> *>(),
 *       locally_relevant_solution,
 *       estimated_error_per_cell);
 *     parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
 *       triangulation, estimated_error_per_cell, 0.3, 0.03);
 *     triangulation.execute_coarsening_and_refinement();
 *   }
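 *
 * (A brief editorial annotation of the call above, not part of the original
 * commentary: the two numeric arguments of
 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number()
 * are fractions of the global number of cells. With the values used here,
 * the call reads as follows.)
 *
 *     parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
 *       triangulation,
 *       estimated_error_per_cell,
 *       0.3,   // top fraction: flag ~30% of cells (globally) with the
 *              // largest error indicators for refinement
 *       0.03); // bottom fraction: flag ~3% of cells with the smallest
 *              // indicators for coarsening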
 *
 * <a name="step_40-LaplaceProblemoutput_results"></a>
 * <h4>LaplaceProblem::output_results</h4>
 *
 * Compared to the corresponding function in @ref step_6 "step-6", the one here is
 * a tad more complicated. There are two reasons: the first one is
 * that we do not just want to output the solution but also, for each
 * cell, which processor owns it (i.e. which "subdomain" it is
 * in). Secondly, as discussed at length in @ref step_17 "step-17" and @ref step_18 "step-18",
 * generating graphical data can be a bottleneck in
 * parallelizing. In those two programs, we simply generate one
 * output file per process. That worked because the
 * parallel::shared::Triangulation cannot be used with large numbers
 * of MPI processes anyway. But this doesn't scale: creating a
 * single file per processor will overwhelm the filesystem once the
 * number of processors becomes large.
 *
 * We here follow a more sophisticated approach that uses
 * high-performance, parallel IO routines using MPI I/O to write to
 * a small, fixed number of visualization files (here 8). We also
 * generate a .pvtu record referencing these .vtu files, which can
 * be opened directly in visualization tools like ParaView and VisIt.
 *
 * To start, the top of the function looks like it usually does. In addition
 * to attaching the solution vector (the one that has entries for all locally
 * relevant, not only the locally owned, elements), we attach a data vector
 * that stores, for each cell, the subdomain the cell belongs to. This is
 * slightly tricky, because of course not every processor knows about every
 * cell. The vector we attach therefore has an entry for every cell that the
 * current processor has in its mesh (locally owned ones, ghost cells, and
 * artificial cells), but the DataOut class will ignore all entries that
 * correspond to cells that are not owned by the current processor. As a
 * consequence, it doesn't actually matter what values we write into these
 * vector entries: we simply fill the entire vector with the number of the
 * current MPI process (i.e. the subdomain_id of the current process); this
 * correctly sets the values we care about, i.e. the entries that correspond
 * to locally owned cells, while providing the wrong value for all other
 * elements -- but these are then ignored anyway.
 *
 *   template <int dim>
 *   void LaplaceProblem<dim>::output_results(const unsigned int cycle)
 *   {
 *     TimerOutput::Scope t(computing_timer, "output");
 *
 *     DataOut<dim> data_out;
 *     data_out.attach_dof_handler(dof_handler);
 *     data_out.add_data_vector(locally_relevant_solution, "u");
 *
 *     Vector<float> subdomain(triangulation.n_active_cells());
 *     for (unsigned int i = 0; i < subdomain.size(); ++i)
 *       subdomain(i) = triangulation.locally_owned_subdomain();
 *     data_out.add_data_vector(subdomain, "subdomain");
 *
 *     data_out.build_patches();
 *
 * The final step is to write this data to disk. We write up to 8 VTU files
 * in parallel with the help of MPI-IO. Additionally, a PVTU record is
 * generated, which groups the written VTU files.
 *
 *     data_out.write_vtu_with_pvtu_record(
 *       "./", "solution", cycle, mpi_communicator, 2, 8);
 *   }
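 *
 * (Again an editorial annotation rather than part of the original program:
 * the arguments of DataOutInterface::write_vtu_with_pvtu_record() as used
 * above have the following meaning.)
 *
 *     data_out.write_vtu_with_pvtu_record(
 *       "./",              // directory the files are written into
 *       "solution",        // common base name of the output files
 *       cycle,             // counter appended to the base name
 *       mpi_communicator,  // communicator over which the parallel write happens
 *       2,                 // number of digits used for the counter
 *       8);                // group the output into at most 8 .vtu files,
 *                          // independently of the number of MPI processes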
 *
 * <a name="step_40-LaplaceProblemrun"></a>
 * <h4>LaplaceProblem::run</h4>
 *
 * The function that controls the overall behavior of the program is again
 * like the one in @ref step_6 "step-6". The minor difference is the use of
 * <code>pcout</code> instead of <code>std::cout</code> for output to the
 * console (see also @ref step_17 "step-17").
 *
 * A functional difference to @ref step_6 "step-6" is the use of a square domain and that
 * we start with a slightly finer mesh (5 global refinement cycles) -- there
 * just isn't much of a point showing a massively %parallel program starting
 * on 4 cells (although admittedly the point is only slightly stronger
 * starting on 1024).
 *
 *   template <int dim>
 *   void LaplaceProblem<dim>::run()
 *   {
 *     pcout << "Running with "
 * #ifdef USE_PETSC_LA
 *           << "PETSc"
 * #else
 *           << "Trilinos"
 * #endif
 *           << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
 *           << " MPI rank(s)..." << std::endl;
 *
 *     const unsigned int n_cycles = 8;
 *     for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
 *       {
 *         pcout << "Cycle " << cycle << ':' << std::endl;
 *
 *         if (cycle == 0)
 *           {
 *             GridGenerator::hyper_cube(triangulation);
 *             triangulation.refine_global(5);
 *           }
 *         else
 *           refine_grid();
 *
 *         setup_system();
 *         assemble_system();
 *         solve();
 *         output_results(cycle);
 *
 *         computing_timer.print_summary();
 *         computing_timer.reset();
 *
 *         pcout << std::endl;
 *       }
 *   }
 *
 * <a name="step_40-main"></a>
 * <h4>main()</h4>
 *
 * The final function, <code>main()</code>, again has the same structure as in
 * all other programs, in particular @ref step_6 "step-6". Like the other programs that use
 * MPI, we have to initialize and finalize MPI, which is done using the helper
 * class Utilities::MPI::MPI_InitFinalize. The constructor of that class also
 * initializes libraries that depend on MPI, such as p4est, PETSc, SLEPc, and
 * Zoltan (though the last two are not used in this tutorial). The order here
 * is important: we cannot use any of these libraries until they are
 * initialized, so it does not make sense to do anything before creating an
 * instance of Utilities::MPI::MPI_InitFinalize.
 *
 * After the solver finishes, the LaplaceProblem destructor will run followed
 * by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize(), which in turn calls
 * <code>PetscFinalize</code> (and finalization functions for other
 * libraries), which will delete any in-use PETSc objects. This must be done
 * after we destruct the Laplace solver to avoid double deletion
 * errors. Fortunately, due to the order of destructor call rules of C++, we
 * do not need to worry about any of this: everything happens in the correct
 * order (i.e., the reverse of the order of construction). The last function
 * called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
 * <code>MPI_Finalize</code>: i.e., once this object is destructed the program
 * should exit since MPI will no longer be available.
 *
 *   int main(int argc, char *argv[])
 *   {
 *     try
 *       {
 *         using namespace dealii;
 *         using namespace Step40;
 *
 *         Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
 *
 *         LaplaceProblem<2> laplace_problem_2d;
 *         laplace_problem_2d.run();
 *       }
 *     catch (std::exception &exc)
 *       {
 *         std::cerr << std::endl
 *                   << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *         std::cerr << "Exception on processing: " << std::endl
 *                   << exc.what() << std::endl
 *                   << "Aborting!" << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *
 *         return 1;
 *       }
 *     catch (...)
 *       {
 *         std::cerr << std::endl
 *                   << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *         std::cerr << "Unknown exception!" << std::endl
 *                   << "Aborting!" << std::endl
 *                   << "----------------------------------------------------"
 *                   << std::endl;
 *
 *         return 1;
 *       }
 *
 *     return 0;
 *   }

<a name="step_40-Results"></a><h1>Results</h1>

When you run the program, on a single processor or with your local
MPI installation on a few, you should get output like this:

Running with PETSc on 1 MPI rank(s)...
Cycle 0:
   Number of active cells:       1024
   Number of degrees of freedom: 4225
   Solved in 7 iterations.

+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |     0.548s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assembly                        |         1 |     0.242s |        44% |
| output                          |         1 |    0.0495s |         9% |
| setup                           |         1 |     0.102s |        19% |
| solve                           |         1 |    0.0283s |       5.2% |
+---------------------------------+-----------+------------+------------+

Cycle 1:
   Number of active cells:       1963
   Number of degrees of freedom: 8437
   Solved in 7 iterations.

+---------------------------------------------+------------+------------+
| Total wallclock time elapsed since start    |      1.19s |            |
|                                             |            |            |
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| assembly                        |         1 |     0.469s |        40% |
| output                          |         1 |    0.0899s |       7.6% |
| refine                          |         1 |     0.429s |        36% |
| setup                           |         1 |     0.177s |        15% |
| solve                           |         1 |    0.0204s |       1.7% |
+---------------------------------+-----------+------------+------------+

Cycle 2:
   Number of active cells:       3670
   Number of degrees of freedom: 16175
   Solved in 7 iterations.

The exact numbers differ, depending on how many processors we use;
this is due to the fact that the preconditioner depends on the
partitioning of the problem, the solution then differs in the last few
digits, and consequently the mesh refinement differs slightly.
The primary thing to notice here, though, is that the number of
iterations does not increase with the size of the problem. This
guarantees that we can efficiently solve even the largest problems.

When run on a sufficiently large number of machines (say a few
thousand), this program can relatively easily solve problems with well
over one billion unknowns in less than a minute. On the other hand,
such big problems can no longer be visualized, so we also ran the
program on only 16 processors. Here is a mesh, along with its
partitioning onto the 16 processors, and the corresponding solution:

  <img src="https://www.dealii.org/images/steps/developer/step-40.mesh.png" alt="">

  <img src="https://www.dealii.org/images/steps/developer/step-40.solution.png" alt="">

The mesh on the left has a mere 7,069 cells. This is of course a
problem we would easily have been able to solve already on a single
processor using @ref step_6 "step-6", but the point of the program was to show how
to write a program that scales to many more machines. For example,
here are two graphs that show how the run time of a large number of parts
of the program scales on problems with around 52 and 375 million degrees of
freedom if we take more and more processors (these and the next couple of
graphs are taken from an earlier version of the
@ref distributed_paper "Distributed Computing paper"; updated graphs showing
data of runs on even larger numbers of processors, and a lot
more interpretation can be found in the final version of the paper):

  <img src="https://www.dealii.org/images/steps/developer/step-40.strong2.png" alt="">

  <img src="https://www.dealii.org/images/steps/developer/step-40.strong.png" alt="">

As can clearly be seen, the program scales nicely to very large
numbers of processors.
(For a discussion of what we consider "scalable" programs, see
@ref GlossParallelScaling "this glossary entry".)
The curves, in particular the linear solver, become a
bit wobbly at the right end of the graphs since each processor has too little
to do to offset the cost of communication (the part of the whole problem each
processor has to solve in the above two examples is only 13,000 and 90,000
degrees of freedom when 4,096 processors are used; a good rule of thumb is that
parallel programs work well if each processor has at least 100,000 unknowns).

While the strong scaling graphs above show that we can solve a problem of
fixed size faster and faster if we take more and more processors, the more
interesting question may be how big problems can become so that they can still
be solved within a reasonable time on a machine of a particular size. We show
this in the following two graphs for 256 and 4096 processors:

  <img src="https://www.dealii.org/images/steps/developer/step-40.256.png" alt="">

  <img src="https://www.dealii.org/images/steps/developer/step-40.4096.png" alt="">

What these graphs show is that all parts of the program scale linearly with
the number of degrees of freedom. This time, lines are wobbly at the left as
the size of local problems is too small. For more discussions of these results
we refer to the @ref distributed_paper "Distributed Computing paper".

So how large are the largest problems one can solve? At the time of writing,
the limiting factor is that the program uses the BoomerAMG algebraic
multigrid method from the <a
href="http://acts.nersc.gov/hypre/" target="_top">Hypre package</a> as
a preconditioner, which unfortunately uses signed 32-bit integers to
index the elements of a distributed matrix. This limits the size of
problems to @f$2^{31}-1=2,147,483,647@f$ degrees of freedom. From the graphs
above it is obvious that the scalability would extend beyond this
number, and one could expect that using more than the 4,096 machines
shown above would also further reduce the compute time. That said, one
can certainly expect that this limit will eventually be lifted by the
hypre developers.

On the other hand, this does not mean that deal.II cannot solve bigger
problems. Indeed, @ref step_37 "step-37" shows how one can solve problems that are not
just a little, but very substantially larger than anything we have shown
here.

<a name="step-40-extensions"></a>
<a name="step_40-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>

In a sense, this program is the ultimate solver for the Laplace
equation: it can essentially solve the equation to whatever accuracy
you want, if only you have enough processors available. Since the
Laplace equation by itself is not terribly interesting at this level
of accuracy, the more interesting possibilities for extension
therefore concern not so much this program but what comes beyond
it. For example, several of the other programs in this tutorial have
significant run times, especially in 3d. It would therefore be
interesting to use the techniques explained here to extend other
programs to support parallel distributed computations. We have done
this for @ref step_31 "step-31" in the @ref step_32 "step-32" tutorial program, but the same would
apply to, for example, @ref step_23 "step-23" and @ref step_25 "step-25" for hyperbolic time
dependent problems, @ref step_33 "step-33" for gas dynamics, or @ref step_35 "step-35" for the
Navier-Stokes equations.

Maybe equally interesting is the problem of postprocessing. As
mentioned above, we only show pictures of the solution and the mesh
for 16 processors because 4,096 processors solving 1 billion unknowns
would produce graphical output on the order of several 10
gigabytes. Currently, no program is able to visualize this amount of
data in any reasonable way unless it also runs on at least several
hundred processors. There are, however, approaches where visualization
programs directly communicate with solvers on each processor with each
visualization process rendering the part of the scene computed by the
solver on this processor. Implementing such an interface would allow one
to quickly visualize things that are otherwise not amenable to
visualization.

<a name="step_40-PlainProg"></a>
<h1> The plain program</h1>
@include "step-40.cc"