860 * LA::MPI::PreconditionAMG preconditioner;
861 * preconditioner.initialize(system_matrix, data);
863 * solver.solve(system_matrix,
864 * completely_distributed_solution,
868 * pcout << " Solved in " << solver_control.last_step() << " iterations."
871 * constraints.distribute(completely_distributed_solution);
873 * locally_relevant_solution = completely_distributed_solution;
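 *
 * As a reminder, the two vectors used in the last step above follow the
 * usual deal.II idiom for %parallel computations. The following is only a
 * sketch of how such a pair of vectors is typically created (the program
 * itself sets them up in its setup phase, and the exact calls may differ):
 *
 * @code
 * // Non-ghosted vector: each process stores only the locally owned
 * // degrees of freedom. This is the kind of vector a solver can write to.
 * LA::MPI::Vector completely_distributed_solution(locally_owned_dofs,
 *                                                 mpi_communicator);
 *
 * // Ghosted vector: locally owned plus locally relevant (ghost) entries.
 * // It is treated as read-only in the rest of the program.
 * LA::MPI::Vector locally_relevant_solution(locally_owned_dofs,
 *                                           locally_relevant_dofs,
 *                                           mpi_communicator);
 *
 * // Assigning the non-ghosted to the ghosted vector performs the MPI
 * // communication that imports ghost values from their owning processes.
 * locally_relevant_solution = completely_distributed_solution;
 * @endcode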
881 * <a name="step_40-LaplaceProblemrefine_grid"></a>
882 * <h4>LaplaceProblem::refine_grid</h4>
886 * The function that estimates the error and refines the grid is again
887 * almost exactly like the one in @ref step_6 "step-6". The only difference is that the
888 * function that flags cells to be refined is now in namespace
889 * parallel::distributed::GridRefinement -- a namespace that has functions
890 * that can communicate between all involved processors and determine global
891 * thresholds to use in deciding which cells to refine and which to coarsen.
895 * Note that we didn't have to do anything special about the
896 * KellyErrorEstimator class: we just give it a vector with as many elements
897 * as the local triangulation has cells (locally owned cells, ghost cells,
898 * and artificial ones), but it only fills those entries that correspond to
899 * cells that are locally owned.
903 * void LaplaceProblem<dim>::refine_grid()
905 * TimerOutput::Scope t(computing_timer, "refine");
907 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
908 * KellyErrorEstimator<dim>::estimate(
910 * QGauss<dim - 1>(fe.degree + 1),
911 * std::map<types::boundary_id, const Function<dim> *>(),
912 * locally_relevant_solution,
913 * estimated_error_per_cell);
914 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
915 * triangulation, estimated_error_per_cell, 0.3, 0.03);
916 * triangulation.execute_coarsening_and_refinement();
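 *
 * As an aside: refine_and_coarsen_fixed_number() marks fixed fractions of
 * the cells (here the 30% with the largest and the 3% with the smallest
 * error indicators). If one would rather control the fraction of the total
 * error that gets refined away, the same namespace provides a drop-in
 * alternative; the following is only a sketch with arbitrarily chosen
 * fractions, not something this tutorial itself does:
 *
 * @code
 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_fraction(
 *   triangulation, estimated_error_per_cell, 0.5, 0.05);
 * triangulation.execute_coarsening_and_refinement();
 * @endcode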
924 * <a name="step_40-LaplaceProblemoutput_results"></a>
925 * <h4>LaplaceProblem::output_results</h4>
929 * Compared to the corresponding function in @ref step_6 "step-6", the one here is
930 * a tad more complicated. There are two reasons: the first one is
931 * that we do not just want to output the solution but also for each
932 * cell which processor owns it (i.e. which "subdomain" it is
933 * in). Secondly, as discussed at length in @ref step_17 "step-17" and @ref step_18 "step-18",
934 * generating graphical data can be a bottleneck in
935 * parallelizing. In those two programs, we simply generate one
936 * output file per process. That worked because the
937 * parallel::shared::Triangulation cannot be used with large numbers
938 * of MPI processes anyway. But this doesn't scale: Creating a
939 * single file per processor will overwhelm the filesystem with a
940 * large number of processors.
944 * We here follow a more sophisticated approach that uses
945 * high-performance, parallel IO routines using MPI I/O to write to
946 * a small, fixed number of visualization files (here 8). We also
947 * generate a .pvtu record referencing these .vtu files, which can
948 * be opened directly in visualization tools like ParaView and VisIt.
952 * To start, the top of the function looks like it usually does. In addition
953 * to attaching the solution vector (the one that has entries for all locally
954 * relevant, not only the locally owned, elements), we attach a data vector
955 * that stores, for each cell, the subdomain the cell belongs to. This is
956 * slightly tricky, because of course not every processor knows about every
957 * cell. The vector we attach therefore has an entry for every cell that the
958 * current processor has in its mesh (locally owned ones, ghost cells, and
959 * artificial cells), but the DataOut class will ignore all entries that
960 * correspond to cells that are not owned by the current processor. As a
961 * consequence, it doesn't actually matter what values we write into these
962 * vector entries: we simply fill the entire vector with the number of the
963 * current MPI process (i.e. the subdomain_id of the current process); this
964 * correctly sets the values we care for, i.e. the entries that correspond
965 * to locally owned cells, while providing the wrong value for all other
966 * elements -- but these are then ignored anyway.
970 * void LaplaceProblem<dim>::output_results(const unsigned int cycle)
972 * TimerOutput::Scope t(computing_timer, "output");
974 * DataOut<dim> data_out;
975 * data_out.attach_dof_handler(dof_handler);
976 * data_out.add_data_vector(locally_relevant_solution, "u");
978 * Vector<float> subdomain(triangulation.n_active_cells());
979 * for (unsigned int i = 0; i < subdomain.size(); ++i)
980 * subdomain(i) = triangulation.locally_owned_subdomain();
981 * data_out.add_data_vector(subdomain, "subdomain");
983 * data_out.build_patches();
987 * The final step is to write this data to disk. We write up to 8 VTU files
988 * in parallel with the help of MPI-IO. Additionally a PVTU record is
989 * generated, which groups the written VTU files.
992 * data_out.write_vtu_with_pvtu_record(
993 * "./", "solution", cycle, mpi_communicator, 2, 8);
1001 * <a name="step_40-LaplaceProblemrun"></a>
1002 * <h4>LaplaceProblem::run</h4>
1006 * The function that controls the overall behavior of the program is again
1007 * like the one in @ref step_6 "step-6". The minor difference is the use of
1008 * <code>pcout</code> instead of <code>std::cout</code> for output to the
1009 * console (see also @ref step_17 "step-17").
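 *
 * As a reminder of what <code>pcout</code> is: a ConditionalOStream that
 * only actually prints on one process and silently swallows output on all
 * others. A minimal sketch of how such a stream can be set up (printing
 * only on MPI rank 0 is the usual convention; the communicator used here
 * is an assumption of the sketch):
 *
 * @code
 * #include <deal.II/base/conditional_ostream.h>
 * #include <deal.II/base/mpi.h>
 *
 * #include <iostream>
 *
 * ConditionalOStream pcout(std::cout,
 *                          Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) ==
 *                            0);
 * @endcode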
1013 * A functional difference to @ref step_6 "step-6" is the use of a square domain and that
1014 * we start with a slightly finer mesh (5 global refinement cycles) -- there
1015 * just isn't much of a point showing a massively %parallel program starting
1016 * on 4 cells (although admittedly the point is only slightly stronger
1017 * starting on 1024).
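 *
 * The "slightly finer mesh" mentioned above corresponds, in the first
 * cycle, to generating the unit square and refining it globally five
 * times. A minimal sketch of that mesh setup (the program's actual code
 * may differ in details):
 *
 * @code
 * GridGenerator::hyper_cube(triangulation);
 * triangulation.refine_global(5);
 * // 2^5 = 32 cells per direction, i.e. 1024 active cells in 2d, which
 * // matches the "Number of active cells: 1024" line of the output below.
 * @endcode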
1020 * template <int dim>
1021 * void LaplaceProblem<dim>::run()
1023 * pcout << "Running with "
1024 * #ifdef USE_PETSC_LA
1030 * << " MPI rank(s)..." << std::endl;
1032 * const unsigned int n_cycles = 8;
1033 * for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
1035 * pcout << "Cycle " << cycle << ':' << std::endl;
1046 * assemble_system();
1048 * output_results(cycle);
1050 * computing_timer.print_summary();
1051 * computing_timer.reset();
1053 * pcout << std::endl;
1063 * <a name="step_40-main"></a>
1068 * The final function, <code>main()</code>, again has the same structure as in
1069 * all other programs, in particular @ref step_6 "step-6". Like the other programs that use
1070 * MPI, we have to initialize and finalize MPI, which is done using the helper
1071 * class Utilities::MPI::MPI_InitFinalize. The constructor of that class also
1072 * initializes libraries that depend on MPI, such as p4est, PETSc, SLEPc, and
1073 * Zoltan (though the last two are not used in this tutorial). The order here
1074 * is important: we cannot use any of these libraries until they are
1075 * initialized, so it does not make sense to do anything before creating an
1076 * instance of Utilities::MPI::MPI_InitFinalize.
1080 * After the solver finishes, the LaplaceProblem destructor will run followed
1081 * by the destructor of the Utilities::MPI::MPI_InitFinalize object, which calls
1083 * <code>PetscFinalize</code> (and finalization functions for other
1084 * libraries), which will delete any in-use PETSc objects. This must be done
1085 * after we destruct the Laplace solver to avoid double deletion
1086 * errors. Fortunately, due to the order of destructor call rules of C++, we
1087 * do not need to worry about any of this: everything happens in the correct
1088 * order (i.e., the reverse of the order of construction). The last function
1089 * called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
1090 * <code>MPI_Finalize</code>: i.e., once this object is destructed the program
1091 * should exit since MPI will no longer be available.
1094 * int main(int argc, char *argv[])
1098 * using namespace dealii;
1099 * using namespace Step40;
1103 * LaplaceProblem<2> laplace_problem_2d;
1104 * laplace_problem_2d.run();
1106 * catch (std::exception &exc)
1108 * std::cerr << std::endl
1110 * << "----------------------------------------------------"
1112 * std::cerr << "Exception on processing: " << std::endl
1113 * << exc.what() << std::endl
1114 * << "Aborting!" << std::endl
1115 * << "----------------------------------------------------"
1122 * std::cerr << std::endl
1124 * << "----------------------------------------------------"
1126 * std::cerr << "Unknown exception!" << std::endl
1127 * << "Aborting!" << std::endl
1128 * << "----------------------------------------------------"
1136<a name="step_40-Results"></a><h1>Results</h1>
1139When you run the program, on a single processor or with your local MPI
1140installation on a few, you should get output like this:
1143 Number of active cells: 1024
1144 Number of degrees of freedom: 4225
1145 Solved in 10 iterations.
1148+---------------------------------------------+------------+------------+
1149| Total wallclock time elapsed since start | 0.176s | |
1151| Section | no. calls | wall time | % of total |
1152+---------------------------------+-----------+------------+------------+
1153| assembly | 1 | 0.0209s | 12% |
1154| output | 1 | 0.0189s | 11% |
1155| setup | 1 | 0.0299s | 17% |
1156| solve | 1 | 0.0419s | 24% |
1157+---------------------------------+-----------+------------+------------+
1161 Number of active cells: 1954
1162 Number of degrees of freedom: 8399
1163 Solved in 10 iterations.
1166+---------------------------------------------+------------+------------+
1167| Total wallclock time elapsed since start | 0.327s | |
1169| Section | no. calls | wall time | % of total |
1170+---------------------------------+-----------+------------+------------+
1171| assembly | 1 | 0.0368s | 11% |
1172| output | 1 | 0.0208s | 6.4% |
1173| refine | 1 | 0.157s | 48% |
1174| setup | 1 | 0.0452s | 14% |
1175| solve | 1 | 0.0668s | 20% |
1176+---------------------------------+-----------+------------+------------+
1180 Number of active cells: 3664
1181 Number of degrees of freedom: 16183
1182 Solved in 11 iterations.
1187The exact numbers differ, depending on how many processors we use;
1188this is due to the fact that the preconditioner depends on the
1189partitioning of the problem, the solution then differs in the last few
1190digits, and consequently the mesh refinement differs slightly.
1191The primary thing to notice here, though, is that the number of
1192iterations does not increase with the size of the problem. This
1193guarantees that we can efficiently solve even the largest problems.
1195When run on a sufficiently large number of machines (say a few
1196thousand), this program can relatively easily solve problems with well
1197over one billion unknowns in less than a minute. On the other hand,
1198such big problems can no longer be visualized, so we also ran the
1199program on only 16 processors. Here is a mesh, along with its
1200partitioning onto the 16 processors, and the corresponding solution:
1205 <img src="https://www.dealii.org/images/steps/developer/step-40.mesh.png" alt="">
1208 <img src="https://www.dealii.org/images/steps/developer/step-40.solution.png" alt="">
1213The mesh on the left has a mere 7,069 cells. This is of course a
1214problem we would easily have been able to solve already on a single
1215processor using @ref step_6 "step-6", but the point of the program was to show how
1216to write a program that scales to many more machines. For example,
1217here are two graphs that show how the run time of a large number of parts
1218of the program scales on problems with around 52 and 375 million degrees of
1219freedom if we take more and more processors (these and the next couple of
1220graphs are taken from an earlier version of the
1221@ref distributed_paper "Distributed Computing paper"; updated graphs showing
1222data of runs on even larger numbers of processors, and a lot
1223more interpretation can be found in the final version of the paper):
1228 <img src="https://www.dealii.org/images/steps/developer/step-40.strong2.png" alt="">
1231 <img src="https://www.dealii.org/images/steps/developer/step-40.strong.png" alt="">
1236As can clearly be seen, the program scales nicely to very large numbers of processors.
1238(For a discussion of what we consider "scalable" programs, see
1239@ref GlossParallelScaling "this glossary entry".)
1240The curves, in particular the linear solver, become a
1241bit wobbly at the right end of the graphs since each processor has too little
1242to do to offset the cost of communication (the part of the whole problem each
1243processor has to solve in the above two examples is only 13,000 and 90,000
1244degrees of freedom when 4,096 processors are used; a good rule of thumb is that
1245parallel programs work well if each processor has at least 100,000 unknowns).
1247While the strong scaling graphs above show that we can solve a problem of
1248fixed size faster and faster if we take more and more processors, the more
1249interesting question may be how big problems can become so that they can still
1250be solved within a reasonable time on a machine of a particular size. We show
1251this in the following two graphs for 256 and 4096 processors:
1256 <img src="https://www.dealii.org/images/steps/developer/step-40.256.png" alt="">
1259 <img src="https://www.dealii.org/images/steps/developer/step-40.4096.png" alt="">
1264What these graphs show is that all parts of the program scale linearly with
1265the number of degrees of freedom. This time, lines are wobbly at the left as
1266the size of local problems is too small. For more discussions of these results
1267we refer to the @ref distributed_paper "Distributed Computing paper".
1269So how large are the largest problems one can solve? At the time of writing this, the
1271limiting factor is that the program uses the BoomerAMG algebraic
1272multigrid method from the <a
1273href="http://acts.nersc.gov/hypre/" target="_top">Hypre package</a> as
1274a preconditioner, which unfortunately uses signed 32-bit integers to
1275index the elements of a %distributed matrix. This limits the size of
1276problems to @f$2^{31}-1=2,147,483,647@f$ degrees of freedom. From the graphs
1277above it is obvious that the scalability would extend beyond this
1278number, and one could expect that using more than the 4,096 machines
1279shown above would also further reduce the compute time. That said, one
1280can certainly expect that this limit will eventually be lifted by the
1281hypre developers.
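
One way to make this limit explicit, rather than running into silent integer
overflow, would be a check along the following lines. This is only a sketch
and not something the tutorial program actually contains:

@code
AssertThrow(dof_handler.n_dofs() <
              static_cast<types::global_dof_index>(
                std::numeric_limits<int>::max()),
            ExcMessage("BoomerAMG indexes matrix entries with signed 32-bit "
                       "integers, so this problem is too large for it."));
@endcode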
1283On the other hand, this does not mean that deal.II cannot solve bigger
1284problems. Indeed, @ref step_37 "step-37" shows how one can solve problems that are not
1285just a little, but very substantially larger than anything we have shown here.
1290<a name="step-40-extensions"></a>
1291<a name="step_40-Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1294In a sense, this program is the ultimate solver for the Laplace
1295equation: it can essentially solve the equation to whatever accuracy
1296you want, if only you have enough processors available. Since the
1297Laplace equation by itself is not terribly interesting at this level
1298of accuracy, the more interesting possibilities for extension
1299therefore concern not so much this program but what comes beyond
1300it. For example, several of the other programs in this tutorial have
1301significant run times, especially in 3d. It would therefore be
1302interesting to use the techniques explained here to extend other
1303programs to support parallel distributed computations. We have done
1304this for @ref step_31 "step-31" in the @ref step_32 "step-32" tutorial program, but the same would
1305apply to, for example, @ref step_23 "step-23" and @ref step_25 "step-25" for hyperbolic time
1306dependent problems, @ref step_33 "step-33" for gas dynamics, or @ref step_35 "step-35" for the
1307Navier-Stokes equations.
1309Maybe equally interesting is the problem of postprocessing. As
1310mentioned above, we only show pictures of the solution and the mesh
1311for 16 processors because 4,096 processors solving 1 billion unknowns
1312would produce graphical output on the order of several tens of
1313gigabytes. Currently, no program is able to visualize this amount of
1314data in any reasonable way unless it also runs on at least several
1315hundred processors. There are, however, approaches where visualization
1316programs directly communicate with solvers on each processor with each
1317visualization process rendering the part of the scene computed by the
1318solver on this processor. Implementing such an interface would allow us
1319to quickly visualize things that are otherwise not amenable to
1323<a name="step_40-PlainProg"></a>
1324<h1> The plain program</h1>
1325@include "step-40.cc"