Reference documentation for deal.II version 9.2.0
step-40.h
754  * #endif
755  * preconditioner.initialize(system_matrix, data);
756  *
757  * solver.solve(system_matrix,
758  * completely_distributed_solution,
759  * system_rhs,
760  * preconditioner);
761  *
762  * pcout << " Solved in " << solver_control.last_step() << " iterations."
763  * << std::endl;
764  *
765  * constraints.distribute(completely_distributed_solution);
766  *
767  * locally_relevant_solution = completely_distributed_solution;
768  * }
769  *
770  *
771  *
772  * @endcode
773  *
774  *
775  * <a name="LaplaceProblemrefine_grid"></a>
776  * <h4>LaplaceProblem::refine_grid</h4>
777  *
778 
779  *
780  * The function that estimates the error and refines the grid is again
781  * almost exactly like the one in @ref step_6 "step-6". The only difference is that the
782  * function that flags cells to be refined is now in namespace
783  * parallel::distributed::GridRefinement -- a namespace that has functions
784  * that can communicate between all involved processors and determine global
785  * thresholds to use in deciding which cells to refine and which to coarsen.
786  *
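 * As an aside, the following sketch illustrates, in a deliberately naive way,
 * what "determining a global threshold" means for the 30% refinement fraction
 * used below: every process contributes its local error indicators, and the
 * indicator value that separates the largest 30% of all cells, counted across
 * all processes, becomes the refinement threshold. This is only an
 * illustration of the idea and makes a few assumptions (an MPI communicator
 * <code>comm</code> and a vector <code>local_errors</code> of indicators for
 * the locally owned cells); gathering every indicator on every process
 * obviously does not scale, and the library's actual implementation is
 * considerably smarter about this.
 *
 * @code
 * // Illustrative sketch only -- not the algorithm used by
 * // parallel::distributed::GridRefinement. Needs <mpi.h>, <vector>,
 * // <algorithm>, <functional>, and <cstddef>.
 * double naive_global_refinement_threshold(
 *   const std::vector<float> &local_errors, MPI_Comm comm)
 * {
 *   int n_ranks = 0;
 *   MPI_Comm_size(comm, &n_ranks);
 *
 *   // How many indicators does each rank contribute?
 *   const int        local_n = static_cast<int>(local_errors.size());
 *   std::vector<int> counts(n_ranks);
 *   MPI_Allgather(&local_n, 1, MPI_INT, counts.data(), 1, MPI_INT, comm);
 *
 *   std::vector<int> offsets(n_ranks, 0);
 *   for (int r = 1; r < n_ranks; ++r)
 *     offsets[r] = offsets[r - 1] + counts[r - 1];
 *   const int global_n = offsets.back() + counts.back();
 *
 *   // Collect all indicators on every rank and sort them in descending order.
 *   std::vector<float> all_errors(global_n);
 *   MPI_Allgatherv(local_errors.data(), local_n, MPI_FLOAT,
 *                  all_errors.data(), counts.data(), offsets.data(),
 *                  MPI_FLOAT, comm);
 *   if (all_errors.empty())
 *     return 0.;
 *   std::sort(all_errors.begin(), all_errors.end(), std::greater<float>());
 *
 *   // The indicator of the cell ranked at 30% of the global list is the
 *   // threshold above which cells would be flagged for refinement.
 *   const std::size_t cutoff = static_cast<std::size_t>(0.3 * global_n);
 *   return all_errors[std::min(cutoff, all_errors.size() - 1)];
 * }
 * @endcode
 *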
787 
788  *
789  * Note that we do not have to do anything special with the
790  * KellyErrorEstimator class: we just give it a vector with as many elements
791  * as the local triangulation has cells (locally owned cells, ghost cells,
792  * and artificial ones), but it only fills those entries that correspond to
793  * cells that are locally owned.
794  *
795  * @code
796  * template <int dim>
797  * void LaplaceProblem<dim>::refine_grid()
798  * {
799  * TimerOutput::Scope t(computing_timer, "refine");
800  *
801  * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
802  * KellyErrorEstimator<dim>::estimate(
803  * dof_handler,
804  * QGauss<dim - 1>(fe.degree + 1),
805  * std::map<types::boundary_id, const Function<dim> *>(),
806  * locally_relevant_solution,
807  * estimated_error_per_cell);
808  * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
809  * triangulation, estimated_error_per_cell, 0.3, 0.03);
810  * triangulation.execute_coarsening_and_refinement();
811  * }
812  *
813  *
814  *
815  * @endcode
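 *
 * The two numbers in the call above are the fractions of cells to mark: here,
 * the 30% of cells with the largest error indicators are flagged for
 * refinement, and the 3% with the smallest indicators for coarsening. Just as
 * a hypothetical illustration (not something the program does), a more
 * aggressive strategy would look like this:
 *
 * @code
 * // Hypothetical variant: refine the top 50% of cells and coarsen the
 * // bottom 5%, instead of the 30%/3% used in this program.
 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
 *   triangulation, estimated_error_per_cell, 0.5, 0.05);
 * @endcode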
816  *
817  *
818  * <a name="LaplaceProblemoutput_results"></a>
819  * <h4>LaplaceProblem::output_results</h4>
820  *
821 
822  *
823  * Compared to the corresponding function in @ref step_6 "step-6", the one here is a tad
824  * more complicated. There are two reasons: the first one is that we do not
825  * just want to output the solution but also, for each cell, which processor
826  * owns it (i.e., which "subdomain" it is in). Secondly, as discussed at
827  * length in @ref step_17 "step-17" and @ref step_18 "step-18", generating graphical data can be a
828  * bottleneck when parallelizing. In @ref step_18 "step-18", we moved this step out of
829  * the actual computation and into a separate program that later
830  * combined the output from various processors into a single file. But this
831  * doesn't scale: if the number of processors is large, this may mean that
832  * the step of combining data on a single processor later becomes the
833  * longest running part of the program, or it may produce a file that's so
834  * large that it can't be visualized any more. We here follow a more
835  * sensible approach, namely creating individual files for each MPI process
836  * and leaving it to the visualization program to make sense of that.
837  *
838 
839  *
840  * To start, the top of the function looks like it usually does. In addition
841  * to attaching the solution vector (the one that has entries for all locally
842  * relevant, not only the locally owned, elements), we attach a data vector
843  * that stores, for each cell, the subdomain the cell belongs to. This is
844  * slightly tricky, because of course not every processor knows about every
845  * cell. The vector we attach therefore has an entry for every cell that the
846  * current processor has in its mesh (locally owned ones, ghost cells, and
847  * artificial cells), but the DataOut class will ignore all entries that
848  * correspond to cells that are not owned by the current processor. As a
849  * consequence, it doesn't actually matter what values we write into these
850  * vector entries: we simply fill the entire vector with the number of the
851  * current MPI process (i.e. the subdomain_id of the current process); this
852  * correctly sets the values we care for, i.e. the entries that correspond
853  * to locally owned cells, while providing the wrong value for all other
854  * elements -- but these are then ignored anyway.
855  *
856  * @code
857  * template <int dim>
858  * void LaplaceProblem<dim>::output_results(const unsigned int cycle) const
859  * {
860  * DataOut<dim> data_out;
861  * data_out.attach_dof_handler(dof_handler);
862  * data_out.add_data_vector(locally_relevant_solution, "u");
863  *
864  * Vector<float> subdomain(triangulation.n_active_cells());
865  * for (unsigned int i = 0; i < subdomain.size(); ++i)
866  * subdomain(i) = triangulation.locally_owned_subdomain();
867  * data_out.add_data_vector(subdomain, "subdomain");
868  *
869  * data_out.build_patches();
870  *
871  * @endcode
872  *
873  * The next step is to write this data to disk. We write up to 8 VTU files
874  * in parallel with the help of MPI-IO. Additionally, a PVTU record is
875  * generated, which groups the written VTU files.
876  *
877  * @code
878  * data_out.write_vtu_with_pvtu_record(
879  * "./", "solution", cycle, mpi_communicator, 2, 8);
880  * }
881  *
882  *
883  *
884  * @endcode
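 *
 * For comparison, here is a sketch of roughly what one had to do by hand
 * before the write_vtu_with_pvtu_record() convenience function existed:
 * every process writes its own VTU file, and one process additionally writes
 * the PVTU record that lists all of them. The file names and the numbers of
 * digits below are arbitrary choices for illustration; the variables
 * <code>data_out</code>, <code>cycle</code>, and <code>mpi_communicator</code>
 * are the ones used above, and the convenience function additionally groups
 * files and can use MPI-IO.
 *
 * @code
 * // Sketch of the manual alternative: one VTU file per MPI rank, plus a
 * // PVTU record written by rank 0 (requires <fstream>).
 * const unsigned int rank =
 *   Utilities::MPI::this_mpi_process(mpi_communicator);
 * const std::string basename =
 *   "solution-" + Utilities::int_to_string(cycle, 2) + ".";
 *
 * std::ofstream output(basename + Utilities::int_to_string(rank, 4) + ".vtu");
 * data_out.write_vtu(output);
 *
 * if (rank == 0)
 *   {
 *     std::vector<std::string> filenames;
 *     for (unsigned int r = 0;
 *          r < Utilities::MPI::n_mpi_processes(mpi_communicator);
 *          ++r)
 *       filenames.push_back(basename + Utilities::int_to_string(r, 4) + ".vtu");
 *
 *     std::ofstream pvtu_output(basename + "pvtu");
 *     data_out.write_pvtu_record(pvtu_output, filenames);
 *   }
 * @endcode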
885  *
886  *
887  * <a name="LaplaceProblemrun"></a>
888  * <h4>LaplaceProblem::run</h4>
889  *
890 
891  *
892  * The function that controls the overall behavior of the program is again
893  * like the one in @ref step_6 "step-6". The minor differences are the use of
894  * <code>pcout</code> instead of <code>std::cout</code> for output to the
895  * console (see also @ref step_17 "step-17") and that we only generate graphical output if
896  * at most 32 processors are involved. Without this limit, it would be just
897  * too easy for people carelessly running this program without reading it
898  * first to bring down the cluster interconnect and fill any file system
899  * available :-)
900  *
901 
902  *
903  * A functional difference from @ref step_6 "step-6" is the use of a square domain and that
904  * we start with a slightly finer mesh (5 global refinement cycles) -- there
905  * just isn't much of a point in showing a massively %parallel program starting
906  * on 4 cells (although admittedly the point is only slightly stronger
907  * starting on 1024).
908  *
909  * @code
910  * template <int dim>
911  * void LaplaceProblem<dim>::run()
912  * {
913  * pcout << "Running with "
914  * #ifdef USE_PETSC_LA
915  * << "PETSc"
916  * #else
917  * << "Trilinos"
918  * #endif
919  * << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
920  * << " MPI rank(s)..." << std::endl;
921  *
922  * const unsigned int n_cycles = 8;
923  * for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
924  * {
925  * pcout << "Cycle " << cycle << ':' << std::endl;
926  *
927  * if (cycle == 0)
928  * {
929  * GridGenerator::hyper_cube(triangulation);
930  * triangulation.refine_global(5);
931  * }
932  * else
933  * refine_grid();
934  *
935  * setup_system();
936  *
937  * pcout << " Number of active cells: "
938  * << triangulation.n_global_active_cells() << std::endl
939  * << " Number of degrees of freedom: " << dof_handler.n_dofs()
940  * << std::endl;
941  *
942  * assemble_system();
943  * solve();
944  *
945  * if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
946  * {
947  * TimerOutput::Scope t(computing_timer, "output");
948  * output_results(cycle);
949  * }
950  *
951  * computing_timer.print_summary();
952  * computing_timer.reset();
953  *
954  * pcout << std::endl;
955  * }
956  * }
957  * } // namespace Step40
958  *
959  *
960  *
961  * @endcode
962  *
963  *
964  * <a name="main"></a>
965  * <h4>main()</h4>
966  *
967 
968  *
969  * The final function, <code>main()</code>, again has the same structure as in
970  * all other programs, in particular @ref step_6 "step-6". Like the other programs that use
971  * MPI, we have to initialize and finalize MPI, which is done using the helper
972  * object Utilities::MPI::MPI_InitFinalize. The constructor of that class also
973  * initializes libraries that depend on MPI, such as p4est, PETSc, SLEPc, and
974  * Zoltan (though the last two are not used in this tutorial). The order here
975  * is important: we cannot use any of these libraries until they are
976  * initialized, so it does not make sense to do anything before creating an
977  * instance of Utilities::MPI::MPI_InitFinalize.
978  *
979 
980  *
981  * After the solver finishes, the LaplaceProblem destructor will run followed
982  * by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize(). That destructor
983  * in turn calls
984  * <code>PetscFinalize</code> (and finalization functions for other
985  * libraries), which will delete any in-use PETSc objects. This must be done
986  * after we destruct the Laplace solver to avoid double deletion
987  * errors. Fortunately, due to C++'s rules on the order in which destructors are called, we
988  * do not need to worry about any of this: everything happens in the correct
989  * order (i.e., the reverse of the order of construction). The last function
990  * called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
991  * <code>MPI_Finalize</code>: i.e., once this object is destructed the program
992  * should exit since MPI will no longer be available.
993  *
994  * @code
995  * int main(int argc, char *argv[])
996  * {
997  * try
998  * {
999  * using namespace dealii;
1000  * using namespace Step40;
1001  *
1002  * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1003  *
1004  * LaplaceProblem<2> laplace_problem_2d;
1005  * laplace_problem_2d.run();
1006  * }
1007  * catch (std::exception &exc)
1008  * {
1009  * std::cerr << std::endl
1010  * << std::endl
1011  * << "----------------------------------------------------"
1012  * << std::endl;
1013  * std::cerr << "Exception on processing: " << std::endl
1014  * << exc.what() << std::endl
1015  * << "Aborting!" << std::endl
1016  * << "----------------------------------------------------"
1017  * << std::endl;
1018  *
1019  * return 1;
1020  * }
1021  * catch (...)
1022  * {
1023  * std::cerr << std::endl
1024  * << std::endl
1025  * << "----------------------------------------------------"
1026  * << std::endl;
1027  * std::cerr << "Unknown exception!" << std::endl
1028  * << "Aborting!" << std::endl
1029  * << "----------------------------------------------------"
1030  * << std::endl;
1031  * return 1;
1032  * }
1033  *
1034  * return 0;
1035  * }
1036  * @endcode
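 *
 * Since LaplaceProblem is a class template on the space dimension, nothing in
 * <code>main()</code> above is specific to 2d. Purely as a hypothetical sketch,
 * not part of the actual program: solving the corresponding 3d problem only
 * requires instantiating the class for <code>dim == 3</code>, at the price of a
 * much larger number of unknowns per refinement cycle.
 *
 * @code
 * // Hypothetical 3d variant; everything else in main() stays the same.
 * LaplaceProblem<3> laplace_problem_3d;
 * laplace_problem_3d.run();
 * @endcode
 *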
1037 <a name="Results"></a><h1>Results</h1>
1038 
1039 
1040 When you run the program, either on a single processor or on a few with your
1041 local MPI installation, you should get output like this:
1042 @code
1043 Cycle 0:
1044  Number of active cells: 1024
1045  Number of degrees of freedom: 4225
1046  Solved in 10 iterations.
1047 
1048 
1049 +---------------------------------------------+------------+------------+
1050 | Total wallclock time elapsed since start    |     0.176s |            |
1051 |                                             |            |            |
1052 | Section                         | no. calls |  wall time | % of total |
1053 +---------------------------------+-----------+------------+------------+
1054 | assembly                        |         1 |    0.0209s |        12% |
1055 | output                          |         1 |    0.0189s |        11% |
1056 | setup                           |         1 |    0.0299s |        17% |
1057 | solve                           |         1 |    0.0419s |        24% |
1058 +---------------------------------+-----------+------------+------------+
1059 
1060 
1061 Cycle 1:
1062  Number of active cells: 1954
1063  Number of degrees of freedom: 8399
1064  Solved in 10 iterations.
1065 
1066 
1067 +---------------------------------------------+------------+------------+
1068 | Total wallclock time elapsed since start    |     0.327s |            |
1069 |                                             |            |            |
1070 | Section                         | no. calls |  wall time | % of total |
1071 +---------------------------------+-----------+------------+------------+
1072 | assembly                        |         1 |    0.0368s |        11% |
1073 | output                          |         1 |    0.0208s |       6.4% |
1074 | refine                          |         1 |     0.157s |        48% |
1075 | setup                           |         1 |    0.0452s |        14% |
1076 | solve                           |         1 |    0.0668s |        20% |
1077 +---------------------------------+-----------+------------+------------+
1078 
1079 
1080 Cycle 2:
1081  Number of active cells: 3664
1082  Number of degrees of freedom: 16183
1083  Solved in 11 iterations.
1084 
1085 ...
1086 @endcode
1087 
1088 The exact numbers differ, depending on how many processors we use;
1089 this is because the preconditioner depends on the
1090 partitioning of the problem; the solution then differs in the last few
1091 digits, and consequently the mesh refinement differs slightly.
1092 The primary thing to notice here, though, is that the number of
1093 iterations does not increase with the size of the problem. This
1094 guarantees that we can efficiently solve even the largest problems.
1095 
1096 When run on a sufficiently large number of machines (say a few
1097 thousand), this program can relatively easily solve problems with well
1098 over one billion unknowns in less than a minute. On the other hand,
1099 such big problems can no longer be visualized, so we also ran the
1100 program on only 16 processors. Here are a mesh, along with its
1101 partitioning onto the 16 processors, and the corresponding solution:
1102 
1103 <table width="100%">
1104 <tr>
1105 <td>
1106  <img src="https://www.dealii.org/images/steps/developer/step-40.mesh.png" alt="">
1107 </td>
1108 <td>
1109  <img src="https://www.dealii.org/images/steps/developer/step-40.solution.png" alt="">
1110 </td>
1111 </tr>
1112 </table>
1113 
1114 The mesh on the left has a mere 7,069 cells. This is of course a
1115 problem we would easily have been able to solve already on a single
1116 processor using @ref step_6 "step-6", but the point of the program was to show how
1117 to write a program that scales to many more machines. For example,
1118 here are two graphs that show how the run time of a large number of parts
1119 of the program scales on problems with around 52 and 375 million degrees of
1120 freedom if we take more and more processors (these and the next couple of
1121 graphs are taken from an earlier version of the
1122 @ref distributed_paper "Distributed Computing paper"; updated graphs showing
1123 data of runs on even larger numbers of processors, and a lot
1124 more interpretation can be found in the final version of the paper):
1125 
1126 <table width="100%">
1127 <tr>
1128 <td>
1129  <img src="https://www.dealii.org/images/steps/developer/step-40.strong2.png" alt="">
1130 </td>
1131 <td>
1132  <img src="https://www.dealii.org/images/steps/developer/step-40.strong.png" alt="">
1133 </td>
1134 </tr>
1135 </table>
1136 
1137 As can clearly be seen, the program scales nicely to very large
1138 numbers of processors.
1139 (For a discussion of what we consider "scalable" programs, see
1140 @ref GlossParallelScaling "this glossary entry".)
1141 The curves, in particular the one for the linear solver, become a
1142 bit wobbly at the right end of the graphs since each processor has too little
1143 to do to offset the cost of communication (the part of the whole problem each
1144 processor has to solve in the above two examples is only 13,000 and 90,000
1145 degrees of freedom when 4,096 processors are used; a good rule of thumb is that
1146 parallel programs work well if each processor has at least 100,000 unknowns).
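
As a quick back-of-the-envelope application of this rule of thumb (our own
arithmetic, not a number taken from the paper): a problem with 375 million
unknowns should scale well up to roughly @f$3.75\times 10^8 / 10^5 \approx 3750@f$
processors, which is consistent with the curves becoming wobbly as we
approach 4,096 processors.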
1147 
1148 While the strong scaling graphs above show that we can solve a problem of
1149 fixed size faster and faster if we take more and more processors, the more
1150 interesting question may be how big a problem can become while still being
1151 solvable within a reasonable time on a machine of a particular size. We show
1152 this in the following two graphs for 256 and 4096 processors:
1153 
1154 <table width="100%">
1155 <tr>
1156 <td>
1157  <img src="https://www.dealii.org/images/steps/developer/step-40.256.png" alt="">
1158 </td>
1159 <td>
1160  <img src="https://www.dealii.org/images/steps/developer/step-40.4096.png" alt="">
1161 </td>
1162 </tr>
1163 </table>
1164 
1165 What these graphs show is that all parts of the program scale linearly with
1166 the number of degrees of freedom. This time, the lines are wobbly at the left as
1167 the size of the local problems is too small. For further discussion of these results
1168 we refer to the @ref distributed_paper "Distributed Computing paper".
1169 
1170 So how large are the largest problems one can solve? At the time of writing
1171 this program, the
1172 limiting factor is that the program uses the BoomerAMG algebraic
1173 multigrid method from the <a
1174 href="http://acts.nersc.gov/hypre/" target="_top">Hypre package</a> as
1175 a preconditioner, which unfortunately uses signed 32-bit integers to
1176 index the elements of a %distributed matrix. This limits the size of
1177 problems to @f$2^{31}-1=2,147,483,647@f$ degrees of freedom. From the graphs
1178 above it is obvious that the scalability would extend beyond this
1179 number, and one could expect that using more than the 4,096 machines
1180 shown above would reduce the compute time even further. That said, one
1181 can certainly expect that this limit will eventually be lifted by the
1182 hypre developers.
1183 
1184 On the other hand, this does not mean that deal.II cannot solve bigger
1185 problems. Indeed, @ref step_37 "step-37" shows how one can solve problems that are not
1186 just a little, but very substantially larger than anything we have shown
1187 here.
1188 
1189 
1190 
1191 <a name="extensions"></a>
1192 <a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1193 
1194 
1195 In a sense, this program is the ultimate solver for the Laplace
1196 equation: it can essentially solve the equation to whatever accuracy
1197 you want, if only you have enough processors available. Since the
1198 Laplace equation by itself is not terribly interesting at this level
1199 of accuracy, the more interesting possibilities for extension
1200 therefore concern not so much this program but what comes beyond
1201 it. For example, several of the other programs in this tutorial have
1202 significant run times, especially in 3d. It would therefore be
1203 interesting to use the techniques explained here to extend other
1204 programs to support parallel distributed computations. We have done
1205 this for @ref step_31 "step-31" in the @ref step_32 "step-32" tutorial program, but the same would
1206 apply to, for example, @ref step_23 "step-23" and @ref step_25 "step-25" for hyperbolic time
1207 dependent problems, @ref step_33 "step-33" for gas dynamics, or @ref step_35 "step-35" for the
1208 Navier-Stokes equations.
1209 
1210 Maybe equally interesting is the problem of postprocessing. As
1211 mentioned above, we only show pictures of the solution and the mesh
1212 for 16 processors because 4,096 processors solving 1 billion unknowns
1213 would produce graphical output on the order of several tens of
1214 gigabytes. Currently, no program is able to visualize this amount of
1215 data in any reasonable way unless it also runs on at least several
1216 hundred processors. There are, however, approaches where visualization
1217 programs directly communicate with the solver on each processor, with each
1218 visualization process rendering the part of the scene computed by the
1219 solver on that processor. Implementing such an interface would allow us
1220 to quickly visualize things that are otherwise not amenable to
1221 graphical display.
1222  *
1223  *
1224 <a name="PlainProg"></a>
1225 <h1> The plain program</h1>
1226 @include "step-40.cc"
1227 */