step-40.h
1
764 * #endif
765 * preconditioner.initialize(system_matrix, data);
766 *
767 * solver.solve(system_matrix,
768 * completely_distributed_solution,
769 * system_rhs,
770 * preconditioner);
771 *
772 * pcout << " Solved in " << solver_control.last_step() << " iterations."
773 * << std::endl;
774 *
775 * constraints.distribute(completely_distributed_solution);
776 *
777 * locally_relevant_solution = completely_distributed_solution;
778 * }
779 *
780 *
781 *
782 * @endcode
783 *
784 *
785 * <a name="LaplaceProblemrefine_grid"></a>
786 * <h4>LaplaceProblem::refine_grid</h4>
787 *
788
789 *
790 * The function that estimates the error and refines the grid is again
791 * almost exactly like the one in @ref step_6 "step-6". The only difference is that the
792 * function that flags cells to be refined is now in namespace
793 * parallel::distributed::GridRefinement -- a namespace that has functions
794 * that can communicate between all involved processors and determine global
795 * thresholds to use in deciding which cells to refine and which to coarsen.
796 *
797
798 *
799 * Note that we didn't have to do anything special about the
800 * KellyErrorEstimator class: we just give it a vector with as many elements
801 * as the local triangulation has cells (locally owned cells, ghost cells,
802 * and artificial ones), but it only fills those entries that correspond to
803 * cells that are locally owned.
804 *
805 * @code
806 * template <int dim>
807 * void LaplaceProblem<dim>::refine_grid()
808 * {
809 * TimerOutput::Scope t(computing_timer, "refine");
810 *
811 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
812 * KellyErrorEstimator<dim>::estimate(
813 * dof_handler,
814 * QGauss<dim - 1>(fe.degree + 1),
815 * std::map<types::boundary_id, const Function<dim> *>(),
816 * locally_relevant_solution,
817 * estimated_error_per_cell);
818 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
819 * triangulation, estimated_error_per_cell, 0.3, 0.03);
820 * triangulation.execute_coarsening_and_refinement();
821 * }
822 *
823 *
824 *
825 * @endcode
826 *
827 *
828 * <a name="LaplaceProblemoutput_results"></a>
829 * <h4>LaplaceProblem::output_results</h4>
830 *
831
832 *
833 * Compared to the corresponding function in @ref step_6 "step-6", the one here is
834 * a tad more complicated. There are two reasons: the first one is
835 * that we do not just want to output the solution but also for each
836 * cell which processor owns it (i.e. which "subdomain" it is
837 * in). Secondly, as discussed at length in @ref step_17 "step-17" and @ref step_18 "step-18",
838 * generating graphical data can be a bottleneck in
839 * parallelizing. In those two programs, we simply generate one
840 * output file per process. That worked because the
841 * parallel::shared::Triangulation cannot be used with large numbers
842 * of MPI processes anyway. But this doesn't scale: Creating a
843 * single file per processor will overwhelm the filesystem with a
844 * large number of processors.
845 *
846
847 *
848 * We here follow a more sophisticated approach that uses
849 * high-performance, parallel I/O routines based on MPI I/O to write to
850 * a small, fixed number of visualization files (here 8). We also
851 * generate a .pvtu record referencing these .vtu files, which can
852 * be opened directly in visualization tools like ParaView and VisIt.
853 *
854
855 *
856 * To start, the top of the function looks like it usually does. In addition
857 * to attaching the solution vector (the one that has entries for all locally
858 * relevant, not only the locally owned, elements), we attach a data vector
859 * that stores, for each cell, the subdomain the cell belongs to. This is
860 * slightly tricky, because of course not every processor knows about every
861 * cell. The vector we attach therefore has an entry for every cell that the
862 * current processor has in its mesh (locally owned ones, ghost cells, and
863 * artificial cells), but the DataOut class will ignore all entries that
864 * correspond to cells that are not owned by the current processor. As a
865 * consequence, it doesn't actually matter what values we write into these
866 * vector entries: we simply fill the entire vector with the number of the
867 * current MPI process (i.e. the subdomain_id of the current process); this
868 * correctly sets the values we care for, i.e. the entries that correspond
869 * to locally owned cells, while providing the wrong value for all other
870 * elements -- but these are then ignored anyway.
871 *
872 * @code
873 * template <int dim>
874 * void LaplaceProblem<dim>::output_results(const unsigned int cycle) const
875 * {
876 * DataOut<dim> data_out;
877 * data_out.attach_dof_handler(dof_handler);
878 * data_out.add_data_vector(locally_relevant_solution, "u");
879 *
880 * Vector<float> subdomain(triangulation.n_active_cells());
881 * for (unsigned int i = 0; i < subdomain.size(); ++i)
882 * subdomain(i) = triangulation.locally_owned_subdomain();
883 * data_out.add_data_vector(subdomain, "subdomain");
884 *
885 * data_out.build_patches();
886 *
887 * @endcode
888 *
889 * The final step is to write this data to disk. We write up to 8 VTU files
890 * in parallel with the help of MPI-IO. Additionally a PVTU record is
891 * generated, which groups the written VTU files.
892 *
893 * @code
894 * data_out.write_vtu_with_pvtu_record(
895 * "./", "solution", cycle, mpi_communicator, 2, 8);
896 * }
897 *
898 *
899 *
900 * @endcode
901 *
902 *
903 * <a name="LaplaceProblemrun"></a>
904 * <h4>LaplaceProblem::run</h4>
905 *
906
907 *
908 * The function that controls the overall behavior of the program is again
909 * like the one in @ref step_6 "step-6". The minor difference is the use of
910 * <code>pcout</code> instead of <code>std::cout</code> for output to the
911 * console (see also @ref step_17 "step-17").
912 *
913
914 *
915 * A functional difference to @ref step_6 "step-6" is the use of a square domain and that
916 * we start with a slightly finer mesh (5 global refinement cycles) -- there
917 * just isn't much of a point showing a massively %parallel program starting
918 * on 4 cells (although admittedly the point is only slightly stronger
919 * starting on 1024).
920 *
921 * @code
922 * template <int dim>
923 * void LaplaceProblem<dim>::run()
924 * {
925 * pcout << "Running with "
926 * #ifdef USE_PETSC_LA
927 * << "PETSc"
928 * #else
929 * << "Trilinos"
930 * #endif
931 * << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
932 * << " MPI rank(s)..." << std::endl;
933 *
934 * const unsigned int n_cycles = 8;
935 * for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
936 * {
937 * pcout << "Cycle " << cycle << ':' << std::endl;
938 *
939 * if (cycle == 0)
940 * {
941 * GridGenerator::hyper_cube(triangulation);
942 * triangulation.refine_global(5);
943 * }
944 * else
945 * refine_grid();
946 *
947 * setup_system();
948 *
949 * pcout << " Number of active cells: "
950 * << triangulation.n_global_active_cells() << std::endl
951 * << " Number of degrees of freedom: " << dof_handler.n_dofs()
952 * << std::endl;
953 *
954 * assemble_system();
955 * solve();
956 *
957 * {
958 * TimerOutput::Scope t(computing_timer, "output");
959 * output_results(cycle);
960 * }
961 *
962 * computing_timer.print_summary();
963 * computing_timer.reset();
964 *
965 * pcout << std::endl;
966 * }
967 * }
968 * } // namespace Step40
969 *
970 *
971 *
972 * @endcode
973 *
974 *
975 * <a name="main"></a>
976 * <h4>main()</h4>
977 *
978
979 *
980 * The final function, <code>main()</code>, again has the same structure as in
981 * all other programs, in particular @ref step_6 "step-6". Like the other programs that use
982 * MPI, we have to initialize and finalize MPI, which is done using the helper
983 * object Utilities::MPI::MPI_InitFinalize. The constructor of that class also
984 * initializes libraries that depend on MPI, such as p4est, PETSc, SLEPc, and
985 * Zoltan (though the last two are not used in this tutorial). The order here
986 * is important: we cannot use any of these libraries until they are
987 * initialized, so it does not make sense to do anything before creating an
988 * instance of Utilities::MPI::MPI_InitFinalize.
989 *
990
991 *
992 * After the solver finishes, the LaplaceProblem destructor will run,
993 * followed by the destructor of the Utilities::MPI::MPI_InitFinalize
994 * object. The latter calls
995 * <code>PetscFinalize</code> (and finalization functions for other
996 * libraries), which will delete any in-use PETSc objects. This must be done
997 * after we destruct the Laplace solver to avoid double deletion
998 * errors. Fortunately, due to the order of destructor call rules of C++, we
999 * do not need to worry about any of this: everything happens in the correct
1000 * order (i.e., the reverse of the order of construction). The last function
1001 * called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
1002 * <code>MPI_Finalize</code>: i.e., once this object is destructed the program
1003 * should exit since MPI will no longer be available.
1004 *
1005 * @code
1006 * int main(int argc, char *argv[])
1007 * {
1008 * try
1009 * {
1010 * using namespace dealii;
1011 * using namespace Step40;
1012 *
1013 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1014 *
1015 * LaplaceProblem<2> laplace_problem_2d;
1016 * laplace_problem_2d.run();
1017 * }
1018 * catch (std::exception &exc)
1019 * {
1020 * std::cerr << std::endl
1021 * << std::endl
1022 * << "----------------------------------------------------"
1023 * << std::endl;
1024 * std::cerr << "Exception on processing: " << std::endl
1025 * << exc.what() << std::endl
1026 * << "Aborting!" << std::endl
1027 * << "----------------------------------------------------"
1028 * << std::endl;
1029 *
1030 * return 1;
1031 * }
1032 * catch (...)
1033 * {
1034 * std::cerr << std::endl
1035 * << std::endl
1036 * << "----------------------------------------------------"
1037 * << std::endl;
1038 * std::cerr << "Unknown exception!" << std::endl
1039 * << "Aborting!" << std::endl
1040 * << "----------------------------------------------------"
1041 * << std::endl;
1042 * return 1;
1043 * }
1044 *
1045 * return 0;
1046 * }
1047 * @endcode
1048<a name="Results"></a><h1>Results</h1>
1049
1050
1051When you run the program, on a single processor or with your local MPI
1052installation on a few, you should get output like this:
1053@code
1054Cycle 0:
1055 Number of active cells: 1024
1056 Number of degrees of freedom: 4225
1057 Solved in 10 iterations.
1058
1059
1060+---------------------------------------------+------------+------------+
1061| Total wallclock time elapsed since start | 0.176s | |
1062| | | |
1063| Section | no. calls | wall time | % of total |
1064+---------------------------------+-----------+------------+------------+
1065| assembly | 1 | 0.0209s | 12% |
1066| output | 1 | 0.0189s | 11% |
1067| setup | 1 | 0.0299s | 17% |
1068| solve | 1 | 0.0419s | 24% |
1069+---------------------------------+-----------+------------+------------+
1070
1071
1072Cycle 1:
1073 Number of active cells: 1954
1074 Number of degrees of freedom: 8399
1075 Solved in 10 iterations.
1076
1077
1078+---------------------------------------------+------------+------------+
1079| Total wallclock time elapsed since start | 0.327s | |
1080| | | |
1081| Section | no. calls | wall time | % of total |
1082+---------------------------------+-----------+------------+------------+
1083| assembly | 1 | 0.0368s | 11% |
1084| output | 1 | 0.0208s | 6.4% |
1085| refine | 1 | 0.157s | 48% |
1086| setup | 1 | 0.0452s | 14% |
1087| solve | 1 | 0.0668s | 20% |
1088+---------------------------------+-----------+------------+------------+
1089
1090
1091Cycle 2:
1092 Number of active cells: 3664
1093 Number of degrees of freedom: 16183
1094 Solved in 11 iterations.
1095
1096...
1097@endcode
1098
1099The exact numbers differ depending on how many processors we use:
1100the preconditioner depends on the partitioning of the problem, so the
1101solution differs in the last few digits, and consequently the mesh
1102refinement differs slightly.
1103The primary thing to notice here, though, is that the number of
1104iterations does not increase with the size of the problem. This
1105guarantees that we can efficiently solve even the largest problems.
1106
1107When run on a sufficiently large number of machines (say a few
1108thousand), this program can relatively easily solve problems with well
1109over one billion unknowns in less than a minute. On the other hand,
1110such big problems can no longer be visualized, so we also ran the
1111program on only 16 processors. Here is a mesh, along with its
1112partitioning onto the 16 processors, and the corresponding solution:
1113
1114<table width="100%">
1115<tr>
1116<td>
1117 <img src="https://www.dealii.org/images/steps/developer/step-40.mesh.png" alt="">
1118</td>
1119<td>
1120 <img src="https://www.dealii.org/images/steps/developer/step-40.solution.png" alt="">
1121</td>
1122</tr>
1123</table>
1124
1125The mesh on the left has a mere 7,069 cells. This is of course a
1126problem we would easily have been able to solve already on a single
1127processor using @ref step_6 "step-6", but the point of the program was to show how
1128to write a program that scales to many more machines. For example,
1129here are two graphs that show how the run time of a large number of parts
1130of the program scales on problems with around 52 and 375 million degrees of
1131freedom if we take more and more processors (these and the next couple of
1132graphs are taken from an earlier version of the
1133@ref distributed_paper "Distributed Computing paper"; updated graphs showing
1134data of runs on even larger numbers of processors, and a lot
1135more interpretation can be found in the final version of the paper):
1136
1137<table width="100%">
1138<tr>
1139<td>
1140 <img src="https://www.dealii.org/images/steps/developer/step-40.strong2.png" alt="">
1141</td>
1142<td>
1143 <img src="https://www.dealii.org/images/steps/developer/step-40.strong.png" alt="">
1144</td>
1145</tr>
1146</table>
1147
1148As can clearly be seen, the program scales nicely to very large
1149numbers of processors.
1150(For a discussion of what we consider "scalable" programs, see
1151@ref GlossParallelScaling "this glossary entry".)
1152The curves, in particular the one for the linear solver, become a
1153bit wobbly at the right end of the graphs since each processor has too little
1154to do to offset the cost of communication (the part of the whole problem each
1155processor has to solve in the above two examples is only 13,000 and 90,000
1156degrees of freedom when 4,096 processors are used; a good rule of thumb is that
1157parallel programs work well if each processor has at least 100,000 unknowns).
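
The rule of thumb above is easy to check at run time. The following is only a
small sketch (it is not part of step-40) that assumes the
<code>dof_handler</code>, <code>mpi_communicator</code>, and <code>pcout</code>
members of the LaplaceProblem class defined above:

@code
// Sketch: warn if the smallest per-rank share of unknowns falls below the
// ~100,000-DoF rule of thumb, in which case communication costs are likely
// to dominate over local work.
const types::global_dof_index min_local_dofs =
  Utilities::MPI::min(dof_handler.locally_owned_dofs().n_elements(),
                      mpi_communicator);
if (min_local_dofs < 100000)
  pcout << "Note: some ranks own only " << min_local_dofs
        << " degrees of freedom; strong scaling will likely flatten out."
        << std::endl;
@endcode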
1158
1159While the strong scaling graphs above show that we can solve a problem of
1160fixed size faster and faster if we take more and more processors, the more
1161interesting question may be how big problems can become so that they can still
1162be solved within a reasonable time on a machine of a particular size. We show
1163this in the following two graphs for 256 and 4096 processors:
1164
1165<table width="100%">
1166<tr>
1167<td>
1168 <img src="https://www.dealii.org/images/steps/developer/step-40.256.png" alt="">
1169</td>
1170<td>
1171 <img src="https://www.dealii.org/images/steps/developer/step-40.4096.png" alt="">
1172</td>
1173</tr>
1174</table>
1175
1176What these graphs show is that all parts of the program scale linearly with
1177the number of degrees of freedom. This time, the lines are wobbly at the
1178left end because the local problems are too small there. For more discussion of these results
1179we refer to the @ref distributed_paper "Distributed Computing paper".
1180
1181So how large are the largest problems one can solve? At the time of writing
1182this, the
1183limiting factor is that the program uses the BoomerAMG algebraic
1184multigrid method from the <a
1185href="http://acts.nersc.gov/hypre/" target="_top">Hypre package</a> as
1186a preconditioner, which unfortunately uses signed 32-bit integers to
1187index the elements of a %distributed matrix. This limits the size of
1188problems to @f$2^{31}-1=2,147,483,647@f$ degrees of freedom. From the graphs
1189above it is obvious that the scalability would extend beyond this
1190number, and one could expect that using more than the 4,096 machines
1191shown above would further reduce the compute time. That said, one
1192can certainly expect that this limit will eventually be lifted by the
1193hypre developers.
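
If one wants to avoid running into this limit by accident, a simple check
before the solve suffices. This is only a sketch under the assumptions of this
tutorial (it uses the <code>dof_handler</code> member from above and is not
part of step-40):

@code
// Sketch: fail with a clear message instead of silently exceeding BoomerAMG's
// signed 32-bit index limit of 2^31-1 = 2,147,483,647 unknowns.
AssertThrow(dof_handler.n_dofs() <= types::global_dof_index(2147483647),
            ExcMessage("The problem is too large for BoomerAMG's "
                       "32-bit matrix indices."));
@endcode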
1194
1195On the other hand, this does not mean that deal.II cannot solve bigger
1196problems. Indeed, @ref step_37 "step-37" shows how one can solve problems that are not
1197just a little, but very substantially larger than anything we have shown
1198here.
1199
1200
1201
1202<a name="extensions"></a>
1203<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1204
1205
1206In a sense, this program is the ultimate solver for the Laplace
1207equation: it can essentially solve the equation to whatever accuracy
1208you want, if only you have enough processors available. Since the
1209Laplace equation by itself is not terribly interesting at this level
1210of accuracy, the more interesting possibilities for extension
1211therefore concern not so much this program but what comes beyond
1212it. For example, several of the other programs in this tutorial have
1213significant run times, especially in 3d. It would therefore be
1214interesting to use the techniques explained here to extend other
1215programs to support parallel distributed computations. We have done
1216this for @ref step_31 "step-31" in the @ref step_32 "step-32" tutorial program, but the same would
1217apply to, for example, @ref step_23 "step-23" and @ref step_25 "step-25" for hyperbolic time
1218dependent problems, @ref step_33 "step-33" for gas dynamics, or @ref step_35 "step-35" for the
1219Navier-Stokes equations.
1220
1221Maybe equally interesting is the problem of postprocessing. As
1222mentioned above, we only show pictures of the solution and the mesh
1223for 16 processors because 4,096 processors solving 1 billion unknowns
1224would produce graphical output on the order of several tens of
1225gigabytes. Currently, no program is able to visualize this amount of
1226data in any reasonable way unless it also runs on at least several
1227hundred processors. There are, however, approaches where visualization
1228programs directly communicate with solvers on each processor with each
1229visualization process rendering the part of the scene computed by the
1230solver on this processor. Implementing such an interface would make it
1231possible to quickly visualize things that are otherwise not amenable to
1232graphical display.
1233 *
1234 *
1235<a name="PlainProg"></a>
1236<h1> The plain program</h1>
1237@include "step-40.cc"
1238*/