 Reference documentation for deal.II version 9.3.3
step-40.h
763 * #endif
764 * preconditioner.initialize(system_matrix, data);
765 *
766 * solver.solve(system_matrix,
767 * completely_distributed_solution,
768 * system_rhs,
769 * preconditioner);
770 *
771 * pcout << " Solved in " << solver_control.last_step() << " iterations."
772 * << std::endl;
773 *
774 * constraints.distribute(completely_distributed_solution);
775 *
776 * locally_relevant_solution = completely_distributed_solution;
777 * }
778 *
779 *
780 *
781 * @endcode
782 *
783 *
784 * <a name="LaplaceProblemrefine_grid"></a>
785 * <h4>LaplaceProblem::refine_grid</h4>
786 *
787
788 *
789 * The function that estimates the error and refines the grid is again
790 * almost exactly like the one in @ref step_6 "step-6". The only difference is that the
791 * function that flags cells to be refined is now in namespace
792 * parallel::distributed::GridRefinement -- a namespace that has functions
793 * that can communicate between all involved processors and determine global
794 * thresholds to use in deciding which cells to refine and which to coarsen.
795 *
796
797 *
798 * Note that we didn't have to do anything special about the
799 * KellyErrorEstimator class: we just give it a vector with as many elements
800 * as the local triangulation has cells (locally owned cells, ghost cells,
801 * and artificial ones), but it only fills those entries that correspond to
802 * cells that are locally owned.
803 *
804 * @code
805 * template <int dim>
806 * void LaplaceProblem<dim>::refine_grid()
807 * {
808 * TimerOutput::Scope t(computing_timer, "refine");
809 *
810 * Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
811 * KellyErrorEstimator<dim>::estimate(
812 * dof_handler,
813 * QGauss<dim - 1>(fe.degree + 1),
814 * std::map<types::boundary_id, const Function<dim> *>(),
815 * locally_relevant_solution,
816 * estimated_error_per_cell);
817 * parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
818 * triangulation, estimated_error_per_cell, 0.3, 0.03);
819 * triangulation.execute_coarsening_and_refinement();
820 * }
821 *
822 *
823 *
824 * @endcode
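 *
 * One detail in the call above that the surrounding text does not mention is
 * the empty <code>std::map</code> argument: it is the map of Neumann boundary
 * functions that KellyErrorEstimator::estimate() can take into account, and
 * passing an empty map simply means that no Neumann boundary data enters the
 * error indicators. Written out with a named variable (the name
 * <code>neumann_bc</code> is only illustrative, and this is a sketch assuming
 * the same overload of the function as used above), the call would read:
 *
 * @code
 * // No Neumann boundary data in this program, so the map stays empty:
 * const std::map<types::boundary_id, const Function<dim> *> neumann_bc;
 *
 * KellyErrorEstimator<dim>::estimate(dof_handler,
 *                                    QGauss<dim - 1>(fe.degree + 1),
 *                                    neumann_bc,
 *                                    locally_relevant_solution,
 *                                    estimated_error_per_cell);
 * @endcode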
825 *
826 *
827 * <a name="LaplaceProblemoutput_results"></a>
828 * <h4>LaplaceProblem::output_results</h4>
829 *
830
831 *
832 * Compared to the corresponding function in @ref step_6 "step-6", the one here is a tad
833 * more complicated. There are two reasons: the first is that we do not
834 * just want to output the solution but also, for each cell, which processor
835 * owns it (i.e., which "subdomain" it is in). The second is that, as discussed at
836 * length in @ref step_17 "step-17" and @ref step_18 "step-18", generating graphical data can be a
837 * bottleneck when parallelizing. In @ref step_18 "step-18", we moved this step out of
838 * the actual computation into a separate program that later
839 * combined the output from various processors into a single file. But this
840 * doesn't scale: if the number of processors is large, this may mean that
841 * the step of combining data on a single processor later becomes the
842 * longest running part of the program, or it may produce a file that's so
843 * large that it can't be visualized any more. We here follow a more
844 * sensible approach, namely creating individual files for each MPI process
845 * and leaving it to the visualization program to make sense of that.
846 *
847
848 *
849 * To start, the top of the function looks like it usually does. In addition
850 * to attaching the solution vector (the one that has entries for all locally
851 * relevant, not only the locally owned, elements), we attach a data vector
852 * that stores, for each cell, the subdomain the cell belongs to. This is
853 * slightly tricky, because of course not every processor knows about every
854 * cell. The vector we attach therefore has an entry for every cell that the
855 * current processor has in its mesh (locally owned ones, ghost cells, and
856 * artificial cells), but the DataOut class will ignore all entries that
857 * correspond to cells that are not owned by the current processor. As a
858 * consequence, it doesn't actually matter what values we write into these
859 * vector entries: we simply fill the entire vector with the number of the
860 * current MPI process (i.e. the subdomain_id of the current process); this
861 * correctly sets the values we care for, i.e. the entries that correspond
862 * to locally owned cells, while providing the wrong value for all other
863 * elements -- but these are then ignored anyway.
864 *
865 * @code
866 * template <int dim>
867 * void LaplaceProblem<dim>::output_results(const unsigned int cycle) const
868 * {
869 * DataOut<dim> data_out;
870 * data_out.attach_dof_handler(dof_handler);
871 * data_out.add_data_vector(locally_relevant_solution, "u");
872 *
873 * Vector<float> subdomain(triangulation.n_active_cells());
874 * for (unsigned int i = 0; i < subdomain.size(); ++i)
875 * subdomain(i) = triangulation.locally_owned_subdomain();
876 * data_out.add_data_vector(subdomain, "subdomain");
877 *
878 * data_out.build_patches();
879 *
880 * @endcode
881 *
882 * The next step is to write this data to disk. We write up to 8 VTU files
883 * in parallel with the help of MPI-IO. Additionally a PVTU record is
884 * generated, which groups the written VTU files.
885 *
886 * @code
887 * data_out.write_vtu_with_pvtu_record(
888 * "./", "solution", cycle, mpi_communicator, 2, 8);
889 * }
890 *
891 *
892 *
893 * @endcode
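 *
 * As a side note on the two numeric arguments of the call above (a minimal
 * sketch restating the same call with the arguments named; the variable
 * names are chosen here purely for illustration): the <code>2</code> is the
 * number of digits used to encode the cycle counter in the generated file
 * names, and the <code>8</code> is the maximum number of VTU files written
 * in parallel, i.e., the number of file groups mentioned in the text.
 *
 * @code
 * const unsigned int n_digits_for_counter = 2; // cycle number is zero-padded to two digits
 * const unsigned int n_groups             = 8; // write at most 8 .vtu files in parallel
 * data_out.write_vtu_with_pvtu_record(
 *   "./", "solution", cycle, mpi_communicator, n_digits_for_counter, n_groups);
 * @endcode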
894 *
895 *
896 * <a name="LaplaceProblemrun"></a>
897 * <h4>LaplaceProblem::run</h4>
898 *
899
900 *
901 * The function that controls the overall behavior of the program is again
902 * like the one in @ref step_6 "step-6". The minor differences are the use of
903 * <code>pcout</code> instead of <code>std::cout</code> for output to the
904 * console (see also @ref step_17 "step-17") and that we only generate graphical output if
905 * at most 32 processors are involved. Without this limit, it would be just
906 * too easy for people carelessly running this program without reading it
907 * first to bring down the cluster interconnect and fill any file system
908 * available :-)
909 *
910
911 *
912 * A functional difference to @ref step_6 "step-6" is the use of a square domain and that
913 * we start with a slightly finer mesh (5 global refinement cycles) -- there
914 * just isn't much of a point showing a massively %parallel program starting
915 * on 4 cells (although admittedly the point is only slightly stronger
916 * starting on 1024).
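 *
 * (As a quick arithmetic cross-check of this choice: five global refinements
 * of the unit square split each coordinate direction into @f$2^5=32@f$
 * intervals, i.e., @f$32^2=1024@f$ active cells in 2d -- exactly the number
 * of active cells reported for cycle 0 in the results section below.)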
917 *
918 * @code
919 * template <int dim>
920 * void LaplaceProblem<dim>::run()
921 * {
922 * pcout << "Running with "
923 * #ifdef USE_PETSC_LA
924 * << "PETSc"
925 * #else
926 * << "Trilinos"
927 * #endif
928 * << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
929 * << " MPI rank(s)..." << std::endl;
930 *
931 * const unsigned int n_cycles = 8;
932 * for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
933 * {
934 * pcout << "Cycle " << cycle << ':' << std::endl;
935 *
936 * if (cycle == 0)
937 * {
938 * GridGenerator::hyper_cube(triangulation);
939 * triangulation.refine_global(5);
940 * }
941 * else
942 * refine_grid();
943 *
944 * setup_system();
945 *
946 * pcout << " Number of active cells: "
947 * << triangulation.n_global_active_cells() << std::endl
948 * << " Number of degrees of freedom: " << dof_handler.n_dofs()
949 * << std::endl;
950 *
951 * assemble_system();
952 * solve();
953 *
954 * if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 32)
955 * {
956 * TimerOutput::Scope t(computing_timer, "output");
957 * output_results(cycle);
958 * }
959 *
960 * computing_timer.print_summary();
961 * computing_timer.reset();
962 *
963 * pcout << std::endl;
964 * }
965 * }
966 * } // namespace Step40
967 *
968 *
969 *
970 * @endcode
971 *
972 *
973 * <a name="main"></a>
974 * <h4>main()</h4>
975 *
976
977 *
978 * The final function, <code>main()</code>, again has the same structure as in
979 * all other programs, in particular @ref step_6 "step-6". Like the other programs that use
980 * MPI, we have to initialize and finalize MPI, which is done using the helper
981 * object Utilities::MPI::MPI_InitFinalize. The constructor of that class also
982 * initializes libraries that depend on MPI, such as p4est, PETSc, SLEPc, and
983 * Zoltan (though the last two are not used in this tutorial). The order here
984 * is important: we cannot use any of these libraries until they are
985 * initialized, so it does not make sense to do anything before creating an
986 * instance of Utilities::MPI::MPI_InitFinalize.
987 *
988
989 *
990 * After the solver finishes, the LaplaceProblem destructor will run followed
991 * by the destructor of the Utilities::MPI::MPI_InitFinalize object. That
992 * destructor in turn calls
993 * <code>PetscFinalize</code> (and finalization functions for other
994 * libraries), which will delete any in-use PETSc objects. This must be done
995 * after we destruct the Laplace solver to avoid double deletion
996 * errors. Fortunately, thanks to C++'s rules about the order in which destructors are called, we
997 * do not need to worry about any of this: everything happens in the correct
998 * order (i.e., the reverse of the order of construction). The last function
999 * called by Utilities::MPI::MPI_InitFinalize::~MPI_InitFinalize() is
1000 * <code>MPI_Finalize</code>: i.e., once this object is destructed the program
1001 * should exit since MPI will no longer be available.
1002 *
1003 * @code
1004 * int main(int argc, char *argv[])
1005 * {
1006 * try
1007 * {
1008 * using namespace dealii;
1009 * using namespace Step40;
1010 *
1011 * Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
1012 *
1013 * LaplaceProblem<2> laplace_problem_2d;
1014 * laplace_problem_2d.run();
1015 * }
1016 * catch (std::exception &exc)
1017 * {
1018 * std::cerr << std::endl
1019 * << std::endl
1020 * << "----------------------------------------------------"
1021 * << std::endl;
1022 * std::cerr << "Exception on processing: " << std::endl
1023 * << exc.what() << std::endl
1024 * << "Aborting!" << std::endl
1025 * << "----------------------------------------------------"
1026 * << std::endl;
1027 *
1028 * return 1;
1029 * }
1030 * catch (...)
1031 * {
1032 * std::cerr << std::endl
1033 * << std::endl
1034 * << "----------------------------------------------------"
1035 * << std::endl;
1036 * std::cerr << "Unknown exception!" << std::endl
1037 * << "Aborting!" << std::endl
1038 * << "----------------------------------------------------"
1039 * << std::endl;
1040 * return 1;
1041 * }
1042 *
1043 * return 0;
1044 * }
1045 * @endcode
1046<a name="Results"></a><h1>Results</h1>
1047
1048
1049When you run the program, either on a single processor or on a few with your
1050local MPI installation, you should get output like this:
1051@code
1052Cycle 0:
1053 Number of active cells: 1024
1054 Number of degrees of freedom: 4225
1055 Solved in 10 iterations.
1056
1057
1058+---------------------------------------------+------------+------------+
1059| Total wallclock time elapsed since start | 0.176s | |
1060| | | |
1061| Section | no. calls | wall time | % of total |
1062+---------------------------------+-----------+------------+------------+
1063| assembly | 1 | 0.0209s | 12% |
1064| output | 1 | 0.0189s | 11% |
1065| setup | 1 | 0.0299s | 17% |
1066| solve | 1 | 0.0419s | 24% |
1067+---------------------------------+-----------+------------+------------+
1068
1069
1070Cycle 1:
1071 Number of active cells: 1954
1072 Number of degrees of freedom: 8399
1073 Solved in 10 iterations.
1074
1075
1076+---------------------------------------------+------------+------------+
1077| Total wallclock time elapsed since start | 0.327s | |
1078| | | |
1079| Section | no. calls | wall time | % of total |
1080+---------------------------------+-----------+------------+------------+
1081| assembly | 1 | 0.0368s | 11% |
1082| output | 1 | 0.0208s | 6.4% |
1083| refine | 1 | 0.157s | 48% |
1084| setup | 1 | 0.0452s | 14% |
1085| solve | 1 | 0.0668s | 20% |
1086+---------------------------------+-----------+------------+------------+
1087
1088
1089Cycle 2:
1090 Number of active cells: 3664
1091 Number of degrees of freedom: 16183
1092 Solved in 11 iterations.
1093
1094...
1095@endcode
1096
1097The exact numbers differ, depending on how many processors we use;
1098this is due to the fact that the preconditioner depends on the
1099partitioning of the problem, the solution then differs in the last few
1100digits, and consequently the mesh refinement differs slightly.
1101The primary thing to notice here, though, is that the number of
1102iterations does not increase with the size of the problem. This
1103guarantees that we can efficiently solve even the largest problems.
1104
1105When run on a sufficiently large number of machines (say a few
1106thousand), this program can relatively easily solve problems with well
1107over one billion unknowns in less than a minute. On the other hand,
1108such big problems can no longer be visualized, so we also ran the
1109program on only 16 processors. Here are a mesh, along with its
1110partitioning onto the 16 processors, and the corresponding solution:
1111
1112<table width="100%">
1113<tr>
1114<td>
1115 <img src="https://www.dealii.org/images/steps/developer/step-40.mesh.png" alt="">
1116</td>
1117<td>
1118 <img src="https://www.dealii.org/images/steps/developer/step-40.solution.png" alt="">
1119</td>
1120</tr>
1121</table>
1122
1123The mesh on the left has a mere 7,069 cells. This is of course a
1124problem we would easily have been able to solve already on a single
1125processor using @ref step_6 "step-6", but the point of the program was to show how
1126to write a program that scales to many more machines. For example,
1127here are two graphs that show how the run times of the various parts
1128of the program scale on problems with around 52 and 375 million degrees of
1129freedom if we take more and more processors (these and the next couple of
1130graphs are taken from an earlier version of the
1131@ref distributed_paper "Distributed Computing paper"; updated graphs showing
1132data of runs on even larger numbers of processors, and a lot
1133more interpretation can be found in the final version of the paper):
1134
1135<table width="100%">
1136<tr>
1137<td>
1138 <img src="https://www.dealii.org/images/steps/developer/step-40.strong2.png" alt="">
1139</td>
1140<td>
1141 <img src="https://www.dealii.org/images/steps/developer/step-40.strong.png" alt="">
1142</td>
1143</tr>
1144</table>
1145
1146As can clearly be seen, the program scales nicely to very large
1147numbers of processors.
1148(For a discussion of what we consider "scalable" programs, see
1149@ref GlossParallelScaling "this glossary entry".)
1150The curves, in particular the one for the linear solver, become a
1151bit wobbly at the right end of the graphs since each processor has too little
1152to do to offset the cost of communication (the part of the whole problem each
1153processor has to solve in the above two examples is only 13,000 and 90,000
1154degrees of freedom when 4,096 processors are used; a good rule of thumb is that
1155parallel programs work well if each processor has at least 100,000 unknowns).
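These per-processor counts are simply the global problem sizes divided by the
number of processors; as a rough check, @f$52\cdot 10^6/4096 \approx 12{,}700@f$
and @f$375\cdot 10^6/4096 \approx 91{,}600@f$ degrees of freedom per processor,
which is where the 13,000 and 90,000 quoted above come from.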
1156
1157While the strong scaling graphs above show that we can solve a problem of
1158fixed size faster and faster if we take more and more processors, the more
1159interesting question may be how big problems can become so that they can still
1160be solved within a reasonable time on a machine of a particular size. We show
1161this in the following two graphs for 256 and 4096 processors:
1162
1163<table width="100%">
1164<tr>
1165<td>
1166 <img src="https://www.dealii.org/images/steps/developer/step-40.256.png" alt="">
1167</td>
1168<td>
1169 <img src="https://www.dealii.org/images/steps/developer/step-40.4096.png" alt="">
1170</td>
1171</tr>
1172</table>
1173
1174What these graphs show is that all parts of the program scale linearly with
1175the number of degrees of freedom. This time, the lines are wobbly at the left end
1176because the size of the local problems is too small. For a more detailed discussion of these results
1177we refer to the @ref distributed_paper "Distributed Computing paper".
1178
1179So how large are the largest problems one can solve? At the time of writing
1180this program, the
1181limiting factor is that the program uses the BoomerAMG algebraic
1182multigrid method from the <a
1183href="http://acts.nersc.gov/hypre/" target="_top">Hypre package</a> as
1184a preconditioner, which unfortunately uses signed 32-bit integers to
1185index the elements of a %distributed matrix. This limits the size of
1186problems to @f$2^{31}-1=2,147,483,647@f$ degrees of freedom. From the graphs
1187above it is obvious that the scalability would extend beyond this
1188number, and one could expect that using more than the 4,096 machines
1189shown above would further reduce the compute time. That said, one
1190can certainly expect that this limit will eventually be lifted by the
1191hypre developers.
1192
1193On the other hand, this does not mean that deal.II cannot solve bigger
1194problems. Indeed, @ref step_37 "step-37" shows how one can solve problems that are not
1195just a little, but very substantially larger than anything we have shown
1196here.
1197
1198
1199
1200<a name="extensions"></a>
1201<a name="Possibilitiesforextensions"></a><h3>Possibilities for extensions</h3>
1202
1203
1204In a sense, this program is the ultimate solver for the Laplace
1205equation: it can essentially solve the equation to whatever accuracy
1206you want, if only you have enough processors available. Since the
1207Laplace equation by itself is not terribly interesting at this level
1208of accuracy, the more interesting possibilities for extension
1209concern not so much this program itself but what comes beyond
1210it. For example, several of the other programs in this tutorial have
1211significant run times, especially in 3d. It would therefore be
1212interesting to use the techniques explained here to extend other
1213programs to support parallel distributed computations. We have done
1214this for @ref step_31 "step-31" in the @ref step_32 "step-32" tutorial program, but the same would
1215apply to, for example, @ref step_23 "step-23" and @ref step_25 "step-25" for hyperbolic time
1216dependent problems, @ref step_33 "step-33" for gas dynamics, or @ref step_35 "step-35" for the
1217Navier-Stokes equations.
1218
1219Maybe equally interesting is the problem of postprocessing. As
1220mentioned above, we only show pictures of the solution and the mesh
1221for 16 processors because 4,096 processors solving 1 billion unknowns
1222would produce graphical output on the order of several tens of
1223gigabytes. Currently, no program is able to visualize this amount of
1224data in any reasonable way unless it also runs on at least several
1225hundred processors. There are, however, approaches where visualization
1226programs communicate directly with the solver on each processor, with each
1227visualization process rendering the part of the scene computed by the
1228solver on that processor. Implementing such an interface would allow
1229us to quickly visualize things that are otherwise not amenable to
1230graphical display.
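(To put a rough number on this -- a back-of-the-envelope estimate with assumed
storage sizes, not a measured figure: a single 8-byte number per unknown is
already @f$10^9\cdot 8\,\text{bytes}=8\,\text{GB}@f$, and graphical output
stores several numbers per unknown -- coordinates, connectivity, and solution
values -- so a few tens of gigabytes accumulate quickly.)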
1231 *
1232 *
1233<a name="PlainProg"></a>
1234<h1> The plain program</h1>
1235@include "step-40.cc"
1236*/