    LA::MPI::PreconditionAMG preconditioner;
    preconditioner.initialize(system_matrix, data);

    solver.solve(system_matrix,
                 completely_distributed_solution,
                 system_rhs,
                 preconditioner);

    pcout << "   Solved in " << solver_control.last_step() << " iterations."
          << std::endl;
<a name="step_40-LaplaceProblemrefine_grid"></a>
<h4>LaplaceProblem::refine_grid</h4>
Note that we did not have to do anything special about the KellyErrorEstimator class: we just give it a vector with as many elements as the local triangulation has cells (locally owned cells, ghost cells, and artificial ones), but it only fills those entries that correspond to cells that are locally owned.
  template <int dim>
  void LaplaceProblem<dim>::refine_grid()
  {
    TimerOutput::Scope t(computing_timer, "refine");

    Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
    KellyErrorEstimator<dim>::estimate(
      dof_handler,
      QGauss<dim - 1>(fe.degree + 1),
      std::map<types::boundary_id, const Function<dim> *>(),
      locally_relevant_solution,
      estimated_error_per_cell);
    parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
      triangulation, estimated_error_per_cell, 0.3, 0.03);
    triangulation.execute_coarsening_and_refinement();
  }
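As an aside (not part of the tutorial program itself), the fact that only the entries of <code>estimated_error_per_cell</code> belonging to locally owned cells are meaningful implies that any global quantity derived from this vector has to be accumulated over locally owned cells and then reduced across all MPI ranks. A sketch of such a reduction, assuming the member variables of this class:

@code
// Sum the squared error indicators of the cells this process owns ...
double local_sum_of_squares = 0.;
for (const auto &cell : triangulation.active_cell_iterators())
  if (cell->is_locally_owned())
    {
      const float e = estimated_error_per_cell[cell->active_cell_index()];
      local_sum_of_squares += e * e;
    }

// ... and combine the per-process contributions into one global number.
const double global_error_estimate =
  std::sqrt(Utilities::MPI::sum(local_sum_of_squares, mpi_communicator));
@endcode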
<a name="step_40-LaplaceProblemoutput_results"></a>
<h4>LaplaceProblem::output_results</h4>
Compared to the corresponding function in @ref step_6 "step-6", the one here is a tad more complicated. There are two reasons: the first one is that we do not just want to output the solution but also, for each cell, which processor owns it (i.e., which "subdomain" it is in). Secondly, as discussed at length in @ref step_17 "step-17" and @ref step_18 "step-18", generating graphical data can be a bottleneck in parallelizing. In those two programs, we simply generate one output file per process. That worked because the parallel::shared::Triangulation cannot be used with large numbers of MPI processes anyway. But this doesn't scale: creating a single file per processor will overwhelm the filesystem with a large number of processors.

As for the values of the subdomain vector entries we add to the output: we simply fill the entire vector with the number of the current MPI process (i.e., the subdomain_id of the current process); this correctly sets the values we care about, i.e., the entries that correspond to locally owned cells, while providing the wrong value for all other elements -- but these are then ignored anyway.
  template <int dim>
  void LaplaceProblem<dim>::output_results(const unsigned int cycle)
  {
    TimerOutput::Scope t(computing_timer, "output");

    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    data_out.add_data_vector(locally_relevant_solution, "u");

    Vector<float> subdomain(triangulation.n_active_cells());
    for (unsigned int i = 0; i < subdomain.size(); ++i)
      subdomain(i) = triangulation.locally_owned_subdomain();
    data_out.add_data_vector(subdomain, "subdomain");

    data_out.build_patches();
The final step is to write this data to disk. We write up to 8 VTU files in parallel with the help of MPI-IO. In addition, a PVTU record is generated that groups the written VTU files.
    data_out.write_vtu_with_pvtu_record(
      "./", "solution", cycle, mpi_communicator, 2, 8);
  }
<a name="step_40-LaplaceProblemrun"></a>
<h4>LaplaceProblem::run</h4>
The function that controls the overall behavior of the program is again like the one in @ref step_6 "step-6". The minor difference is the use of <code>pcout</code> instead of <code>std::cout</code> for output to the console (see also @ref step_17 "step-17").

A functional difference to @ref step_6 "step-6" is the use of a square domain and that we start with a slightly finer mesh (5 global refinement cycles) -- there just isn't much of a point showing a massively parallel program starting on 4 cells (although admittedly the point is only slightly stronger starting on 1024).
  template <int dim>
  void LaplaceProblem<dim>::run()
  {
    pcout << "Running with "
#ifdef USE_PETSC_LA
          << "PETSc"
#else
          << "Trilinos"
#endif
          << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
          << " MPI rank(s)..." << std::endl;

    const unsigned int n_cycles = 8;
    for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
      {
        pcout << "Cycle " << cycle << ':' << std::endl;

        if (cycle == 0)
          {
            GridGenerator::hyper_cube(triangulation);
            triangulation.refine_global(5);
          }
        else
          refine_grid();

        setup_system();
        assemble_system();
        solve();
        output_results(cycle);

        computing_timer.print_summary();
        computing_timer.reset();

        pcout << std::endl;
      }
  }
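A brief reminder of what <code>pcout</code> is: a ConditionalOStream that forwards output to <code>std::cout</code> only on MPI rank 0, so that the messages above appear exactly once no matter how many processes participate. A sketch of how such an object is typically created (the constructor of this class initializes its <code>pcout</code> member in essentially this way):

@code
// Only the process with rank 0 actually writes to the console.
ConditionalOStream pcout(std::cout,
                         Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) ==
                           0);

pcout << "Only rank 0 prints this." << std::endl;
@endcode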
<a name="step_40-main"></a>
<h4>main()</h4>
int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Step40;

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      LaplaceProblem<2> laplace_problem_2d;
      laplace_problem_2d.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}
+---------------------------------+-----------+------------+------------+
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| output                          |         1 |    0.0495s |         9% |
| setup                           |         1 |     0.102s |        19% |
| solve                           |         1 |    0.0283s |       5.2% |
+---------------------------------+-----------+------------+------------+
+---------------------------------+-----------+------------+------------+
| Section                         | no. calls |  wall time | % of total |
+---------------------------------+-----------+------------+------------+
| output                          |         1 |    0.0899s |       7.6% |
| refine                          |         1 |     0.429s |        36% |
| setup                           |         1 |     0.177s |        15% |
| solve                           |         1 |    0.0204s |       1.7% |
+---------------------------------+-----------+------------+------------+
<img src="https://www.dealii.org/images/steps/developer/step-40.mesh.png" alt="">
<img src="https://www.dealii.org/images/steps/developer/step-40.solution.png" alt="">
<img src="https://www.dealii.org/images/steps/developer/step-40.strong2.png" alt="">
<img src="https://www.dealii.org/images/steps/developer/step-40.strong.png" alt="">
<img src="https://www.dealii.org/images/steps/developer/step-40.256.png" alt="">
<img src="https://www.dealii.org/images/steps/developer/step-40.4096.png" alt="">
<a name="step-40-extensions"></a>
<a name="step_40-Possibilitiesforextensions"></a>
<h3>Possibilities for extensions</h3>
<a name="step_40-PlainProg"></a>