shared_tria.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2015 - 2024 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/shared_tria.h>

#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <type_traits>


DEAL_II_NAMESPACE_OPEN
#ifdef DEAL_II_WITH_MPI
namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    Triangulation<dim, spacedim>::Triangulation(
      const MPI_Comm mpi_communicator,
      const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
        smooth_grid,
      const bool     allow_artificial_cells,
      const Settings settings)
      : dealii::parallel::TriangulationBase<dim, spacedim>(mpi_communicator,
                                                           smooth_grid,
                                                           false)
      , settings(settings)
      , allow_artificial_cells(allow_artificial_cells)
    {
      const auto partition_settings =
        (partition_zoltan | partition_metis | partition_zorder |
         partition_custom_signal) &
        settings;
      (void)partition_settings;
      Assert(partition_settings == partition_auto ||
               partition_settings == partition_metis ||
               partition_settings == partition_zoltan ||
               partition_settings == partition_zorder ||
               partition_settings == partition_custom_signal,
             ExcMessage("Settings must contain exactly one type of the active "
                        "cell partitioning scheme."));

      if (settings & construct_multigrid_hierarchy)
        Assert(allow_artificial_cells,
               ExcMessage("construct_multigrid_hierarchy requires "
                          "allow_artificial_cells to be set to true."));
    }
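
    // A minimal usage sketch (illustrative only, assuming the usual
    // GridGenerator interface): every MPI rank stores the complete mesh,
    // and only the subdomain ids differ between ranks.
    //
    //   parallel::shared::Triangulation<2> tria(
    //     MPI_COMM_WORLD,
    //     dealii::Triangulation<2>::none,
    //     /*allow_artificial_cells=*/true,
    //     parallel::shared::Triangulation<2>::partition_auto);
    //   GridGenerator::hyper_cube(tria);
    //   tria.refine_global(3);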


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return (settings & construct_multigrid_hierarchy);
    }


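    // partition() does the actual domain decomposition: it checks that all
    // ranks agree on the mesh, selects one of the supported partitioners
    // (Zoltan, METIS, z-order, or a user-provided partitioning), assigns
    // subdomain ids to all active cells (and, if requested, level subdomain
    // ids to the multigrid hierarchy), and finally marks every cell outside
    // the locally owned subdomain and its ghost layer as artificial if
    // allow_artificial_cells is set.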
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::partition()
    {
#  ifdef DEBUG
      // Check that all meshes are the same (or at least have the same
      // total number of active cells):
      const unsigned int max_active_cells =
        Utilities::MPI::max(this->n_active_cells(),
                            this->get_mpi_communicator());
      Assert(
        max_active_cells == this->n_active_cells(),
        ExcMessage(
          "A parallel::shared::Triangulation needs to be refined in the same "
          "way on all processors, but the participating processors don't "
          "agree on the number of active cells."));
#  endif

      auto partition_settings = (partition_zoltan | partition_metis |
                                 partition_zorder | partition_custom_signal) &
                                settings;
      if (partition_settings == partition_auto)
#  ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
        partition_settings = partition_zoltan;
#  elif defined DEAL_II_WITH_METIS
        partition_settings = partition_metis;
#  else
        partition_settings = partition_zorder;
#  endif

      if (partition_settings == partition_zoltan)
        {
#  ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_zoltan' requires the library "
                        "to be compiled with support for Zoltan! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
#  else
          GridTools::partition_triangulation(
            this->n_subdomains, *this, SparsityTools::Partitioner::zoltan);
#  endif
        }
      else if (partition_settings == partition_metis)
        {
#  ifndef DEAL_II_WITH_METIS
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_metis' requires the library "
                        "to be compiled with support for METIS! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
#  else
          GridTools::partition_triangulation(this->n_subdomains,
                                             *this,
                                             SparsityTools::Partitioner::metis);
#  endif
        }
      else if (partition_settings == partition_zorder)
        {
          GridTools::partition_triangulation_zorder(this->n_subdomains, *this);
        }
      else if (partition_settings == partition_custom_signal)
        {
          // User partitions mesh manually
        }
      else
        {
          DEAL_II_ASSERT_UNREACHABLE();
        }

      // do not partition multigrid levels if the user is
      // defining a custom partition
      if ((settings & construct_multigrid_hierarchy) &&
          !(settings & partition_custom_signal))
        GridTools::partition_multigrid_levels(*this);

      true_subdomain_ids_of_cells.resize(this->n_active_cells());

      // loop over all cells and mark artificial:
      typename parallel::shared::Triangulation<dim,
                                               spacedim>::active_cell_iterator
        cell = this->begin_active(),
        endc = this->end();

      if (allow_artificial_cells)
        {
          // get active halo layer of (ghost) cells
          // parallel::shared::Triangulation<dim>::
          std::function<bool(
            const typename parallel::shared::Triangulation<dim, spacedim>::
              active_cell_iterator &)>
            predicate = IteratorFilters::SubdomainEqualTo(this->my_subdomain);

          const std::vector<typename parallel::shared::Triangulation<
            dim,
            spacedim>::active_cell_iterator>
            active_halo_layer_vector =
              GridTools::compute_active_cell_halo_layer(*this, predicate);
          std::set<typename parallel::shared::Triangulation<dim, spacedim>::
                     active_cell_iterator>
            active_halo_layer(active_halo_layer_vector.begin(),
                              active_halo_layer_vector.end());

          for (unsigned int index = 0; cell != endc; cell++, index++)
            {
              // store original/true subdomain ids:
              true_subdomain_ids_of_cells[index] = cell->subdomain_id();

              if (cell->is_locally_owned() == false &&
                  active_halo_layer.find(cell) == active_halo_layer.end())
                cell->set_subdomain_id(numbers::artificial_subdomain_id);
            }

          // loop over all cells in multigrid hierarchy and mark artificial:
          if (settings & construct_multigrid_hierarchy)
            {
              true_level_subdomain_ids_of_cells.resize(this->n_levels());

              std::function<bool(
                const typename parallel::shared::Triangulation<dim, spacedim>::
                  cell_iterator &)>
                predicate = IteratorFilters::LocallyOwnedLevelCell();
              for (unsigned int lvl = 0; lvl < this->n_levels(); ++lvl)
                {
                  true_level_subdomain_ids_of_cells[lvl].resize(
                    this->n_cells(lvl));

                  const std::vector<typename parallel::shared::Triangulation<
                    dim,
                    spacedim>::cell_iterator>
                    level_halo_layer_vector =
                      GridTools::compute_cell_halo_layer_on_level(
                        *this, predicate, lvl);
                  std::set<typename parallel::shared::
                             Triangulation<dim, spacedim>::cell_iterator>
                    level_halo_layer(level_halo_layer_vector.begin(),
                                     level_halo_layer_vector.end());

                  typename parallel::shared::Triangulation<dim, spacedim>::
                    cell_iterator cell = this->begin(lvl),
                                  endc = this->end(lvl);
                  for (unsigned int index = 0; cell != endc; cell++, index++)
                    {
                      // Store true level subdomain IDs before setting
                      // artificial
                      true_level_subdomain_ids_of_cells[lvl][index] =
                        cell->level_subdomain_id();

                      // for active cells, we must have knowledge of level
                      // subdomain ids of all neighbors to our subdomain, not
                      // just neighbors on the same level. if the cell's
                      // subdomain id was not set to artificial above, we will
                      // also keep its level subdomain id since it is either
                      // owned by this processor or in the ghost layer of the
                      // active mesh.
                      if (cell->is_active() &&
                          cell->subdomain_id() !=
                            numbers::artificial_subdomain_id)
                        continue;

                      // we must have knowledge of our parent in the hierarchy
                      if (cell->has_children())
                        {
                          bool keep_cell = false;
                          for (unsigned int c = 0;
                               c < GeometryInfo<dim>::max_children_per_cell;
                               ++c)
                            if (cell->child(c)->level_subdomain_id() ==
                                this->my_subdomain)
                              {
                                keep_cell = true;
                                break;
                              }
                          if (keep_cell)
                            continue;
                        }

                      // we must have knowledge of our neighbors on the same
                      // level
                      if (!cell->is_locally_owned_on_level() &&
                          level_halo_layer.find(cell) != level_halo_layer.end())
                        continue;

                      // mark all other cells as artificial
                      cell->set_level_subdomain_id(
                        numbers::artificial_subdomain_id);
                    }
                }
            }
        }
      else
        {
          // just store true subdomain ids
          for (unsigned int index = 0; cell != endc; cell++, index++)
            true_subdomain_ids_of_cells[index] = cell->subdomain_id();
        }

#  ifdef DEBUG
      {
        // Assert that each cell is owned by a processor
        const unsigned int n_my_cells = std::count_if(
          this->begin_active(),
          typename Triangulation<dim, spacedim>::active_cell_iterator(
            this->end()),
          [](const auto &i) { return (i.is_locally_owned()); });

        const unsigned int total_cells =
          Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
        Assert(total_cells == this->n_active_cells(),
               ExcMessage("Not all cells are assigned to a processor."));
      }

      // If running with multigrid, assert that each level
      // cell is owned by a processor
      if (settings & construct_multigrid_hierarchy)
        {
          const unsigned int n_my_cells =
            std::count_if(this->begin(), this->end(), [](const auto &i) {
              return (i.is_locally_owned_on_level());
            });

          const unsigned int total_cells =
            Utilities::MPI::sum(n_my_cells, this->get_mpi_communicator());
          Assert(total_cells == this->n_cells(),
                 ExcMessage("Not all cells are assigned to a processor."));
        }
#  endif
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      return allow_artificial_cells;
    }


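    // With artificial cells enabled, cell->subdomain_id() no longer reports
    // the owning rank for cells outside the local subdomain and its ghost
    // layer. The two accessors below instead return the partitioning exactly
    // as computed in partition(), i.e. the "true" owner of every (level)
    // cell.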
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<types::subdomain_id>
      &Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      return true_subdomain_ids_of_cells;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<types::subdomain_id>
      &Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
        const unsigned int level) const
    {
      Assert(level < true_level_subdomain_ids_of_cells.size(),
             ExcInternalError());
      Assert(true_level_subdomain_ids_of_cells[level].size() ==
               this->n_cells(level),
             ExcInternalError());
      return true_level_subdomain_ids_of_cells[level];
    }


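    // Refinement and coarsening act on the full mesh that every rank stores,
    // so the refine/coarsen flags are first synchronized across the
    // communicator (via an element-wise Utilities::MPI::max) to guarantee
    // that all ranks execute exactly the same mesh modification; afterwards
    // the new active cells are repartitioned.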
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      // make sure that all refinement/coarsening flags are the same on all
      // processes
      {
        // Obtain the type used to store the different possibilities
        // a cell can be refined. This is a bit awkward because
        // what `cell->refine_flag_set()` returns is a struct
        // type, RefinementCase, which internally stores a
        // std::uint8_t, which actually holds integers of
        // enum type RefinementPossibilities<dim>::Possibilities.
        // In the following, use the actual name of the enum, but
        // make sure that it is in fact a `std::uint8_t` or
        // equally sized type.
        using int_type = std::underlying_type_t<
          typename RefinementPossibilities<dim>::Possibilities>;
        static_assert(sizeof(int_type) == sizeof(std::uint8_t),
                      "Internal type mismatch.");

        std::vector<int_type> refinement_configurations(
          this->n_active_cells() * 2, int_type(0));
        for (const auto &cell : this->active_cell_iterators())
          if (cell->is_locally_owned())
            {
              refinement_configurations[cell->active_cell_index() * 2 + 0] =
                static_cast<int_type>(cell->refine_flag_set());
              refinement_configurations[cell->active_cell_index() * 2 + 1] =
                static_cast<int_type>(cell->coarsen_flag_set() ? 1 : 0);
            }

        Utilities::MPI::max(refinement_configurations,
                            this->get_mpi_communicator(),
                            refinement_configurations);

        for (const auto &cell : this->active_cell_iterators())
          {
            cell->clear_refine_flag();
            cell->clear_coarsen_flag();

            Assert(
              (refinement_configurations[cell->active_cell_index() * 2 + 0] >
                   0 ?
                 1 :
                 0) +
                  refinement_configurations[cell->active_cell_index() * 2 +
                                            1] <=
                1,
              ExcMessage("Refinement/coarsening flags of cells are not "
                         "consistent in parallel!"));

            if (refinement_configurations[cell->active_cell_index() * 2 + 0] !=
                0)
              cell->set_refine_flag(RefinementCase<dim>(
                refinement_configurations[cell->active_cell_index() * 2 + 0]));

            if (refinement_configurations[cell->active_cell_index() * 2 + 1] >
                0)
              cell->set_coarsen_flag();
          }
      }

      dealii::Triangulation<dim, spacedim>::execute_coarsening_and_refinement();
      partition();
      this->update_number_cache();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>> &vertices,
      const std::vector<CellData<dim>>   &cells,
      const SubCellData                  &subcelldata)
    {
      try
        {
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            vertices, cells, subcelldata);
        }
      catch (
        const typename dealii::Triangulation<dim, spacedim>::DistortedCellList
          &)
        {
          // the underlying triangulation should not be checking for distorted
          // cells
          DEAL_II_ASSERT_UNREACHABLE();
        }
      partition();
      this->update_number_cache();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      (void)construction_data;

      DEAL_II_NOT_IMPLEMENTED();
    }


    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    void Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      Assert(
        (dynamic_cast<
           const dealii::parallel::DistributedTriangulationBase<dim, spacedim>
             *>(&other_tria) == nullptr),
        ExcMessage(
          "Cannot use this function on parallel::distributed::Triangulation."));

      dealii::parallel::TriangulationBase<dim, spacedim>::copy_triangulation(
        other_tria);
      partition();
      this->update_number_cache();
    }
  } // namespace shared
} // namespace parallel

#else

namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      DEAL_II_NOT_IMPLEMENTED();
      return true;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    bool Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed()
      const
    {
      return false;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<unsigned int>
      &Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      DEAL_II_NOT_IMPLEMENTED();
      return true_subdomain_ids_of_cells;
    }



    template <int dim, int spacedim>
    DEAL_II_CXX20_REQUIRES((concepts::is_valid_dim_spacedim<dim, spacedim>))
    const std::vector<unsigned int>
      &Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
        const unsigned int) const
    {
      DEAL_II_NOT_IMPLEMENTED();
      return true_level_subdomain_ids_of_cells;
    }
  } // namespace shared
} // namespace parallel


#endif


namespace internal
{
  namespace parallel
  {
    namespace shared
    {
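      // TemporarilyRestoreSubdomainIds is a small RAII helper: its
      // constructor replaces the (possibly artificial) subdomain ids of a
      // parallel::shared::Triangulation by the true owners reported by
      // get_true_subdomain_ids_of_cells(), and the destructor restores the
      // saved ids. A minimal usage sketch (hypothetical user code):
      //
      //   {
      //     const internal::parallel::shared::
      //       TemporarilyRestoreSubdomainIds<dim, spacedim> restore(tria);
      //     // ... operate on tria while subdomain_id() returns true owners
      //   }   // original subdomain ids are restored here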
      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim, spacedim>::
        TemporarilyRestoreSubdomainIds(const Triangulation<dim, spacedim> &tria)
        : shared_tria(
            dynamic_cast<
              const dealii::parallel::shared::Triangulation<dim, spacedim> *>(
              &tria))
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // Save the current set of subdomain IDs, and set subdomain IDs
            // to the "true" owner of each cell.
            const std::vector<types::subdomain_id> &true_subdomain_ids =
              shared_tria->get_true_subdomain_ids_of_cells();

            saved_subdomain_ids.resize(shared_tria->n_active_cells());
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index   = cell->active_cell_index();
                saved_subdomain_ids[index] = cell->subdomain_id();
                cell->set_subdomain_id(true_subdomain_ids[index]);
              }
          }
      }



      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim, spacedim>::
        ~TemporarilyRestoreSubdomainIds()
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // Undo the subdomain modification.
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index = cell->active_cell_index();
                cell->set_subdomain_id(saved_subdomain_ids[index]);
              }
          }
      }
    } // namespace shared
  }   // namespace parallel
} // namespace internal


/*-------------- Explicit Instantiations -------------------------------*/
#include "shared_tria.inst"

DEAL_II_NAMESPACE_CLOSE