Reference documentation for deal.II version 9.4.1
shared_tria.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2015 - 2021 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/mpi.h>

#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>

#include <deal.II/lac/sparsity_tools.h>


DEAL_II_NAMESPACE_OPEN

#ifdef DEAL_II_WITH_MPI
namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    Triangulation<dim, spacedim>::Triangulation(
      const MPI_Comm &mpi_communicator,
      const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                     smooth_grid,
      const bool     allow_artificial_cells,
      const Settings settings)
      : dealii::parallel::TriangulationBase<dim, spacedim>(mpi_communicator,
                                                           smooth_grid,
                                                           false)
      , settings(settings)
      , allow_artificial_cells(allow_artificial_cells)
    {
      const auto partition_settings =
        (partition_zoltan | partition_metis | partition_zorder |
         partition_custom_signal) &
        settings;
      (void)partition_settings;
      Assert(partition_settings == partition_auto ||
               partition_settings == partition_metis ||
               partition_settings == partition_zoltan ||
               partition_settings == partition_zorder ||
               partition_settings == partition_custom_signal,
             ExcMessage("Settings must contain exactly one type of the active "
                        "cell partitioning scheme."));

      if (settings & construct_multigrid_hierarchy)
        Assert(allow_artificial_cells,
               ExcMessage("construct_multigrid_hierarchy requires "
                          "allow_artificial_cells to be set to true."));
    }



    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed() const
    {
      return (settings & construct_multigrid_hierarchy);
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::partition()
    {
#  ifdef DEBUG
      // Check that all meshes are the same (or at least have the same
      // total number of active cells):
      const unsigned int max_active_cells =
        Utilities::MPI::max(this->n_active_cells(), this->get_communicator());
      Assert(
        max_active_cells == this->n_active_cells(),
        ExcMessage(
          "A parallel::shared::Triangulation needs to be refined in the same "
          "way on all processors, but the participating processors don't "
          "agree on the number of active cells."));
#  endif

      auto partition_settings = (partition_zoltan | partition_metis |
                                 partition_zorder | partition_custom_signal) &
                                settings;
      if (partition_settings == partition_auto)
#  ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
        partition_settings = partition_zoltan;
#  elif defined DEAL_II_WITH_METIS
        partition_settings = partition_metis;
#  else
        partition_settings = partition_zorder;
#  endif

      if (partition_settings == partition_zoltan)
        {
#  ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_zoltan' requires the library "
                        "to be compiled with support for Zoltan! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
#  else
          GridTools::partition_triangulation(
            this->n_subdomains, *this, SparsityTools::Partitioner::zoltan);
#  endif
        }
      else if (partition_settings == partition_metis)
        {
#  ifndef DEAL_II_WITH_METIS
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_metis' requires the library "
                        "to be compiled with support for METIS! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
#  else
          GridTools::partition_triangulation(this->n_subdomains,
                                             *this,
                                             SparsityTools::Partitioner::metis);
#  endif
        }
      else if (partition_settings == partition_zorder)
        {
          GridTools::partition_triangulation_zorder(this->n_subdomains, *this);
        }
      else if (partition_settings == partition_custom_signal)
        {
          // User partitions mesh manually
        }
      else
        {
          // no other partitioning scheme should be reachable
          Assert(false, ExcInternalError());
        }

      // do not partition multigrid levels if the user is
      // defining a custom partition
      if ((settings & construct_multigrid_hierarchy) &&
          !(settings & partition_custom_signal))
        dealii::GridTools::partition_multigrid_levels(*this);

      true_subdomain_ids_of_cells.resize(this->n_active_cells());

      // loop over all cells and mark artificial cells:
      typename parallel::shared::Triangulation<dim,
                                               spacedim>::active_cell_iterator
        cell = this->begin_active(),
        endc = this->end();

      if (allow_artificial_cells)
        {
          // get the active halo layer of (ghost) cells around the locally
          // owned subdomain
          std::function<bool(
            const typename parallel::shared::Triangulation<dim, spacedim>::
              active_cell_iterator &)>
            predicate = IteratorFilters::SubdomainEqualTo(this->my_subdomain);

          const std::vector<typename parallel::shared::Triangulation<
            dim,
            spacedim>::active_cell_iterator>
            active_halo_layer_vector =
              GridTools::compute_active_cell_halo_layer(*this, predicate);
          std::set<typename parallel::shared::Triangulation<dim, spacedim>::
                     active_cell_iterator>
            active_halo_layer(active_halo_layer_vector.begin(),
                              active_halo_layer_vector.end());

          for (unsigned int index = 0; cell != endc; cell++, index++)
            {
              // store original/true subdomain ids:
              true_subdomain_ids_of_cells[index] = cell->subdomain_id();

              if (cell->is_locally_owned() == false &&
                  active_halo_layer.find(cell) == active_halo_layer.end())
                cell->set_subdomain_id(numbers::artificial_subdomain_id);
            }

          // loop over all cells in the multigrid hierarchy and mark artificial:
          if (settings & construct_multigrid_hierarchy)
            {
              true_level_subdomain_ids_of_cells.resize(this->n_levels());

              std::function<bool(
                const typename parallel::shared::Triangulation<dim, spacedim>::
                  cell_iterator &)>
                predicate = IteratorFilters::LocallyOwnedLevelCell();
              for (unsigned int lvl = 0; lvl < this->n_levels(); ++lvl)
                {
                  true_level_subdomain_ids_of_cells[lvl].resize(
                    this->n_cells(lvl));

                  const std::vector<typename parallel::shared::Triangulation<
                    dim,
                    spacedim>::cell_iterator>
                    level_halo_layer_vector =
                      GridTools::compute_cell_halo_layer_on_level(
                        *this, predicate, lvl);
                  std::set<typename parallel::shared::
                             Triangulation<dim, spacedim>::cell_iterator>
                    level_halo_layer(level_halo_layer_vector.begin(),
                                     level_halo_layer_vector.end());

                  typename parallel::shared::Triangulation<dim, spacedim>::
                    cell_iterator cell = this->begin(lvl),
                                  endc = this->end(lvl);
                  for (unsigned int index = 0; cell != endc; cell++, index++)
                    {
                      // Store true level subdomain IDs before setting
                      // artificial
                      true_level_subdomain_ids_of_cells[lvl][index] =
                        cell->level_subdomain_id();

                      // for active cells, we must have knowledge of level
                      // subdomain ids of all neighbors to our subdomain, not
                      // just neighbors on the same level. if the cell's
                      // subdomain id was not set to artificial above, we will
                      // also keep its level subdomain id since it is either
                      // owned by this processor or in the ghost layer of the
                      // active mesh.
                      if (cell->is_active() &&
                          cell->subdomain_id() !=
                            numbers::artificial_subdomain_id)
                        continue;

                      // we must have knowledge of our parent in the hierarchy
                      if (cell->has_children())
                        {
                          bool keep_cell = false;
                          for (unsigned int c = 0;
                               c < GeometryInfo<dim>::max_children_per_cell;
                               ++c)
                            if (cell->child(c)->level_subdomain_id() ==
                                this->my_subdomain)
                              {
                                keep_cell = true;
                                break;
                              }
                          if (keep_cell)
                            continue;
                        }

                      // we must have knowledge of our neighbors on the same
                      // level
                      if (!cell->is_locally_owned_on_level() &&
                          level_halo_layer.find(cell) != level_halo_layer.end())
                        continue;

                      // mark all other cells as artificial
                      cell->set_level_subdomain_id(
                        numbers::artificial_subdomain_id);
                    }
                }
            }
        }
      else
        {
          // just store true subdomain ids
          for (unsigned int index = 0; cell != endc; cell++, index++)
            true_subdomain_ids_of_cells[index] = cell->subdomain_id();
        }

#  ifdef DEBUG
      {
        // Assert that each cell is owned by a processor
        const unsigned int n_my_cells = std::count_if(
          this->begin_active(),
          typename Triangulation<dim, spacedim>::active_cell_iterator(
            this->end()),
          [](const auto &i) { return (i.is_locally_owned()); });

        const unsigned int total_cells =
          Utilities::MPI::sum(n_my_cells, this->get_communicator());
        Assert(total_cells == this->n_active_cells(),
               ExcMessage("Not all cells are assigned to a processor."));
      }

      // If running with multigrid, assert that each level
      // cell is owned by a processor
      if (settings & construct_multigrid_hierarchy)
        {
          const unsigned int n_my_cells =
            std::count_if(this->begin(), this->end(), [](const auto &i) {
              return (i.is_locally_owned_on_level());
            });

          const unsigned int total_cells =
            Utilities::MPI::sum(n_my_cells, this->get_communicator());
          Assert(total_cells == this->n_cells(),
                 ExcMessage("Not all cells are assigned to a processor."));
        }
#  endif
    }


    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      return allow_artificial_cells;
    }



    template <int dim, int spacedim>
    const std::vector<types::subdomain_id> &
    Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      return true_subdomain_ids_of_cells;
    }



    template <int dim, int spacedim>
    const std::vector<types::subdomain_id> &
    Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
      const unsigned int level) const
    {
      Assert(level < true_level_subdomain_ids_of_cells.size(),
             ExcInternalError());
      Assert(true_level_subdomain_ids_of_cells[level].size() ==
               this->n_cells(level),
             ExcInternalError());
      return true_level_subdomain_ids_of_cells[level];
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      // refine/coarsen as a plain Triangulation, then repartition and
      // update the parallel number cache
      dealii::Triangulation<dim, spacedim>::execute_coarsening_and_refinement();
      partition();
      this->update_number_cache();
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>> &vertices,
      const std::vector<CellData<dim>> &  cells,
      const SubCellData &                 subcelldata)
    {
      try
        {
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            vertices, cells, subcelldata);
        }
      catch (
        const typename dealii::Triangulation<dim, spacedim>::DistortedCellList
          &)
        {
          // the underlying triangulation should not be checking for distorted
          // cells
          Assert(false, ExcInternalError());
        }
      partition();
      this->update_number_cache();
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      (void)construction_data;

      Assert(false, ExcInternalError());
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      Assert(
        (dynamic_cast<
           const dealii::parallel::DistributedTriangulationBase<dim, spacedim>
             *>(&other_tria) == nullptr),
        ExcMessage(
          "Cannot use this function on parallel::distributed::Triangulation."));

      dealii::parallel::TriangulationBase<dim, spacedim>::copy_triangulation(
        other_tria);
      partition();
      this->update_number_cache();
    }
  } // namespace shared
} // namespace parallel

#else

namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      Assert(false, ExcNotImplemented());
      return true;
    }

    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed() const
    {
      return false;
    }

    template <int dim, int spacedim>
    const std::vector<unsigned int> &
    Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      Assert(false, ExcNotImplemented());
      return true_subdomain_ids_of_cells;
    }



    template <int dim, int spacedim>
    const std::vector<unsigned int> &
    Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
      const unsigned int) const
    {
      Assert(false, ExcNotImplemented());
      return true_level_subdomain_ids_of_cells;
    }
  } // namespace shared
} // namespace parallel


#endif



namespace internal
{
  namespace parallel
  {
    namespace shared
    {
      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim, spacedim>::
        TemporarilyRestoreSubdomainIds(
          const dealii::Triangulation<dim, spacedim> &tria)
        : shared_tria(
            dynamic_cast<
              const dealii::parallel::shared::Triangulation<dim, spacedim> *>(
              &tria))
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // Save the current set of subdomain IDs, and set subdomain IDs
            // to the "true" owner of each cell.
            const std::vector<types::subdomain_id> &true_subdomain_ids =
              shared_tria->get_true_subdomain_ids_of_cells();

            saved_subdomain_ids.resize(shared_tria->n_active_cells());
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index   = cell->active_cell_index();
                saved_subdomain_ids[index] = cell->subdomain_id();
                cell->set_subdomain_id(true_subdomain_ids[index]);
              }
          }
      }



      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim, spacedim>::
        ~TemporarilyRestoreSubdomainIds()
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // Undo the subdomain modification.
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index = cell->active_cell_index();
                cell->set_subdomain_id(saved_subdomain_ids[index]);
              }
          }
      }
    } // namespace shared
  }   // namespace parallel
} // namespace internal


/*-------------- Explicit Instantiations -------------------------------*/
#include "shared_tria.inst"

DEAL_II_NAMESPACE_CLOSE
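
The listing above is the library's implementation. As a rough, illustrative sketch that is not part of this file, the class might be exercised from user code roughly as follows; the example assumes a deal.II installation configured with MPI and uses z-order partitioning so that neither Zoltan nor METIS is required:

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/grid/grid_generator.h>

#include <iostream>

int main(int argc, char *argv[])
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  // Every process stores the complete mesh; because allow_artificial_cells
  // is true, cells outside the locally owned subdomain and its ghost layer
  // are flagged as artificial after each call to partition().
  dealii::parallel::shared::Triangulation<2> tria(
    MPI_COMM_WORLD,
    dealii::Triangulation<2>::none,
    /*allow_artificial_cells=*/true,
    dealii::parallel::shared::Triangulation<2>::partition_zorder);

  dealii::GridGenerator::hyper_cube(tria);
  tria.refine_global(3); // execute_coarsening_and_refinement() repartitions

  // The "true" owners recorded by partition() remain available even for
  // cells that are locally marked artificial.
  const auto &owners = tria.get_true_subdomain_ids_of_cells();
  std::cout << "active cells: " << tria.n_active_cells()
            << ", owner of first cell: " << owners[0] << std::endl;
}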
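
Similarly, and again only as a hedged sketch rather than part of the library source, the constraint asserted in the constructor above can be illustrated by combining construct_multigrid_hierarchy with a partitioner: the flag is only accepted together with allow_artificial_cells == true, and per-level ownership then becomes available through get_true_level_subdomain_ids_of_cells():

#include <deal.II/base/mpi.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/grid/grid_generator.h>

int main(int argc, char *argv[])
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  using SharedTria = dealii::parallel::shared::Triangulation<2>;

  // construct_multigrid_hierarchy requires allow_artificial_cells == true;
  // the partitioning and setup flags are combined into one Settings value.
  SharedTria tria(MPI_COMM_WORLD,
                  SharedTria::limit_level_difference_at_vertices,
                  /*allow_artificial_cells=*/true,
                  SharedTria::Settings(SharedTria::partition_zorder |
                                       SharedTria::construct_multigrid_hierarchy));

  dealii::GridGenerator::hyper_cube(tria);
  tria.refine_global(2);

  // Per-level "true" owners, as recorded for the multigrid hierarchy.
  const auto &coarse_owners = tria.get_true_level_subdomain_ids_of_cells(0);
  (void)coarse_owners;
}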