#include <deal.II/base/mpi.templates.h>

#include <deal.II/distributed/shared_tria.h>

#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>

#include <deal.II/lac/sparsity_tools.h>

#include <type_traits>


DEAL_II_NAMESPACE_OPEN

#ifdef DEAL_II_WITH_MPI

namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    Triangulation<dim, spacedim>::Triangulation(
      const MPI_Comm mpi_communicator,
      const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
                     smooth_grid,
      const bool     allow_artificial_cells,
      const Settings settings)
      : dealii::parallel::TriangulationBase<dim, spacedim>(
          mpi_communicator, smooth_grid, false)
      , settings(settings)
      , allow_artificial_cells(allow_artificial_cells)
    {
      const auto partition_settings =
        (partition_zoltan | partition_metis | partition_zorder |
         partition_custom_signal) &
        settings;
      (void)partition_settings;
      Assert(partition_settings == partition_auto ||
               partition_settings == partition_metis ||
               partition_settings == partition_zoltan ||
               partition_settings == partition_zorder ||
               partition_settings == partition_custom_signal,
             ExcMessage(
               "Settings must contain exactly one type of the active "
               "cell partitioning scheme."));

      if (settings & construct_multigrid_hierarchy)
        Assert(allow_artificial_cells,
               ExcMessage("construct_multigrid_hierarchy requires "
                          "allow_artificial_cells to be set to true."));
    }


    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed() const
    {
      return (settings & construct_multigrid_hierarchy);
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::partition()
    {
#  ifdef DEBUG
      // check that all processors agree on the number of active cells
      const unsigned int max_active_cells =
        Utilities::MPI::max(this->n_active_cells(), this->get_communicator());
      Assert(
        max_active_cells == this->n_active_cells(),
        ExcMessage(
          "A parallel::shared::Triangulation needs to be refined in the same "
          "way on all processors, but the participating processors don't "
          "agree on the number of active cells."));
#  endif

      auto partition_settings = (partition_zoltan | partition_metis |
                                 partition_zorder | partition_custom_signal) &
                                settings;
      if (partition_settings == partition_auto)
#  ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
        partition_settings = partition_zoltan;
#  elif defined DEAL_II_WITH_METIS
        partition_settings = partition_metis;
#  else
        partition_settings = partition_zorder;
#  endif
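
      // With partition_auto, the fallback order implemented above is Zoltan,
      // then METIS, then the built-in z-order partitioner, depending on which
      // packages deal.II was configured with.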

      if (partition_settings == partition_zoltan)
        {
#  ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_zoltan' requires the library "
                        "to be compiled with support for Zoltan! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
#  else
          GridTools::partition_triangulation(
            this->n_subdomains, *this, SparsityTools::Partitioner::zoltan);
#  endif
        }
      else if (partition_settings == partition_metis)
        {
#  ifndef DEAL_II_WITH_METIS
          AssertThrow(false,
                      ExcMessage(
                        "Choosing 'partition_metis' requires the library "
                        "to be compiled with support for METIS! "
                        "Instead, you might use 'partition_auto' to select "
                        "a partitioning algorithm that is supported "
                        "by your current configuration."));
#  else
          GridTools::partition_triangulation(
            this->n_subdomains, *this, SparsityTools::Partitioner::metis);
#  endif
        }
      else if (partition_settings == partition_zorder)
        {
          GridTools::partition_triangulation_zorder(this->n_subdomains, *this);
        }
      else if (partition_settings == partition_custom_signal)
        {
          // the user is responsible for partitioning via a signal
        }
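
      // Sketch (illustrative, following the documentation of
      // partition_custom_signal): a user-defined partition is typically
      // attached to the triangulation's post_refinement signal, e.g.
      //
      //   tria.signals.post_refinement.connect([&tria]() {
      //     for (const auto &cell : tria.active_cell_iterators())
      //       cell->set_subdomain_id(my_owner_of(cell));
      //   });
      //
      // where my_owner_of() is a placeholder for the user's assignment rule.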

      // do not partition the multigrid levels if the user defines a custom
      // partition via a signal
      if ((settings & construct_multigrid_hierarchy) &&
          !(settings & partition_custom_signal))
        GridTools::partition_multigrid_levels(*this);

      true_subdomain_ids_of_cells.resize(this->n_active_cells());

      // loop over all cells and mark artificial cells:
      typename parallel::shared::Triangulation<dim,
                                               spacedim>::active_cell_iterator
        cell = this->begin_active(),
        endc = this->end();

      if (allow_artificial_cells)
        {
          // get the halo layer of (ghost) cells around the locally owned
          // cells
          std::function<bool(
            const typename parallel::shared::Triangulation<dim, spacedim>::
              active_cell_iterator &)>
            predicate = IteratorFilters::SubdomainEqualTo(this->my_subdomain);

          const std::vector<typename parallel::shared::Triangulation<
            dim,
            spacedim>::active_cell_iterator>
            active_halo_layer_vector =
              GridTools::compute_active_cell_halo_layer(*this, predicate);
          std::set<typename parallel::shared::Triangulation<dim, spacedim>::
                     active_cell_iterator>
            active_halo_layer(active_halo_layer_vector.begin(),
                              active_halo_layer_vector.end());

          for (unsigned int index = 0; cell != endc; cell++, index++)
            {
              // store the true subdomain id of every active cell
              true_subdomain_ids_of_cells[index] = cell->subdomain_id();

              // cells that are neither locally owned nor part of the halo
              // layer are marked as artificial
              if (cell->is_locally_owned() == false &&
                  active_halo_layer.find(cell) == active_halo_layer.end())
                cell->set_subdomain_id(numbers::artificial_subdomain_id);
            }
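
          // At this point every active cell is either locally owned, a ghost
          // cell (member of the halo layer around the locally owned cells),
          // or artificial; the original owner of each cell remains available
          // through true_subdomain_ids_of_cells.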

          // loop over all cells in the multigrid hierarchy and mark
          // artificial cells:
          if (settings & construct_multigrid_hierarchy)
            {
              true_level_subdomain_ids_of_cells.resize(this->n_levels());

              std::function<bool(
                const typename parallel::shared::Triangulation<dim, spacedim>::
                  cell_iterator &)>
                predicate = IteratorFilters::LocallyOwnedLevelCell();
              for (unsigned int lvl = 0; lvl < this->n_levels(); ++lvl)
                {
                  true_level_subdomain_ids_of_cells[lvl].resize(
                    this->n_cells(lvl));

                  const std::vector<typename parallel::shared::Triangulation<
                    dim,
                    spacedim>::cell_iterator>
                    level_halo_layer_vector =
                      GridTools::compute_cell_halo_layer_on_level(
                        *this, predicate, lvl);
                  std::set<typename parallel::shared::
                             Triangulation<dim, spacedim>::cell_iterator>
                    level_halo_layer(level_halo_layer_vector.begin(),
                                     level_halo_layer_vector.end());

                  typename parallel::shared::Triangulation<dim, spacedim>::
                    cell_iterator cell = this->begin(lvl),
                                  endc = this->end(lvl);
                  for (unsigned int index = 0; cell != endc; cell++, index++)
                    {
                      // store the true level subdomain id before possibly
                      // overwriting it below:
                      true_level_subdomain_ids_of_cells[lvl][index] =
                        cell->level_subdomain_id();

                      // active cells that are not artificial on the active
                      // mesh keep their level subdomain id:
                      if (cell->is_active() &&
                          cell->subdomain_id() !=
                            numbers::artificial_subdomain_id)
                        continue;

                      // keep the parents of cells owned by this process:
                      if (cell->has_children())
                        {
                          bool keep_cell = false;
                          for (unsigned int c = 0;
                               c < GeometryInfo<dim>::max_children_per_cell;
                               ++c)
                            if (cell->child(c)->level_subdomain_id() ==
                                this->my_subdomain)
                              {
                                keep_cell = true;
                                break;
                              }
                          if (keep_cell)
                            continue;
                        }

                      // keep neighbors on the same level (level halo layer):
                      if (!cell->is_locally_owned_on_level() &&
                          level_halo_layer.find(cell) != level_halo_layer.end())
                        continue;

                      // mark all remaining cells as artificial on this level:
                      cell->set_level_subdomain_id(
                        numbers::artificial_subdomain_id);
                    }
                }
            }
        }
      else
        {
          // without artificial cells, just store the true subdomain ids
          for (unsigned int index = 0; cell != endc; cell++, index++)
            true_subdomain_ids_of_cells[index] = cell->subdomain_id();
        }

#  ifdef DEBUG
      {
        // assert that each active cell is owned by a processor
        const unsigned int n_my_cells = std::count_if(
          this->begin_active(),
          typename Triangulation<dim, spacedim>::active_cell_iterator(
            this->end()),
          [](const auto &i) { return (i.is_locally_owned()); });

        const unsigned int total_cells =
          Utilities::MPI::sum(n_my_cells, this->get_communicator());
        Assert(total_cells == this->n_active_cells(),
               ExcMessage("Not all cells are assigned to a processor."));
      }

      // if running with multigrid, assert that each level cell is owned by
      // a processor
      if (settings & construct_multigrid_hierarchy)
        {
          const unsigned int n_my_cells =
            std::count_if(this->begin(), this->end(), [](const auto &i) {
              return (i.is_locally_owned_on_level());
            });

          const unsigned int total_cells =
            Utilities::MPI::sum(n_my_cells, this->get_communicator());
          Assert(total_cells == this->n_cells(),
                 ExcMessage("Not all cells are assigned to a processor."));
        }
#  endif
    }


    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      return allow_artificial_cells;
    }


    template <int dim, int spacedim>
    const std::vector<types::subdomain_id> &
    Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      return true_subdomain_ids_of_cells;
    }
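
    // Usage sketch (illustrative): the true owner of an active cell can be
    // looked up even if the cell has been marked artificial on this process,
    // e.g.
    //
    //   const std::vector<types::subdomain_id> &owners =
    //     tria.get_true_subdomain_ids_of_cells();
    //   const types::subdomain_id owner = owners[cell->active_cell_index()];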


    template <int dim, int spacedim>
    const std::vector<types::subdomain_id> &
    Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
      const unsigned int level) const
    {
      Assert(level < true_level_subdomain_ids_of_cells.size(),
             ExcInternalError());
      Assert(true_level_subdomain_ids_of_cells[level].size() ==
               this->n_cells(level),
             ExcInternalError());
      return true_level_subdomain_ids_of_cells[level];
    }
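
    // Usage sketch (illustrative): level ownership can be queried in the same
    // way for multigrid levels, e.g.
    //
    //   const std::vector<types::subdomain_id> &level_owners =
    //     tria.get_true_level_subdomain_ids_of_cells(level);
    //   AssertDimension(level_owners.size(), tria.n_cells(level));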


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
    {
      // make sure that all refinement/coarsening flags are the same on all
      // processes
      {
        // Obtain the integer type used to represent the refinement
        // possibilities of a cell; it has to fit into a single byte so that
        // the flags of two cells can be packed next to each other below.
        using int_type = std::underlying_type_t<
          typename RefinementPossibilities<dim>::Possibilities>;
        static_assert(sizeof(int_type) == sizeof(std::uint8_t),
                      "Internal type mismatch.");

        // gather the refinement and coarsening flags of the locally owned
        // cells of all processes
        std::vector<int_type> refinement_configurations(
          this->n_active_cells() * 2, int_type(0));
        for (const auto &cell : this->active_cell_iterators())
          if (cell->is_locally_owned())
            {
              refinement_configurations[cell->active_cell_index() * 2 + 0] =
                static_cast<int_type>(cell->refine_flag_set());
              refinement_configurations[cell->active_cell_index() * 2 + 1] =
                static_cast<int_type>(cell->coarsen_flag_set() ? 1 : 0);
            }

        Utilities::MPI::max(refinement_configurations,
                            this->get_communicator(),
                            refinement_configurations);

        for (const auto &cell : this->active_cell_iterators())
          {
            cell->clear_refine_flag();
            cell->clear_coarsen_flag();

            Assert(
              (refinement_configurations[cell->active_cell_index() * 2 + 0] >
                   0 ?
                 1 :
                 0) +
                  refinement_configurations[cell->active_cell_index() * 2 +
                                            1] <=
                1,
              ExcMessage(
                "Refinement/coarsening flags of cells are not consistent in parallel!"));

            if (refinement_configurations[cell->active_cell_index() * 2 + 0] !=
                0)
              cell->set_refine_flag(RefinementCase<dim>(
                refinement_configurations[cell->active_cell_index() * 2 + 0]));

            if (refinement_configurations[cell->active_cell_index() * 2 + 1] >
                0)
              cell->set_coarsen_flag();
          }
      }

      dealii::Triangulation<dim, spacedim>::execute_coarsening_and_refinement();
      partition();
      this->update_number_cache();
    }
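
    // Usage sketch (illustrative): each process sets flags only on its
    // locally owned cells, and the flags are synchronized across all MPI
    // ranks before the actual refinement, e.g.
    //
    //   for (const auto &cell : tria.active_cell_iterators())
    //     if (cell->is_locally_owned() && my_refinement_criterion(cell))
    //       cell->set_refine_flag();
    //   tria.execute_coarsening_and_refinement();
    //
    // where my_refinement_criterion() is a placeholder for a user-provided
    // indicator.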


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::create_triangulation(
      const std::vector<Point<spacedim>> &vertices,
      const std::vector<CellData<dim>>   &cells,
      const SubCellData                  &subcelldata)
    {
      try
        {
          dealii::Triangulation<dim, spacedim>::create_triangulation(
            vertices, cells, subcelldata);
        }
      catch (
        const typename dealii::Triangulation<dim, spacedim>::DistortedCellList
          &)
        {
          // the underlying triangulation should not be checking for
          // distorted cells
          Assert(false, ExcInternalError());
        }
      partition();
      this->update_number_cache();
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::create_triangulation(
      const TriangulationDescription::Description<dim, spacedim>
        &construction_data)
    {
      (void)construction_data;

      Assert(false, ExcInternalError());
    }


    template <int dim, int spacedim>
    void
    Triangulation<dim, spacedim>::copy_triangulation(
      const dealii::Triangulation<dim, spacedim> &other_tria)
    {
      Assert(
        (dynamic_cast<
           const dealii::parallel::DistributedTriangulationBase<dim, spacedim>
             *>(&other_tria) == nullptr),
        ExcMessage(
          "Cannot use this function on parallel::distributed::Triangulation."));

      dealii::parallel::TriangulationBase<dim, spacedim>::copy_triangulation(
        other_tria);
      partition();
      this->update_number_cache();
    }
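
    // Usage sketch (illustrative): a common way to fill a
    // parallel::shared::Triangulation is to build a serial mesh first and
    // copy it, e.g.
    //
    //   Triangulation<dim> serial_tria;
    //   GridGenerator::hyper_cube(serial_tria);
    //   serial_tria.refine_global(3);
    //
    //   parallel::shared::Triangulation<dim> tria(MPI_COMM_WORLD);
    //   tria.copy_triangulation(serial_tria);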
  } // namespace shared
} // namespace parallel

#else

namespace parallel
{
  namespace shared
  {
    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::with_artificial_cells() const
    {
      Assert(false, ExcNotImplemented());
      return true;
    }


    template <int dim, int spacedim>
    bool
    Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed() const
    {
      return false;
    }


    template <int dim, int spacedim>
    const std::vector<unsigned int> &
    Triangulation<dim, spacedim>::get_true_subdomain_ids_of_cells() const
    {
      Assert(false, ExcNotImplemented());
      return true_subdomain_ids_of_cells;
    }


    template <int dim, int spacedim>
    const std::vector<unsigned int> &
    Triangulation<dim, spacedim>::get_true_level_subdomain_ids_of_cells(
      const unsigned int) const
    {
      Assert(false, ExcNotImplemented());
      return true_level_subdomain_ids_of_cells;
    }
  } // namespace shared
} // namespace parallel

#endif


namespace internal
{
  namespace parallel
  {
    namespace shared
    {
      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim, spacedim>::
        TemporarilyRestoreSubdomainIds(const Triangulation<dim, spacedim> &tria)
        : shared_tria(
            dynamic_cast<
              const dealii::parallel::shared::Triangulation<dim, spacedim> *>(
              &tria))
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // Save the current subdomain ids, then assign to every active
            // cell the id of its "true" owner as stored by the shared
            // triangulation.
            const std::vector<types::subdomain_id> &true_subdomain_ids =
              shared_tria->get_true_subdomain_ids_of_cells();

            saved_subdomain_ids.resize(shared_tria->n_active_cells());
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index   = cell->active_cell_index();
                saved_subdomain_ids[index] = cell->subdomain_id();
                cell->set_subdomain_id(true_subdomain_ids[index]);
              }
          }
      }
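
      // Usage sketch (illustrative): the class is meant to be used in RAII
      // fashion around code that needs to see the true owners, e.g.
      //
      //   {
      //     const internal::parallel::shared::
      //       TemporarilyRestoreSubdomainIds<dim, spacedim> restore(tria);
      //     // cell->subdomain_id() now returns the true owner of each cell
      //   } // the destructor restores the artificial subdomain ids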


      template <int dim, int spacedim>
      TemporarilyRestoreSubdomainIds<dim, spacedim>::
        ~TemporarilyRestoreSubdomainIds()
      {
        if (shared_tria && shared_tria->with_artificial_cells())
          {
            // restore the previously saved subdomain ids
            for (const auto &cell : shared_tria->active_cell_iterators())
              {
                const unsigned int index = cell->active_cell_index();
                cell->set_subdomain_id(saved_subdomain_ids[index]);
              }
          }
      }
    } // namespace shared
  }   // namespace parallel
} // namespace internal


/*-------------- Explicit Instantiations -------------------------------*/

#include "shared_tria.inst"


DEAL_II_NAMESPACE_CLOSE