#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/partitioner.h>

#include <deal.II/distributed/shared_tria.h>
#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/grid_tools.h>

#include <deal.II/lac/vector_memory.h>

#include <algorithm>
#include <limits>

DEAL_II_NAMESPACE_OPEN

namespace parallel
{
  template <int dim, int spacedim>
  TriangulationBase<dim, spacedim>::TriangulationBase(
    const MPI_Comm mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
      smooth_grid,
    const bool check_for_distorted_cells)
    : dealii::Triangulation<dim, spacedim>(smooth_grid,
                                           check_for_distorted_cells)
    , mpi_communicator(mpi_communicator)
    , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
    , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#endif
  }
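

  // A minimal usage sketch of how this base class is typically reached from
  // user code, assuming the derived class parallel::distributed::Triangulation
  // and the GridGenerator functions from the library (neither appears in this
  // file). The cached my_subdomain / n_subdomains members correspond to the
  // process rank and communicator size shown at the end:
  //
  // @code
  //   parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
  //   GridGenerator::hyper_cube(tria);
  //   tria.refine_global(3);
  //
  //   const unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  //   const unsigned int size = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  // @endcode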
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::copy_triangulation(
    const dealii::Triangulation<dim, spacedim> &other_tria)
  {
#ifndef DEAL_II_WITH_MPI
    (void)other_tria;
    Assert(false, ExcNeedsMPI());
#else
    dealii::Triangulation<dim, spacedim>::copy_triangulation(other_tria);

    if (const ::parallel::TriangulationBase<dim, spacedim> *other_tria_x =
          dynamic_cast<const ::parallel::TriangulationBase<dim, spacedim> *>(
            &other_tria))
      {
        // release unused vector memory because we will have very different
        // vectors now
        ::dealii::internal::GrowingVectorMemoryImplementation::
          release_all_unused_memory();
      }
#endif
  }
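

  // A short sketch of how copy_triangulation() is typically driven from user
  // code, assuming a coarse serial mesh is built first and then copied into a
  // parallel triangulation; the names serial_tria / parallel_tria and the use
  // of GridGenerator are illustrative, not taken from this file:
  //
  // @code
  //   Triangulation<2> serial_tria;
  //   GridGenerator::hyper_ball(serial_tria);
  //
  //   parallel::distributed::Triangulation<2> parallel_tria(MPI_COMM_WORLD);
  //   parallel_tria.copy_triangulation(serial_tria);
  // @endcode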
  template <int dim, int spacedim>
  std::size_t
  TriangulationBase<dim, spacedim>::memory_consumption() const
  {
    const std::size_t mem =
      this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
      MemoryConsumption::memory_consumption(this->mpi_communicator) +
      MemoryConsumption::memory_consumption(my_subdomain) +
      MemoryConsumption::memory_consumption(
        number_cache.n_global_active_cells) +
      MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;
  }
  template <int dim, int spacedim>
  TriangulationBase<dim, spacedim>::~TriangulationBase()
  {
    // release unused vector memory; vectors built on this triangulation
    // become invalid once it is destroyed
    ::dealii::internal::GrowingVectorMemoryImplementation::
      release_all_unused_memory();
  }
  template <int dim, int spacedim>
  TriangulationBase<dim, spacedim>::NumberCache::NumberCache()
    : n_locally_owned_active_cells(0)
    , n_global_active_cells(0)
    , number_of_global_coarse_cells(0)
    , n_global_levels(0)
  {}
  template <int dim, int spacedim>
  unsigned int
  TriangulationBase<dim, spacedim>::n_locally_owned_active_cells() const
  {
    return number_cache.n_locally_owned_active_cells;
  }
  template <int dim, int spacedim>
  unsigned int
  TriangulationBase<dim, spacedim>::n_global_levels() const
  {
    return number_cache.n_global_levels;
  }
  template <int dim, int spacedim>
  types::global_cell_index
  TriangulationBase<dim, spacedim>::n_global_active_cells() const
  {
    return number_cache.n_global_active_cells;
  }
  template <int dim, int spacedim>
  MPI_Comm
  TriangulationBase<dim, spacedim>::get_communicator() const
  {
    return mpi_communicator;
  }
#ifdef DEAL_II_WITH_MPI
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::update_number_cache()
  {
    number_cache.ghost_owners.clear();
    number_cache.level_ghost_owners.clear();
    number_cache.n_locally_owned_active_cells = 0;

    if (this->n_levels() == 0)
      {
        // Skip communication if the triangulation is empty on all
        // processes; this happens, for example, when this function is
        // called while the triangulation is being torn down.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels       = 0;
        return;
      }

    {
      // find ghost owners
      for (const auto &cell : this->active_cell_iterators())
        if (cell->is_ghost())
          number_cache.ghost_owners.insert(cell->subdomain_id());

      Assert(number_cache.ghost_owners.size() <
               Utilities::MPI::n_mpi_processes(this->mpi_communicator),
             ExcInternalError());
    }

    if (this->n_levels() > 0)
      number_cache.n_locally_owned_active_cells = std::count_if(
        this->begin_active(),
        typename Triangulation<dim, spacedim>::active_cell_iterator(
          this->end()),
        [](const auto &i) { return i.is_locally_owned(); });
    else
      number_cache.n_locally_owned_active_cells = 0;

    // potentially cast to a 64 bit type before accumulating to avoid overflow
    number_cache.n_global_active_cells =
      Utilities::MPI::sum(static_cast<types::global_cell_index>(
                            number_cache.n_locally_owned_active_cells),
                          this->mpi_communicator);

    number_cache.n_global_levels =
      Utilities::MPI::max(this->n_levels(), this->mpi_communicator);

    // store MPI ranks of level ghost owners of this process on all levels
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        number_cache.level_ghost_owners.clear();

        // if there is nothing to do, then do nothing
        if (this->n_levels() == 0)
          return;

        // find level ghost owners
        for (const auto &cell : this->cell_iterators())
          if (cell->level_subdomain_id() !=
                numbers::artificial_subdomain_id &&
              cell->level_subdomain_id() != this->locally_owned_subdomain())
            this->number_cache.level_ghost_owners.insert(
              cell->level_subdomain_id());

#  ifdef DEBUG
        // check that level_ghost_owners is symmetric by sending a message
        // to everyone
        {
          int ierr = MPI_Barrier(this->mpi_communicator);
          AssertThrowMPI(ierr);

          const int mpi_tag = Utilities::MPI::internal::Tags::
            triangulation_base_fill_level_ghost_owners;

          // important: preallocate to avoid (re)allocation:
          std::vector<MPI_Request> requests(
            this->number_cache.level_ghost_owners.size());
          unsigned int dummy       = 0;
          unsigned int req_counter = 0;

          for (const auto &it : this->number_cache.level_ghost_owners)
            {
              ierr = MPI_Isend(&dummy,
                               1,
                               MPI_UNSIGNED,
                               it,
                               mpi_tag,
                               this->mpi_communicator,
                               &requests[req_counter]);
              AssertThrowMPI(ierr);
              ++req_counter;
            }

          for (const auto &it : this->number_cache.level_ghost_owners)
            {
              unsigned int dummy;
              ierr = MPI_Recv(&dummy,
                              1,
                              MPI_UNSIGNED,
                              it,
                              mpi_tag,
                              this->mpi_communicator,
                              MPI_STATUS_IGNORE);
              AssertThrowMPI(ierr);
            }

          if (requests.size() > 0)
            {
              ierr = MPI_Waitall(requests.size(),
                                 requests.data(),
                                 MPI_STATUSES_IGNORE);
              AssertThrowMPI(ierr);
            }

          ierr = MPI_Barrier(this->mpi_communicator);
          AssertThrowMPI(ierr);
        }
#  endif

        Assert(this->number_cache.level_ghost_owners.size() <
                 Utilities::MPI::n_mpi_processes(this->mpi_communicator),
               ExcInternalError());
      }

    this->number_cache.number_of_global_coarse_cells = this->n_cells(0);

    // reset global cell ids
    this->reset_global_cell_indices();
  }
#else

  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::update_number_cache()
  {
    Assert(false, ExcNeedsMPI());
  }

#endif
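

  // After update_number_cache() has run, the aggregated quantities are
  // available through the accessors defined above. A short sketch, assuming
  // `tria` is a triangulation derived from this class:
  //
  // @code
  //   const auto n_owned  = tria.n_locally_owned_active_cells();
  //   const auto n_global = tria.n_global_active_cells(); // MPI sum of n_owned
  //   const auto n_levels = tria.n_global_levels();       // MPI max of n_levels()
  //
  //   for (const auto rank : tria.ghost_owners())
  //     std::cout << "this process has ghost cells owned by rank " << rank
  //               << std::endl;
  // @endcode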
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::update_reference_cells()
  {
    // run algorithm for locally-owned cells
    dealii::Triangulation<dim, spacedim>::update_reference_cells();

    // translate ReferenceCell to unsigned int (needed by
    // Utilities::MPI::compute_set_union)
    std::vector<unsigned int> reference_cells_ui;

    reference_cells_ui.reserve(this->reference_cells.size());
    for (const auto &i : this->reference_cells)
      reference_cells_ui.push_back(static_cast<unsigned int>(i));

    // create union over all processes
    reference_cells_ui =
      Utilities::MPI::compute_set_union(reference_cells_ui,
                                        this->mpi_communicator);

    // transform back and store result
    this->reference_cells.clear();
    for (const auto &i : reference_cells_ui)
      this->reference_cells.emplace_back(
        dealii::internal::make_reference_cell_from_int(i));
  }
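

  // The set union above matters for mixed meshes: a process whose
  // locally-owned cells are all quadrilaterals must still learn that another
  // process owns triangles, so that the list of reference cells is identical
  // on all ranks. A sketch of the resulting invariant, assuming two ranks
  // where rank 0 owns only quadrilaterals and rank 1 owns only triangles:
  //
  // @code
  //   // after update_reference_cells(), both ranks see both kinds:
  //   Assert(tria.get_reference_cells().size() == 2, ExcInternalError());
  // @endcode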
  template <int dim, int spacedim>
  types::subdomain_id
  TriangulationBase<dim, spacedim>::locally_owned_subdomain() const
  {
    return my_subdomain;
  }
  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  TriangulationBase<dim, spacedim>::ghost_owners() const
  {
    return number_cache.ghost_owners;
  }
  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  TriangulationBase<dim, spacedim>::level_ghost_owners() const
  {
    return number_cache.level_ghost_owners;
  }
  template <int dim, int spacedim>
  std::vector<types::boundary_id>
  TriangulationBase<dim, spacedim>::get_boundary_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_boundary_ids(),
      this->mpi_communicator);
  }
  template <int dim, int spacedim>
  std::vector<types::manifold_id>
  TriangulationBase<dim, spacedim>::get_manifold_ids() const
  {
    return Utilities::MPI::compute_set_union(
      dealii::Triangulation<dim, spacedim>::get_manifold_ids(),
      this->mpi_communicator);
  }
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::reset_global_cell_indices()
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
#else
    if (const auto pst =
          dynamic_cast<const parallel::shared::Triangulation<dim, spacedim> *>(
            this))
      if (pst->with_artificial_cells() == false)
        {
          // Specialization for parallel::shared::Triangulation without
          // artificial cells: every process knows all cells, so the
          // enumeration can be computed without communication.
          std::vector<unsigned int> cell_counter(n_subdomains + 1);

          // count number of cells of each process
          for (const auto &cell : this->active_cell_iterators())
            cell_counter[cell->subdomain_id() + 1]++;

          // take prefix sum to obtain offset of each process
          for (unsigned int i = 0; i < n_subdomains; ++i)
            cell_counter[i + 1] += cell_counter[i];

          // create partitioner
          IndexSet is_local(this->n_active_cells());
          is_local.add_range(cell_counter[my_subdomain],
                             cell_counter[my_subdomain + 1]);
          number_cache.active_cell_index_partitioner =
            std::make_shared<const Utilities::MPI::Partitioner>(
              is_local,
              complete_index_set(this->n_active_cells()),
              this->mpi_communicator);

          // set global active cell indices, using the counters as running
          // per-process indices
          for (const auto &cell : this->active_cell_iterators())
            cell->set_global_active_cell_index(
              cell_counter[cell->subdomain_id()]++);

          Assert(this->is_multilevel_hierarchy_constructed() == false,
                 ExcNotImplemented());

          return;
        }

    // 1) determine the number of active locally-owned cells
    const types::global_cell_index n_locally_owned_cells =
      this->n_locally_owned_active_cells();

    // 2) determine the offset of each process
    types::global_cell_index cell_index = 0;

    const int ierr = MPI_Exscan(
      &n_locally_owned_cells,
      &cell_index,
      1,
      Utilities::MPI::mpi_type_id_for_type<types::global_cell_index>,
      MPI_SUM,
      this->mpi_communicator);
    AssertThrowMPI(ierr);

    // 3) give global indices to locally-owned cells and mark all other
    //    cells as invalid
    std::pair<types::global_cell_index, types::global_cell_index> my_range;
    my_range.first = cell_index;

    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_locally_owned())
        cell->set_global_active_cell_index(cell_index++);
      else
        cell->set_global_active_cell_index(numbers::invalid_dof_index);

    my_range.second = cell_index;

    // 4) determine the global indices of ghost cells
    std::vector<types::global_dof_index> is_ghost_vector;
    GridTools::exchange_cell_data_to_ghosts<types::global_cell_index>(
      *this,
      [](const auto &cell) { return cell->global_active_cell_index(); },
      [&is_ghost_vector](const auto &cell, const auto &id) {
        cell->set_global_active_cell_index(id);
        is_ghost_vector.push_back(id);
      });

    // 5) set up new partitioner
    IndexSet is_local(this->n_global_active_cells());
    is_local.add_range(my_range.first, my_range.second);

    IndexSet is_ghost(this->n_global_active_cells());
    std::sort(is_ghost_vector.begin(), is_ghost_vector.end());
    is_ghost.add_indices(is_ghost_vector.begin(), is_ghost_vector.end());

    number_cache.active_cell_index_partitioner =
      std::make_shared<const Utilities::MPI::Partitioner>(
        is_local, is_ghost, this->mpi_communicator);

    // 6) proceed with multigrid levels if requested
    if (this->is_multilevel_hierarchy_constructed() == true)
      {
        // 1) determine the number of locally-owned cells on each level
        std::vector<types::global_cell_index> n_cells_level(
          this->n_global_levels(), 0);

        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            n_cells_level[cell->level()]++;

        // 2) determine the offset of each process
        std::vector<types::global_cell_index> cell_index(
          this->n_global_levels(), 0);

        int ierr = MPI_Exscan(
          n_cells_level.data(),
          cell_index.data(),
          this->n_global_levels(),
          Utilities::MPI::mpi_type_id_for_type<types::global_cell_index>,
          MPI_SUM,
          this->mpi_communicator);
        AssertThrowMPI(ierr);

        // 3) determine the global number of cells on each level
        Utilities::MPI::sum(n_cells_level,
                            this->mpi_communicator,
                            n_cells_level);

        // 4) give global indices to locally-owned cells on each level and
        //    mark all other cells as invalid
        std::vector<
          std::pair<types::global_cell_index, types::global_cell_index>>
          my_ranges(this->n_global_levels());
        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          my_ranges[l].first = cell_index[l];

        for (auto cell : this->cell_iterators())
          if (cell->level_subdomain_id() == this->locally_owned_subdomain())
            cell->set_global_level_cell_index(cell_index[cell->level()]++);
          else
            cell->set_global_level_cell_index(numbers::invalid_dof_index);

        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          my_ranges[l].second = cell_index[l];

        // 5) exchange the global indices of ghost level cells
        std::vector<std::vector<types::global_dof_index>> is_ghost_vectors(
          this->n_global_levels());
        GridTools::exchange_cell_data_to_level_ghosts<
          types::global_cell_index>(
          *this,
          [](const auto &cell) { return cell->global_level_cell_index(); },
          [&is_ghost_vectors](const auto &cell, const auto &id) {
            cell->set_global_level_cell_index(id);
            is_ghost_vectors[cell->level()].push_back(id);
          });

        number_cache.level_cell_index_partitioners.resize(
          this->n_global_levels());

        // 6) set up a cell partitioner for each level
        for (unsigned int l = 0; l < this->n_global_levels(); ++l)
          {
            IndexSet is_local(n_cells_level[l]);
            is_local.add_range(my_ranges[l].first, my_ranges[l].second);

            IndexSet is_ghost(n_cells_level[l]);
            std::sort(is_ghost_vectors[l].begin(), is_ghost_vectors[l].end());
            is_ghost.add_indices(is_ghost_vectors[l].begin(),
                                 is_ghost_vectors[l].end());

            number_cache.level_cell_index_partitioners[l] =
              std::make_shared<const Utilities::MPI::Partitioner>(
                is_local, is_ghost, this->mpi_communicator);
          }
      }
#endif
  }
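

  // The enumeration above is a distributed prefix sum: MPI_Exscan gives each
  // process the number of active cells owned by all lower ranks, and that
  // offset becomes the first global index handed out locally. A worked
  // example, assuming three ranks owning 4, 2, and 5 active cells:
  //
  //   rank 0: offset 0 -> global indices [0, 4)
  //   rank 1: offset 4 -> global indices [4, 6)
  //   rank 2: offset 6 -> global indices [6, 11)
  //
  // Ghost cells then receive their indices from the owning rank through
  // GridTools::exchange_cell_data_to_ghosts(), and the locally-owned range
  // plus the received ghost indices form the two index sets of the
  // Utilities::MPI::Partitioner.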
  template <int dim, int spacedim>
  void
  TriangulationBase<dim, spacedim>::communicate_locally_moved_vertices(
    const std::vector<bool> &vertex_locally_moved)
  {
    AssertDimension(vertex_locally_moved.size(), this->n_vertices());
#ifdef DEBUG
    {
      const std::vector<bool> locally_owned_vertices =
        dealii::GridTools::get_locally_owned_vertices(*this);
      for (unsigned int i = 0; i < locally_owned_vertices.size(); ++i)
        Assert((vertex_locally_moved[i] == false) ||
                 (locally_owned_vertices[i] == true),
               ExcMessage("The vertex_locally_moved argument must not "
                          "contain vertices that are not locally owned"));
    }
#endif

    // Mark vertices that have not been moved with NaN coordinates so that
    // the unpack function below can skip them.
    Point<spacedim> invalid_point;
    for (unsigned int d = 0; d < spacedim; ++d)
      invalid_point[d] = std::numeric_limits<double>::quiet_NaN();

    const auto pack = [&](const auto &cell) {
      std::vector<Point<spacedim>> vertices(cell->n_vertices());

      for (const auto v : cell->vertex_indices())
        if (vertex_locally_moved[cell->vertex_index(v)])
          vertices[v] = cell->vertex(v);
        else
          vertices[v] = invalid_point;

      return vertices;
    };

    const auto unpack = [&](const auto &cell, const auto &vertices) {
      for (const auto v : cell->vertex_indices())
        if (numbers::is_nan(vertices[v][0]) == false)
          cell->vertex(v) = vertices[v];
    };

    if (this->is_multilevel_hierarchy_constructed())
      GridTools::exchange_cell_data_to_level_ghosts<
        std::vector<Point<spacedim>>>(*this, pack, unpack);
    else
      GridTools::exchange_cell_data_to_ghosts<std::vector<Point<spacedim>>>(
        *this, pack, unpack);
  }
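

  // A minimal sketch of the intended calling sequence, assuming user code has
  // displaced some locally-owned vertices of `tria` and recorded which ones
  // in a std::vector<bool>; the displacement of 0.01 is purely illustrative:
  //
  // @code
  //   const std::vector<bool> locally_owned_vertices =
  //     GridTools::get_locally_owned_vertices(tria);
  //   std::vector<bool> moved(tria.n_vertices(), false);
  //
  //   for (const auto &cell : tria.active_cell_iterators())
  //     if (cell->is_locally_owned())
  //       for (const auto v : cell->vertex_indices())
  //         if (locally_owned_vertices[cell->vertex_index(v)])
  //           {
  //             cell->vertex(v)[0] += 0.01; // move only locally owned vertices
  //             moved[cell->vertex_index(v)] = true;
  //           }
  //
  //   tria.communicate_locally_moved_vertices(moved);
  // @endcode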
  template <int dim, int spacedim>
  std::weak_ptr<const Utilities::MPI::Partitioner>
  TriangulationBase<dim, spacedim>::global_active_cell_index_partitioner()
    const
  {
    return number_cache.active_cell_index_partitioner;
  }
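

  // The returned partitioner can be used to set up ghosted vectors that store
  // one value per active cell. A sketch, assuming deal.II's
  // LinearAlgebra::distributed::Vector is used to hold the cell data:
  //
  // @code
  //   LinearAlgebra::distributed::Vector<double> cell_data;
  //   cell_data.reinit(tria.global_active_cell_index_partitioner().lock());
  //
  //   for (const auto &cell : tria.active_cell_iterators())
  //     if (cell->is_locally_owned())
  //       cell_data[cell->global_active_cell_index()] = cell->measure();
  //
  //   cell_data.update_ghost_values();
  // @endcode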
  template <int dim, int spacedim>
  std::weak_ptr<const Utilities::MPI::Partitioner>
  TriangulationBase<dim, spacedim>::global_level_cell_index_partitioner(
    const unsigned int level) const
  {
    Assert(this->is_multilevel_hierarchy_constructed(), ExcNotImplemented());
    AssertIndexRange(level, this->n_global_levels());

    return number_cache.level_cell_index_partitioners[level];
  }
  template <int dim, int spacedim>
  types::coarse_cell_id
  TriangulationBase<dim, spacedim>::n_global_coarse_cells() const
  {
    return number_cache.number_of_global_coarse_cells;
  }
  template <int dim, int spacedim>
  DistributedTriangulationBase<dim, spacedim>::DistributedTriangulationBase(
    const MPI_Comm mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
      smooth_grid,
    const bool check_for_distorted_cells)
    : dealii::parallel::TriangulationBase<dim, spacedim>(
        mpi_communicator,
        smooth_grid,
        check_for_distorted_cells)
  {}
  template <int dim, int spacedim>
  void
  DistributedTriangulationBase<dim, spacedim>::clear()
  {
    dealii::Triangulation<dim, spacedim>::clear();
  }
  template <int dim, int spacedim>
  bool
  DistributedTriangulationBase<dim, spacedim>::has_hanging_nodes() const
  {
    if (this->n_global_levels() <= 1)
      return false; // can not have hanging nodes without refined cells

    // if any locally-owned active cell is coarser than the finest global
    // level, there must be a hanging node somewhere
    const bool have_coarser_cell =
      std::any_of(this->begin_active(this->n_global_levels() - 2),
                  this->end_active(this->n_global_levels() - 2),
                  [](const CellAccessor<dim, spacedim> &cell) {
                    return cell.is_locally_owned();
                  });

    // return true if at least one process has a coarser cell
    return Utilities::MPI::max(have_coarser_cell ? 1 : 0,
                               this->mpi_communicator) != 0;
  }
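

  // Design note on the check above: no single process sees the whole
  // distributed mesh, so each process only inspects its locally-owned active
  // cells on level n_global_levels()-2, and the per-process results are
  // combined with a single Utilities::MPI::max reduction. This keeps the
  // query cheap: one integer reduction instead of exchanging any cell data.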
} // namespace parallel



/*-------------- Explicit Instantiations -------------------------------*/

#include "tria_base.inst"

DEAL_II_NAMESPACE_CLOSE