#include <deal.II/base/logstream.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/vector_memory.h>

#include <algorithm>
#include <numeric>


DEAL_II_NAMESPACE_OPEN

namespace parallel
{
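  // Constructor of the base class for all parallel triangulations: set up
  // the serial base class, then store the communicator together with this
  // process's rank (its subdomain id) and the total number of ranks.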
  template <int dim, int spacedim>
  Triangulation<dim, spacedim>::Triangulation(
    MPI_Comm mpi_communicator,
    const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
               smooth_grid,
    const bool check_for_distorted_cells)
    : dealii::Triangulation<dim, spacedim>(smooth_grid,
                                           check_for_distorted_cells)
    , mpi_communicator(mpi_communicator)
    , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
    , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false,
           ExcMessage("You compiled deal.II without MPI support, for "
                      "which parallel::Triangulation is not available."));
#endif
    number_cache.n_locally_owned_active_cells.resize(n_subdomains);
  }

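  // Copying a triangulation also adopts the source's communicator if the
  // source is itself a parallel::Triangulation.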
  template <int dim, int spacedim>
  void
  Triangulation<dim, spacedim>::copy_triangulation(
    const dealii::Triangulation<dim, spacedim> &other_tria)
  {
#ifndef DEAL_II_WITH_MPI
    (void)other_tria;
    Assert(false,
           ExcMessage("You compiled deal.II without MPI support, for "
                      "which parallel::Triangulation is not available."));
#else
    dealii::Triangulation<dim, spacedim>::copy_triangulation(other_tria);

    if (const dealii::parallel::Triangulation<dim, spacedim> *other_tria_x =
          dynamic_cast<const dealii::parallel::Triangulation<dim, spacedim> *>(
            &other_tria))
      {
        // inherit the communicator of the triangulation we copy from
        mpi_communicator = other_tria_x->get_communicator();

        // release unused vector memory because we will have very different
        // vectors now
        dealii::internal::GrowingVectorMemoryImplementation::
          release_all_unused_memory();
      }
#endif
  }

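  // Memory consumption: the serial base class plus the few parallel data
  // members added by this class.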
  template <int dim, int spacedim>
  std::size_t
  Triangulation<dim, spacedim>::memory_consumption() const
  {
    std::size_t mem =
      this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
      MemoryConsumption::memory_consumption(mpi_communicator) +
      MemoryConsumption::memory_consumption(my_subdomain) +
      MemoryConsumption::memory_consumption(
        number_cache.n_locally_owned_active_cells) +
      MemoryConsumption::memory_consumption(
        number_cache.n_global_active_cells) +
      MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;
  }

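  // The destructor only needs to clean up cached vector memory; everything
  // else is handled by the base class.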
  template <int dim, int spacedim>
  Triangulation<dim, spacedim>::~Triangulation()
  {
    // release unused vector memory because the vector layout is going to
    // look very different now
    dealii::internal::GrowingVectorMemoryImplementation::
      release_all_unused_memory();
  }

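  // NumberCache aggregates cell statistics that would otherwise require
  // communication to compute on the fly; start out with empty counts.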
  template <int dim, int spacedim>
  Triangulation<dim, spacedim>::NumberCache::NumberCache()
    : n_global_active_cells(0)
    , n_global_levels(0)
  {}

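  // The following accessors simply read the values stored in the
  // NumberCache; they involve no communication.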
  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim, spacedim>::n_locally_owned_active_cells() const
  {
    return number_cache.n_locally_owned_active_cells[my_subdomain];
  }

  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim, spacedim>::n_global_levels() const
  {
    return number_cache.n_global_levels;
  }

  template <int dim, int spacedim>
  types::global_dof_index
  Triangulation<dim, spacedim>::n_global_active_cells() const
  {
    return number_cache.n_global_active_cells;
  }

  template <int dim, int spacedim>
  const std::vector<unsigned int> &
  Triangulation<dim, spacedim>::n_locally_owned_active_cells_per_processor()
    const
  {
    return number_cache.n_locally_owned_active_cells;
  }

  template <int dim, int spacedim>
  MPI_Comm
  Triangulation<dim, spacedim>::get_communicator() const
  {
    return mpi_communicator;
  }

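  // Recompute the NumberCache. This is a collective operation: every
  // process counts its locally owned active cells and the set of subdomains
  // owning its ghost cells, then an MPI_Allgather distributes the per-rank
  // counts, from which the global number of active cells and levels follow.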
#ifdef DEAL_II_WITH_MPI

  template <int dim, int spacedim>
  void
  Triangulation<dim, spacedim>::update_number_cache()
  {
    Assert(number_cache.n_locally_owned_active_cells.size() ==
             Utilities::MPI::n_mpi_processes(this->mpi_communicator),
           ExcInternalError());

    std::fill(number_cache.n_locally_owned_active_cells.begin(),
              number_cache.n_locally_owned_active_cells.end(),
              0);

    number_cache.ghost_owners.clear();
    number_cache.level_ghost_owners.clear();

    if (this->n_levels() == 0)
      {
        // Skip communication if the triangulation is empty on every
        // processor. This can happen when this function is called from the
        // destructor during exception handling, where the collective calls
        // below would otherwise hang.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels       = 0;
        return;
      }

    {
      // find ghost owners
      for (typename Triangulation<dim, spacedim>::active_cell_iterator cell =
             this->begin_active();
           cell != this->end();
           ++cell)
        if (cell->is_ghost())
          number_cache.ghost_owners.insert(cell->subdomain_id());

      Assert(number_cache.ghost_owners.size() <
               Utilities::MPI::n_mpi_processes(this->mpi_communicator),
             ExcInternalError());
    }

    // count locally owned active cells
    if (this->n_levels() > 0)
      for (typename Triangulation<dim, spacedim>::active_cell_iterator cell =
             this->begin_active();
           cell != this->end();
           ++cell)
        if (cell->subdomain_id() == my_subdomain)
          ++number_cache.n_locally_owned_active_cells[my_subdomain];

    // exchange the local counts so that every process knows how many cells
    // each of the other processes owns
    unsigned int send_value =
      number_cache.n_locally_owned_active_cells[my_subdomain];
    const int ierr =
      MPI_Allgather(&send_value,
                    1,
                    MPI_UNSIGNED,
                    number_cache.n_locally_owned_active_cells.data(),
                    1,
                    MPI_UNSIGNED,
                    this->mpi_communicator);
    AssertThrowMPI(ierr);

    number_cache.n_global_active_cells =
      std::accumulate(number_cache.n_locally_owned_active_cells.begin(),
                      number_cache.n_locally_owned_active_cells.end(),
                      // ensure that the sum is computed with a wide enough
                      // data type:
                      static_cast<types::global_dof_index>(0));
    number_cache.n_global_levels =
      Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
  }

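  // Determine the owners of level ghost cells, i.e., cells in the multigrid
  // hierarchy that are stored here but owned by another rank. In debug mode
  // the resulting relation is additionally checked for symmetry via a
  // message exchange.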
  template <int dim, int spacedim>
  void
  Triangulation<dim, spacedim>::fill_level_ghost_owners()
  {
    number_cache.level_ghost_owners.clear();

    // if there is nothing to do, then do nothing
    if (this->n_levels() == 0)
      return;

    // find level ghost owners
    for (typename Triangulation<dim, spacedim>::cell_iterator cell =
           this->begin();
         cell != this->end();
         ++cell)
      if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
          cell->level_subdomain_id() != this->locally_owned_subdomain())
        this->number_cache.level_ghost_owners.insert(
          cell->level_subdomain_id());

#ifdef DEBUG
    // Check that level_ghost_owners is symmetric by sending a message to
    // everyone. The tag value is arbitrary; it only needs to match between
    // the sends and receives below.
    {
      int ierr = MPI_Barrier(this->mpi_communicator);
      AssertThrowMPI(ierr);

      // important: preallocate to avoid (re)allocation:
      std::vector<MPI_Request> requests(
        this->number_cache.level_ghost_owners.size());
      unsigned int dummy       = 0;
      unsigned int req_counter = 0;

      for (std::set<types::subdomain_id>::iterator it =
             this->number_cache.level_ghost_owners.begin();
           it != this->number_cache.level_ghost_owners.end();
           ++it, ++req_counter)
        {
          ierr = MPI_Isend(&dummy,
                           1,
                           MPI_UNSIGNED,
                           *it,
                           9001,
                           this->mpi_communicator,
                           &requests[req_counter]);
          AssertThrowMPI(ierr);
        }

      for (std::set<types::subdomain_id>::iterator it =
             this->number_cache.level_ghost_owners.begin();
           it != this->number_cache.level_ghost_owners.end();
           ++it)
        {
          // use a separate receive buffer so the pending sends above are
          // not disturbed
          unsigned int dummy_recv;
          ierr = MPI_Recv(&dummy_recv,
                          1,
                          MPI_UNSIGNED,
                          *it,
                          9001,
                          this->mpi_communicator,
                          MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

      if (requests.size() > 0)
        {
          ierr = MPI_Waitall(requests.size(),
                             requests.data(),
                             MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      ierr = MPI_Barrier(this->mpi_communicator);
      AssertThrowMPI(ierr);
    }
#endif

    Assert(this->number_cache.level_ghost_owners.size() <
             Utilities::MPI::n_mpi_processes(this->mpi_communicator),
           ExcInternalError());
  }

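  // Without MPI support the two cache-updating functions above cannot be
  // implemented, so provide stubs that abort when called.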
#else

  template <int dim, int spacedim>
  void
  Triangulation<dim, spacedim>::update_number_cache()
  {
    Assert(false, ExcNotImplemented());
  }

  template <int dim, int spacedim>
  void
  Triangulation<dim, spacedim>::fill_level_ghost_owners()
  {
    Assert(false, ExcNotImplemented());
  }

#endif

  template <int dim, int spacedim>
  types::subdomain_id
  Triangulation<dim, spacedim>::locally_owned_subdomain() const
  {
    return my_subdomain;
  }

  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  Triangulation<dim, spacedim>::ghost_owners() const
  {
    return number_cache.ghost_owners;
  }

  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  Triangulation<dim, spacedim>::level_ghost_owners() const
  {
    return number_cache.level_ghost_owners;
  }

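  // For every vertex of a locally owned cell that is also touched by a
  // ghost cell, record the set of subdomain ids of those ghost cells. The
  // result can be used, e.g., to set up vertex-based point-to-point
  // communication between ranks.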
  template <int dim, int spacedim>
  std::map<unsigned int, std::set<dealii::types::subdomain_id>>
  Triangulation<dim, spacedim>::compute_vertices_with_ghost_neighbors() const
  {
    // mark all vertices that belong to a locally owned cell
    std::vector<bool> vertex_of_own_cell(this->n_vertices(), false);

    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_locally_owned())
        for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell;
             ++v)
          vertex_of_own_cell[cell->vertex_index(v)] = true;

    std::map<unsigned int, std::set<dealii::types::subdomain_id>> result;

    // loop over all active ghost cells and record their owners for every
    // vertex they share with a locally owned cell
    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_ghost())
        {
          const types::subdomain_id owner = cell->subdomain_id();

          for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell;
               ++v)
            if (vertex_of_own_cell[cell->vertex_index(v)])
              result[cell->vertex_index(v)].insert(owner);
        }

    return result;
  }

} // namespace parallel

/*-------------- Explicit Instantiations -------------------------------*/

#include "tria_base.inst"


DEAL_II_NAMESPACE_CLOSE