#include <deal.II/base/utilities.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/logstream.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/distributed/tria_base.h>

// for std::fill and std::accumulate used below
#include <algorithm>
#include <numeric>

DEAL_II_NAMESPACE_OPEN

namespace parallel
{
  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::
  Triangulation (MPI_Comm mpi_communicator,
                 const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid,
                 const bool check_for_distorted_cells)
    :
    dealii::Triangulation<dim,spacedim>(smooth_grid,check_for_distorted_cells),
    mpi_communicator (mpi_communicator),
    my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
    n_subdomains (Utilities::MPI::n_mpi_processes (this->mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcMessage("You compiled deal.II without MPI support, for "
                             "which parallel::Triangulation is not available."));
#endif
    number_cache.n_locally_owned_active_cells.resize (n_subdomains);
  }
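  // copy_triangulation() copies the mesh data via the serial base class.
  // If the source is itself a parallel::Triangulation, we also adopt its
  // MPI communicator; any pooled vectors are released because their
  // layout was tied to the old mesh.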
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim,spacedim> &other_tria)
  {
#ifndef DEAL_II_WITH_MPI
    (void)other_tria;
    Assert(false, ExcMessage("You compiled deal.II without MPI support, for "
                             "which parallel::Triangulation is not available."));
#else
    dealii::Triangulation<dim,spacedim>::copy_triangulation (other_tria);

    if (const dealii::parallel::Triangulation<dim,spacedim> *other_tria_x
        = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&other_tria))
      {
        mpi_communicator = other_tria_x->get_communicator();

        // release unused vector memory because we will have very
        // different vectors now
        dealii::internal::GrowingVectorMemoryImplementation::release_all_unused_memory();
      }
#endif
  }
  template <int dim, int spacedim>
  std::size_t
  Triangulation<dim,spacedim>::memory_consumption () const
  {
    const std::size_t mem
      = this->dealii::Triangulation<dim,spacedim>::memory_consumption()
        + MemoryConsumption::memory_consumption(mpi_communicator)
        + MemoryConsumption::memory_consumption(my_subdomain)
        + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
        + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
        + MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;
  }
  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::~Triangulation ()
  {
    // release unused vector memory because the vector layout is going to
    // look very different now
    dealii::internal::GrowingVectorMemoryImplementation::release_all_unused_memory();
  }
  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::NumberCache::NumberCache()
    :
    n_global_active_cells(0),
    n_global_levels(0)
  {}
  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
  {
    return number_cache.n_locally_owned_active_cells[my_subdomain];
  }
  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim,spacedim>::n_global_levels () const
  {
    return number_cache.n_global_levels;
  }
  template <int dim, int spacedim>
  types::global_dof_index
  Triangulation<dim,spacedim>::n_global_active_cells () const
  {
    return number_cache.n_global_active_cells;
  }
  template <int dim, int spacedim>
  const std::vector<unsigned int> &
  Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
  {
    return number_cache.n_locally_owned_active_cells;
  }
  template <int dim, int spacedim>
  MPI_Comm
  Triangulation<dim,spacedim>::get_communicator () const
  {
    return mpi_communicator;
  }
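  // update_number_cache() recomputes the data cached in NumberCache after
  // the mesh changes: the number of active cells owned by each process
  // (shared among all processes via MPI_Allgather), the global number of
  // active cells (the sum of those counts), the global number of levels
  // (a max reduction), and the set of subdomain ids owning ghost cells.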
#ifdef DEAL_II_WITH_MPI
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::update_number_cache ()
  {
    Assert (number_cache.n_locally_owned_active_cells.size()
            == Utilities::MPI::n_mpi_processes (this->mpi_communicator),
            ExcInternalError());

    std::fill (number_cache.n_locally_owned_active_cells.begin(),
               number_cache.n_locally_owned_active_cells.end(),
               0);

    number_cache.ghost_owners.clear ();
    number_cache.level_ghost_owners.clear ();

    if (this->n_levels() == 0)
      {
        // Skip the communication below if there are no cells anywhere,
        // for example when this function is reached while an exception is
        // propagating out of the destructor, where the collective calls
        // would otherwise hang.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels = 0;
        return;
      }

    // find ghost owners
    for (typename Triangulation<dim,spacedim>::active_cell_iterator
         cell = this->begin_active();
         cell != this->end(); ++cell)
      if (cell->is_ghost())
        number_cache.ghost_owners.insert(cell->subdomain_id());

    // count the cells this process owns
    if (this->n_levels() > 0)
      for (typename Triangulation<dim,spacedim>::active_cell_iterator
           cell = this->begin_active();
           cell != this->end(); ++cell)
        if (cell->subdomain_id() == my_subdomain)
          ++number_cache.n_locally_owned_active_cells[my_subdomain];

    // exchange the counts so that every process knows how many cells
    // each of the others owns
    unsigned int send_value
      = number_cache.n_locally_owned_active_cells[my_subdomain];
    const int ierr = MPI_Allgather (&send_value,
                                    1,
                                    MPI_UNSIGNED,
                                    number_cache.n_locally_owned_active_cells.data(),
                                    1,
                                    MPI_UNSIGNED,
                                    this->mpi_communicator);
    AssertThrowMPI(ierr);

    number_cache.n_global_active_cells
      = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
                         number_cache.n_locally_owned_active_cells.end(),
                         // ensure the sum is computed in the wider global
                         // index type:
                         static_cast<types::global_dof_index>(0));
    number_cache.n_global_levels
      = Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
  }
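  // fill_level_ghost_owners() collects the subdomain ids that own level
  // ghost cells, i.e. cells of the multigrid hierarchy that are neither
  // artificial nor locally owned. In debug builds it additionally verifies
  // that this neighbor relation is symmetric by exchanging a dummy message
  // with every recorded owner.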
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::fill_level_ghost_owners ()
  {
    number_cache.level_ghost_owners.clear ();

    // if there is nothing to do, then do nothing
    if (this->n_levels() == 0)
      return;

    // find level ghost owners
    for (typename Triangulation<dim,spacedim>::cell_iterator
         cell = this->begin();
         cell != this->end(); ++cell)
      if (cell->level_subdomain_id() != numbers::artificial_subdomain_id
          && cell->level_subdomain_id() != this->locally_owned_subdomain())
        this->number_cache.level_ghost_owners.insert(cell->level_subdomain_id());

#ifdef DEBUG
    // Check that level_ghost_owners is symmetric by sending a message
    // to everyone
    {
      int ierr = MPI_Barrier(this->mpi_communicator);
      AssertThrowMPI(ierr);

      // important: preallocate to avoid (re)allocation:
      std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size());
      unsigned int dummy = 0;
      unsigned int req_counter = 0;

      for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
           it != this->number_cache.level_ghost_owners.end();
           ++it, ++req_counter)
        {
          ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED,
                           *it, 9001, this->mpi_communicator,
                           &requests[req_counter]);
          AssertThrowMPI(ierr);
        }

      for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
           it != this->number_cache.level_ghost_owners.end();
           ++it)
        {
          // use a separate receive buffer: the send buffer above is still
          // in use by the pending MPI_Isend operations
          unsigned int dummy;
          ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED,
                          *it, 9001, this->mpi_communicator,
                          MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

      if (requests.size() > 0)
        {
          ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      ierr = MPI_Barrier(this->mpi_communicator);
      AssertThrowMPI(ierr);
    }
#endif
  }
#else

  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::update_number_cache ()
  {
    Assert (false, ExcNotImplemented());
  }
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::fill_level_ghost_owners ()
  {
    Assert (false, ExcNotImplemented());
  }

#endif
  template <int dim, int spacedim>
  types::subdomain_id
  Triangulation<dim,spacedim>::locally_owned_subdomain () const
  {
    return my_subdomain;
  }
  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  Triangulation<dim,spacedim>::ghost_owners () const
  {
    return number_cache.ghost_owners;
  }
  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  Triangulation<dim,spacedim>::level_ghost_owners () const
  {
    return number_cache.level_ghost_owners;
  }
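  // compute_vertices_with_ghost_neighbors() maps each vertex of the
  // locally owned part of the mesh to the set of subdomain ids of the
  // ghost cells adjacent to it. It works in two passes: first flag every
  // vertex touched by a locally owned cell, then, for each ghost cell,
  // record its owner at every flagged vertex it touches.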
  template <int dim, int spacedim>
  std::map<unsigned int, std::set<dealii::types::subdomain_id> >
  Triangulation<dim,spacedim>::compute_vertices_with_ghost_neighbors () const
  {
    // periodic neighbors are not treated correctly here, hence the
    // restriction to meshes without periodic face identifications
    Assert(this->get_periodic_face_map().size()==0,
           ExcNotImplemented());

    std::vector<bool> vertex_of_own_cell(this->n_vertices(), false);

    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_locally_owned())
        for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
          vertex_of_own_cell[cell->vertex_index(v)] = true;

    std::map<unsigned int, std::set<dealii::types::subdomain_id> > result;
    for (const auto &cell : this->active_cell_iterators())
      if (cell->is_ghost())
        {
          const types::subdomain_id owner = cell->subdomain_id();
          for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
            if (vertex_of_own_cell[cell->vertex_index(v)])
              result[cell->vertex_index(v)].insert(owner);
        }

    return result;
  }
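  // The templates above are explicitly instantiated for the supported
  // <dim, spacedim> combinations through the generated "tria_base.inst"
  // file included below, following the usual deal.II convention of keeping
  // template definitions in .cc files.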
} // namespace parallel


/*------------------- Explicit Instantiations -------------------*/
#include "tria_base.inst"

DEAL_II_NAMESPACE_CLOSE