Reference documentation for deal.II version 9.1.1
tria_base.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2015 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/logstream.h>
18 #include <deal.II/base/memory_consumption.h>
19 #include <deal.II/base/utilities.h>
20 
21 #include <deal.II/distributed/tria_base.h>
22 
23 #include <deal.II/grid/grid_tools.h>
24 #include <deal.II/grid/tria.h>
25 #include <deal.II/grid/tria_accessor.h>
26 #include <deal.II/grid/tria_iterator.h>
27 
28 #include <deal.II/lac/sparsity_pattern.h>
29 #include <deal.II/lac/sparsity_tools.h>
30 #include <deal.II/lac/vector_memory.h>
31 
32 #include <algorithm>
33 #include <fstream>
34 #include <iostream>
35 #include <numeric>
36 
37 
38 DEAL_II_NAMESPACE_OPEN
39 
40 namespace parallel
41 {
42  template <int dim, int spacedim>
43  Triangulation<dim, spacedim>::Triangulation(
44  MPI_Comm mpi_communicator,
45  const typename dealii::Triangulation<dim, spacedim>::MeshSmoothing
46  smooth_grid,
47  const bool check_for_distorted_cells)
48  : dealii::Triangulation<dim, spacedim>(smooth_grid,
49  check_for_distorted_cells)
50  , mpi_communicator(mpi_communicator)
51  , my_subdomain(Utilities::MPI::this_mpi_process(this->mpi_communicator))
52  , n_subdomains(Utilities::MPI::n_mpi_processes(this->mpi_communicator))
53  {
54 #ifndef DEAL_II_WITH_MPI
55  Assert(false,
56  ExcMessage("You compiled deal.II without MPI support, for "
57  "which parallel::Triangulation is not available."));
58 #endif
59  number_cache.n_locally_owned_active_cells.resize(n_subdomains);
60  }
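 // A minimal usage sketch (assuming deal.II's parallel::distributed::Triangulation,
 // which derives from this class, and GridGenerator::hyper_cube): the quantities
 // cached by this base class are what user code later queries, e.g.
 //
 //   parallel::distributed::Triangulation<2> tria(MPI_COMM_WORLD);
 //   GridGenerator::hyper_cube(tria);
 //   tria.refine_global(3);
 //   const unsigned int n_local  = tria.n_locally_owned_active_cells();
 //   const auto         n_global = tria.n_global_active_cells();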
61 
62 
63 
64  template <int dim, int spacedim>
65  void
66  Triangulation<dim, spacedim>::copy_triangulation(
67  const dealii::Triangulation<dim, spacedim> &other_tria)
68  {
69 #ifndef DEAL_II_WITH_MPI
70  (void)other_tria;
71  Assert(false,
72  ExcMessage("You compiled deal.II without MPI support, for "
73  "which parallel::Triangulation is not available."));
74 #else
75  dealii::Triangulation<dim, spacedim>::copy_triangulation(other_tria);
76 
77  if (const dealii::parallel::Triangulation<dim, spacedim> *other_tria_x =
78  dynamic_cast<const dealii::parallel::Triangulation<dim, spacedim> *>(
79  &other_tria))
80  {
81  mpi_communicator = other_tria_x->get_communicator();
82 
83  // release unused vector memory because we will have very different
84  // vectors now
85  dealii::internal::GrowingVectorMemoryImplementation::
86  release_all_unused_memory();
87  }
88 #endif
89  }
90 
91 
92 
93  template <int dim, int spacedim>
94  std::size_t
95  Triangulation<dim, spacedim>::memory_consumption() const
96  {
97  std::size_t mem =
98  this->dealii::Triangulation<dim, spacedim>::memory_consumption() +
99  MemoryConsumption::memory_consumption(mpi_communicator) +
100  MemoryConsumption::memory_consumption(my_subdomain) +
101  MemoryConsumption::memory_consumption(
102  number_cache.n_locally_owned_active_cells) +
103  MemoryConsumption::memory_consumption(
104  number_cache.n_global_active_cells) +
105  MemoryConsumption::memory_consumption(number_cache.n_global_levels);
106  return mem;
107  }
108 
109  template <int dim, int spacedim>
110  Triangulation<dim, spacedim>::~Triangulation()
111  {
112  // release unused vector memory because the vector layout is going to look
113  // very different now
114  dealii::internal::GrowingVectorMemoryImplementation::
115  release_all_unused_memory();
116  }
117 
118  template <int dim, int spacedim>
119  Triangulation<dim, spacedim>::NumberCache::NumberCache()
120  : n_global_active_cells(0)
121  , n_global_levels(0)
122  {}
123 
124  template <int dim, int spacedim>
125  unsigned int
126  Triangulation<dim, spacedim>::n_locally_owned_active_cells() const
127  {
128  return number_cache.n_locally_owned_active_cells[my_subdomain];
129  }
130 
131  template <int dim, int spacedim>
132  unsigned int
133  Triangulation<dim, spacedim>::n_global_levels() const
134  {
135  return number_cache.n_global_levels;
136  }
137 
138  template <int dim, int spacedim>
139  types::global_dof_index
140  Triangulation<dim, spacedim>::n_global_active_cells() const
141  {
142  return number_cache.n_global_active_cells;
143  }
144 
145  template <int dim, int spacedim>
146  const std::vector<unsigned int> &
147  Triangulation<dim, spacedim>::n_locally_owned_active_cells_per_processor()
148  const
149  {
150  return number_cache.n_locally_owned_active_cells;
151  }
152 
153  template <int dim, int spacedim>
154  MPI_Comm
155  Triangulation<dim, spacedim>::get_communicator() const
156  {
157  return mpi_communicator;
158  }
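 // A short sketch of how the stored communicator is typically consumed
 // (hypothetical caller code): objects built on top of the mesh are created on
 // the same communicator that partitions it, e.g.
 //
 //   const MPI_Comm     comm    = tria.get_communicator();
 //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);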
159 
160 #ifdef DEAL_II_WITH_MPI
161  template <int dim, int spacedim>
162  void
163  Triangulation<dim, spacedim>::update_number_cache()
164  {
165  Assert(number_cache.n_locally_owned_active_cells.size() ==
166  Utilities::MPI::n_mpi_processes(this->mpi_communicator),
167  ExcInternalError());
168 
169  std::fill(number_cache.n_locally_owned_active_cells.begin(),
170  number_cache.n_locally_owned_active_cells.end(),
171  0);
172 
173  number_cache.ghost_owners.clear();
174  number_cache.level_ghost_owners.clear();
175 
176  if (this->n_levels() == 0)
177  {
178  // Skip communication done below if we do not have any cells
179  // (meaning the Triangulation is empty on all processors). This will
180  // happen when called from the destructor of Triangulation, which
181  // can get called during exception handling causing a hang in this
182  // function.
183  number_cache.n_global_active_cells = 0;
184  number_cache.n_global_levels = 0;
185  return;
186  }
187 
188 
189  {
190  // find ghost owners
191  for (typename Triangulation<dim, spacedim>::active_cell_iterator cell =
192  this->begin_active();
193  cell != this->end();
194  ++cell)
195  if (cell->is_ghost())
196  number_cache.ghost_owners.insert(cell->subdomain_id());
197 
198  Assert(number_cache.ghost_owners.size() <
199  Utilities::MPI::n_mpi_processes(mpi_communicator),
200  ExcInternalError());
201  }
202 
203  if (this->n_levels() > 0)
204  for (typename Triangulation<dim, spacedim>::active_cell_iterator cell =
205  this->begin_active();
206  cell != this->end();
207  ++cell)
208  if (cell->subdomain_id() == my_subdomain)
209  ++number_cache.n_locally_owned_active_cells[my_subdomain];
210 
211  unsigned int send_value =
212  number_cache.n_locally_owned_active_cells[my_subdomain];
213  const int ierr =
214  MPI_Allgather(&send_value,
215  1,
216  MPI_UNSIGNED,
217  number_cache.n_locally_owned_active_cells.data(),
218  1,
219  MPI_UNSIGNED,
220  this->mpi_communicator);
221  AssertThrowMPI(ierr);
222 
223  number_cache.n_global_active_cells =
224  std::accumulate(number_cache.n_locally_owned_active_cells.begin(),
225  number_cache.n_locally_owned_active_cells.end(),
226  /* ensure sum is computed with correct data type:*/
227  static_cast<types::global_dof_index>(0));
228  number_cache.n_global_levels =
229  Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
230  }
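 // Worked example of the exchange above (assuming three MPI ranks owning 10,
 // 12 and 11 active cells): after MPI_Allgather every rank holds
 // n_locally_owned_active_cells = {10, 12, 11}; std::accumulate then yields
 // n_global_active_cells = 33 on every rank, and Utilities::MPI::max over
 // n_levels() gives the globally deepest refinement level.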
231 
232 
233 
234  template <int dim, int spacedim>
235  void
236  Triangulation<dim, spacedim>::fill_level_ghost_owners()
237  {
238  number_cache.level_ghost_owners.clear();
239 
240  // if there is nothing to do, then do nothing
241  if (this->n_levels() == 0)
242  return;
243 
244  // find level ghost owners
245  for (typename Triangulation<dim, spacedim>::cell_iterator cell =
246  this->begin();
247  cell != this->end();
248  ++cell)
249  if (cell->level_subdomain_id() != numbers::artificial_subdomain_id &&
250  cell->level_subdomain_id() != this->locally_owned_subdomain())
251  this->number_cache.level_ghost_owners.insert(
252  cell->level_subdomain_id());
253 
254 # ifdef DEBUG
255  // Check that level_ghost_owners is symmetric by sending a message to
256  // everyone
257  {
258  int ierr = MPI_Barrier(this->mpi_communicator);
259  AssertThrowMPI(ierr);
260 
261  // important: preallocate to avoid (re)allocation:
262  std::vector<MPI_Request> requests(
263  this->number_cache.level_ghost_owners.size());
264  unsigned int dummy = 0;
265  unsigned int req_counter = 0;
266 
267  for (std::set<types::subdomain_id>::iterator it =
268  this->number_cache.level_ghost_owners.begin();
269  it != this->number_cache.level_ghost_owners.end();
270  ++it, ++req_counter)
271  {
272  ierr = MPI_Isend(&dummy,
273  1,
274  MPI_UNSIGNED,
275  *it,
276  9001,
277  this->mpi_communicator,
278  &requests[req_counter]);
279  AssertThrowMPI(ierr);
280  }
281 
282  for (std::set<types::subdomain_id>::iterator it =
283  this->number_cache.level_ghost_owners.begin();
284  it != this->number_cache.level_ghost_owners.end();
285  ++it)
286  {
287  unsigned int dummy;
288  ierr = MPI_Recv(&dummy,
289  1,
290  MPI_UNSIGNED,
291  *it,
292  9001,
293  this->mpi_communicator,
294  MPI_STATUS_IGNORE);
295  AssertThrowMPI(ierr);
296  }
297 
298  if (requests.size() > 0)
299  {
300  ierr =
301  MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
302  AssertThrowMPI(ierr);
303  }
304 
305  ierr = MPI_Barrier(this->mpi_communicator);
306  AssertThrowMPI(ierr);
307  }
308 # endif
309 
310  Assert(this->number_cache.level_ghost_owners.size() <
311  Utilities::MPI::n_mpi_processes(this->mpi_communicator),
312  ExcInternalError());
313  }
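 // Note on the DEBUG block above: level ghost ownership is expected to be
 // symmetric, i.e. if this rank sees a level cell owned by rank p, then rank p
 // also sees a level cell owned by this rank. Every MPI_Isend is therefore
 // matched by exactly one MPI_Recv, and the surrounding barriers keep the
 // tag-9001 messages from interfering with unrelated communication.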
314 
315 #else
316 
317  template <int dim, int spacedim>
318  void
319  Triangulation<dim, spacedim>::update_number_cache()
320  {
321  Assert(false, ExcNotImplemented());
322  }
323 
324  template <int dim, int spacedim>
325  void
326  Triangulation<dim, spacedim>::fill_level_ghost_owners()
327  {
328  Assert(false, ExcNotImplemented());
329  }
330 
331 #endif
332 
333  template <int dim, int spacedim>
334  types::subdomain_id
335  Triangulation<dim, spacedim>::locally_owned_subdomain() const
336  {
337  return my_subdomain;
338  }
339 
340 
341 
342  template <int dim, int spacedim>
343  const std::set<types::subdomain_id> &
344  Triangulation<dim, spacedim>::ghost_owners() const
345  {
346  return number_cache.ghost_owners;
347  }
348 
349 
350 
351  template <int dim, int spacedim>
352  const std::set<types::subdomain_id> &
353  Triangulation<dim, spacedim>::level_ghost_owners() const
354  {
355  return number_cache.level_ghost_owners;
356  }
357 
358 
359 
360  template <int dim, int spacedim>
361  std::map<unsigned int, std::set<dealii::types::subdomain_id>>
362  Triangulation<dim, spacedim>::compute_vertices_with_ghost_neighbors() const
363  {
364  // TODO: we are not treating periodic neighbors correctly here. If we do
365  // we can remove the overriding implementation for p::d::Triangulation
366  // that is currently using a p4est callback to get correct ghost neighbors
367  // over periodic faces.
368  Assert(this->get_periodic_face_map().size() == 0, ExcNotImplemented());
369 
370 
371  std::vector<bool> vertex_of_own_cell(this->n_vertices(), false);
372 
373  for (const auto &cell : this->active_cell_iterators())
374  if (cell->is_locally_owned())
375  for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell; ++v)
376  vertex_of_own_cell[cell->vertex_index(v)] = true;
377 
378  std::map<unsigned int, std::set<dealii::types::subdomain_id>> result;
379  for (const auto &cell : this->active_cell_iterators())
380  if (cell->is_ghost())
381  {
382  const types::subdomain_id owner = cell->subdomain_id();
383  for (unsigned int v = 0; v < GeometryInfo<dim>::vertices_per_cell;
384  ++v)
385  {
386  if (vertex_of_own_cell[cell->vertex_index(v)])
387  result[cell->vertex_index(v)].insert(owner);
388  }
389  }
390 
391  return result;
392  }
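 // A minimal sketch of the returned data (hypothetical numbers): if a locally
 // owned cell shares vertex 42 with ghost cells owned by ranks 1 and 3, the
 // map contains the entry 42 -> {1, 3}, i.e. for each vertex on the interface
 // of the locally owned subdomain the set of neighboring subdomain ids needed
 // for vertex-based data exchange.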
393 
394 } // end namespace parallel
395 
396 
397 
398 /*-------------- Explicit Instantiations -------------------------------*/
399 #include "tria_base.inst"
400 
401 DEAL_II_NAMESPACE_CLOSE