Reference documentation for deal.II version 9.0.0
tria_base.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2015 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------


#include <deal.II/base/utilities.h>
#include <deal.II/base/memory_consumption.h>
#include <deal.II/base/logstream.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/sparsity_pattern.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/distributed/tria_base.h>


#include <algorithm>
#include <numeric>
#include <iostream>
#include <fstream>


DEAL_II_NAMESPACE_OPEN

namespace parallel
{

  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
                                              const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid,
                                              const bool check_for_distorted_cells)
    :
    dealii::Triangulation<dim,spacedim>(smooth_grid,check_for_distorted_cells),
    mpi_communicator (mpi_communicator),
    my_subdomain (Utilities::MPI::this_mpi_process (this->mpi_communicator)),
    n_subdomains(Utilities::MPI::n_mpi_processes (this->mpi_communicator))
  {
#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcMessage("You compiled deal.II without MPI support, for "
                             "which parallel::Triangulation is not available."));
#endif
    number_cache.n_locally_owned_active_cells.resize (n_subdomains);
  }
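
  // --- Illustrative usage sketch, not part of tria_base.cc -------------------
  // The constructor above is not usually called directly, since
  // parallel::Triangulation only acts as a common base class. A minimal sketch
  // of how one typically reaches it, assuming deal.II was configured with MPI
  // (and p4est for the distributed variant):
  //
  // @code
  //   #include <deal.II/distributed/tria.h>
  //   #include <deal.II/grid/grid_generator.h>
  //
  //   parallel::distributed::Triangulation<2> tria (MPI_COMM_WORLD);
  //   GridGenerator::hyper_cube (tria);
  //   tria.refine_global (3);   // cells are now partitioned among the MPI ranks
  // @endcode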



  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::copy_triangulation (const dealii::Triangulation<dim, spacedim> &other_tria)
  {
#ifndef DEAL_II_WITH_MPI
    (void)other_tria;
    Assert(false, ExcMessage("You compiled deal.II without MPI support, for "
                             "which parallel::Triangulation is not available."));
#else
    dealii::Triangulation<dim,spacedim>::copy_triangulation (other_tria);

    if (const dealii::parallel::Triangulation<dim,spacedim> *
        other_tria_x = dynamic_cast<const dealii::parallel::Triangulation<dim,spacedim> *>(&other_tria))
      {
        mpi_communicator = other_tria_x->get_communicator();

        // release unused vector memory because we will have very different
        // vectors now
        dealii::internal::GrowingVectorMemoryImplementation::release_all_unused_memory();
      }
#endif
  }
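
  // --- Illustrative usage sketch, not part of tria_base.cc -------------------
  // A hedged sketch of how copy_triangulation() is typically reached: build a
  // coarse mesh in a serial Triangulation and copy it into a parallel one,
  // which then partitions it among the ranks. Assumes an MPI/p4est build and
  // the usual GridGenerator header.
  //
  // @code
  //   Triangulation<2>                        serial_tria;
  //   GridGenerator::hyper_ball (serial_tria);   // coarse mesh, not yet refined
  //
  //   parallel::distributed::Triangulation<2> tria (MPI_COMM_WORLD);
  //   tria.copy_triangulation (serial_tria);     // copy, then partition
  //   tria.refine_global (2);
  // @endcode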



  template <int dim, int spacedim>
  std::size_t
  Triangulation<dim,spacedim>::memory_consumption () const
  {
    std::size_t mem=
      this->dealii::Triangulation<dim,spacedim>::memory_consumption()
      + MemoryConsumption::memory_consumption(mpi_communicator)
      + MemoryConsumption::memory_consumption(my_subdomain)
      + MemoryConsumption::memory_consumption(number_cache.n_locally_owned_active_cells)
      + MemoryConsumption::memory_consumption(number_cache.n_global_active_cells)
      + MemoryConsumption::memory_consumption(number_cache.n_global_levels);
    return mem;

  }
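
  // --- Illustrative usage sketch, not part of tria_base.cc -------------------
  // memory_consumption() is a per-process number; to get a global picture one
  // usually reduces it over the communicator. A minimal sketch, assuming a
  // parallel triangulation `tria` as in the earlier sketches:
  //
  // @code
  //   const Utilities::MPI::MinMaxAvg stats
  //     = Utilities::MPI::min_max_avg (static_cast<double>(tria.memory_consumption()),
  //                                    tria.get_communicator());
  //   if (Utilities::MPI::this_mpi_process (tria.get_communicator()) == 0)
  //     std::cout << "triangulation memory: avg " << stats.avg
  //               << " bytes/rank, max " << stats.max << std::endl;
  // @endcode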

  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::~Triangulation ()
  {
    // release unused vector memory because the vector layout is going to look
    // very different now
    dealii::internal::GrowingVectorMemoryImplementation::release_all_unused_memory();
  }

  template <int dim, int spacedim>
  Triangulation<dim,spacedim>::NumberCache::NumberCache()
    :
    n_global_active_cells(0),
    n_global_levels(0)
  {}

  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim,spacedim>::n_locally_owned_active_cells () const
  {
    return number_cache.n_locally_owned_active_cells[my_subdomain];
  }

  template <int dim, int spacedim>
  unsigned int
  Triangulation<dim,spacedim>::n_global_levels () const
  {
    return number_cache.n_global_levels;
  }

  template <int dim, int spacedim>
  types::global_dof_index
  Triangulation<dim,spacedim>::n_global_active_cells () const
  {
    return number_cache.n_global_active_cells;
  }

  template <int dim, int spacedim>
  const std::vector<unsigned int> &
  Triangulation<dim,spacedim>::n_locally_owned_active_cells_per_processor () const
  {
    return number_cache.n_locally_owned_active_cells;
  }

  template <int dim, int spacedim>
  MPI_Comm
  Triangulation<dim,spacedim>::get_communicator () const
  {
    return mpi_communicator;
  }
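
  // --- Illustrative usage sketch, not part of tria_base.cc -------------------
  // A short sketch of the query interface defined above: every rank sees the
  // same global numbers, while the locally owned count differs per rank.
  // Assumes a parallel triangulation `tria` as in the earlier sketches.
  //
  // @code
  //   const unsigned int my_rank
  //     = Utilities::MPI::this_mpi_process (tria.get_communicator());
  //   std::cout << "rank " << my_rank
  //             << " owns " << tria.n_locally_owned_active_cells()
  //             << " of "   << tria.n_global_active_cells()
  //             << " active cells on " << tria.n_global_levels()
  //             << " global levels" << std::endl;
  // @endcode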

#ifdef DEAL_II_WITH_MPI
  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::update_number_cache ()
  {
    Assert (number_cache.n_locally_owned_active_cells.size()
            ==
            Utilities::MPI::n_mpi_processes (this->mpi_communicator),
            ExcInternalError());

    std::fill (number_cache.n_locally_owned_active_cells.begin(),
               number_cache.n_locally_owned_active_cells.end(),
               0);

    number_cache.ghost_owners.clear ();
    number_cache.level_ghost_owners.clear ();

    if (this->n_levels() == 0)
      {
        // Skip communication done below if we do not have any cells
        // (meaning the Triangulation is empty on all processors). This will
        // happen when called from the destructor of Triangulation, which
        // can get called during exception handling causing a hang in this
        // function.
        number_cache.n_global_active_cells = 0;
        number_cache.n_global_levels = 0;
        return;
      }


    {
      // find ghost owners
      for (typename Triangulation<dim,spacedim>::active_cell_iterator
           cell = this->begin_active();
           cell != this->end();
           ++cell)
        if (cell->is_ghost())
          number_cache.ghost_owners.insert(cell->subdomain_id());

      Assert(number_cache.ghost_owners.size() < Utilities::MPI::n_mpi_processes(mpi_communicator), ExcInternalError());
    }

    if (this->n_levels() > 0)
      for (typename Triangulation<dim,spacedim>::active_cell_iterator
           cell = this->begin_active();
           cell != this->end(); ++cell)
        if (cell->subdomain_id() == my_subdomain)
          ++number_cache.n_locally_owned_active_cells[my_subdomain];

    unsigned int send_value
      = number_cache.n_locally_owned_active_cells[my_subdomain];
    const int ierr = MPI_Allgather (&send_value,
                                    1,
                                    MPI_UNSIGNED,
                                    number_cache.n_locally_owned_active_cells.data(),
                                    1,
                                    MPI_UNSIGNED,
                                    this->mpi_communicator);
    AssertThrowMPI(ierr);

    number_cache.n_global_active_cells
      = std::accumulate (number_cache.n_locally_owned_active_cells.begin(),
                         number_cache.n_locally_owned_active_cells.end(),
                         /* ensure sum is computed with correct data type:*/
                         static_cast<types::global_dof_index>(0));
    number_cache.n_global_levels = Utilities::MPI::max(this->n_levels(), this->mpi_communicator);
  }
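
  // --- Illustrative sketch, not part of tria_base.cc -------------------------
  // The core communication pattern of update_number_cache(), distilled into a
  // hedged, stand-alone MPI sketch: every rank contributes one local count via
  // MPI_Allgather, and the global total is the sum of the gathered vector,
  // accumulated in a wide integer type to avoid overflow. Assumes MPI has been
  // initialized and that <numeric> and <cstdint> are available.
  //
  // @code
  //   const MPI_Comm             comm        = MPI_COMM_WORLD;
  //   unsigned int               local_count = 42;   // this rank's contribution
  //   const unsigned int         n_ranks     = Utilities::MPI::n_mpi_processes (comm);
  //   std::vector<unsigned int>  counts (n_ranks);
  //
  //   const int ierr = MPI_Allgather (&local_count, 1, MPI_UNSIGNED,
  //                                   counts.data(), 1, MPI_UNSIGNED, comm);
  //   AssertThrowMPI (ierr);
  //
  //   const std::uint64_t global_count
  //     = std::accumulate (counts.begin(), counts.end(), std::uint64_t(0));
  // @endcode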



  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::fill_level_ghost_owners ()
  {
    number_cache.level_ghost_owners.clear ();

    // if there is nothing to do, then do nothing
    if (this->n_levels() == 0)
      return;

    // find level ghost owners
    for (typename Triangulation<dim,spacedim>::cell_iterator
         cell = this->begin();
         cell != this->end();
         ++cell)
      if (cell->level_subdomain_id() != numbers::artificial_subdomain_id
          && cell->level_subdomain_id() != this->locally_owned_subdomain())
        this->number_cache.level_ghost_owners.insert(cell->level_subdomain_id());

#ifdef DEBUG
    // Check that level_ghost_owners is symmetric by sending a message to everyone
    {
      int ierr = MPI_Barrier(this->mpi_communicator);
      AssertThrowMPI(ierr);

      // important: preallocate to avoid (re)allocation:
      std::vector<MPI_Request> requests (this->number_cache.level_ghost_owners.size());
      unsigned int dummy = 0;
      unsigned int req_counter = 0;

      for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
           it != this->number_cache.level_ghost_owners.end();
           ++it, ++req_counter)
        {
          ierr = MPI_Isend(&dummy, 1, MPI_UNSIGNED,
                           *it, 9001, this->mpi_communicator,
                           &requests[req_counter]);
          AssertThrowMPI(ierr);
        }

      for (std::set<types::subdomain_id>::iterator it = this->number_cache.level_ghost_owners.begin();
           it != this->number_cache.level_ghost_owners.end();
           ++it)
        {
          unsigned int dummy;
          ierr = MPI_Recv(&dummy, 1, MPI_UNSIGNED,
                          *it, 9001, this->mpi_communicator,
                          MPI_STATUS_IGNORE);
          AssertThrowMPI(ierr);
        }

      if (requests.size() > 0)
        {
          ierr = MPI_Waitall(requests.size(), requests.data(), MPI_STATUSES_IGNORE);
          AssertThrowMPI(ierr);
        }

      ierr = MPI_Barrier(this->mpi_communicator);
      AssertThrowMPI(ierr);
    }
#endif

    Assert(this->number_cache.level_ghost_owners.size() < Utilities::MPI::n_mpi_processes(this->mpi_communicator),
           ExcInternalError());
  }
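
  // --- Illustrative sketch, not part of tria_base.cc -------------------------
  // Why the DEBUG block above verifies symmetry: every rank posts one
  // non-blocking send to each rank it *thinks* is a level-ghost neighbor and
  // then blocks in one receive per neighbor. The receives can only all be
  // matched if every neighbor also lists this rank, i.e. if the relation is
  // symmetric; otherwise the check hangs or fails. The same handshake in a
  // generic, hedged form, where `neighbors` stands for a set such as
  // tria.level_ghost_owners() and the tag value is arbitrary:
  //
  // @code
  //   std::vector<MPI_Request> requests (neighbors.size());
  //   unsigned int             send_dummy = 0;
  //   unsigned int             i          = 0;
  //   for (const types::subdomain_id neighbor : neighbors)
  //     MPI_Isend (&send_dummy, 1, MPI_UNSIGNED, neighbor, /*tag*/ 42, comm, &requests[i++]);
  //   for (const types::subdomain_id neighbor : neighbors)
  //     {
  //       unsigned int recv_dummy;
  //       MPI_Recv (&recv_dummy, 1, MPI_UNSIGNED, neighbor, /*tag*/ 42, comm, MPI_STATUS_IGNORE);
  //     }
  //   MPI_Waitall (requests.size(), requests.data(), MPI_STATUSES_IGNORE);
  // @endcode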

#else

  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::update_number_cache ()
  {
    Assert (false, ExcNotImplemented());
  }

  template <int dim, int spacedim>
  void
  Triangulation<dim,spacedim>::fill_level_ghost_owners ()
  {
    Assert(false, ExcNotImplemented());
  }

#endif

  template <int dim, int spacedim>
  types::subdomain_id
  Triangulation<dim,spacedim>::locally_owned_subdomain () const
  {
    return my_subdomain;
  }



  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  Triangulation<dim,spacedim>::ghost_owners () const
  {
    return number_cache.ghost_owners;
  }



  template <int dim, int spacedim>
  const std::set<types::subdomain_id> &
  Triangulation<dim,spacedim>::level_ghost_owners () const
  {
    return number_cache.level_ghost_owners;
  }
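
  // --- Illustrative usage sketch, not part of tria_base.cc -------------------
  // ghost_owners() is the typical starting point for setting up point-to-point
  // communication with the ranks whose cells border the local subdomain. A
  // hedged sketch, assuming a parallel triangulation `tria`:
  //
  // @code
  //   for (const types::subdomain_id neighbor : tria.ghost_owners())
  //     std::cout << "rank " << tria.locally_owned_subdomain()
  //               << " has ghost cells owned by rank " << neighbor << std::endl;
  // @endcode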



  template <int dim, int spacedim>
  std::map<unsigned int, std::set<dealii::types::subdomain_id> >
  Triangulation<dim,spacedim>::compute_vertices_with_ghost_neighbors () const
  {
    // TODO: we are not treating periodic neighbors correctly here. If we do
    // we can remove the overriding implementation for p::d::Triangulation
    // that is currently using a p4est callback to get correct ghost neighbors
    // over periodic faces.
    Assert(this->get_periodic_face_map().size()==0,
           ExcNotImplemented());


    std::vector<bool> vertex_of_own_cell(this->n_vertices(), false);

    for (auto cell : this->active_cell_iterators())
      if (cell->is_locally_owned())
        for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
          vertex_of_own_cell[cell->vertex_index(v)] = true;

    std::map<unsigned int, std::set<dealii::types::subdomain_id> > result;
    for (auto cell : this->active_cell_iterators())
      if (cell->is_ghost())
        {
          const types::subdomain_id owner = cell->subdomain_id();
          for (unsigned int v=0; v<GeometryInfo<dim>::vertices_per_cell; ++v)
            {
              if (vertex_of_own_cell[cell->vertex_index(v)])
                result[cell->vertex_index(v)].insert(owner);
            }
        }

    return result;
  }
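
  // --- Illustrative usage sketch, not part of tria_base.cc -------------------
  // The map returned above associates each vertex of a locally owned cell with
  // the set of neighboring ranks whose ghost cells also touch that vertex. A
  // hedged sketch of inspecting it for a parallel triangulation `tria`:
  //
  // @code
  //   const std::map<unsigned int, std::set<types::subdomain_id> >
  //     vertex_neighbors = tria.compute_vertices_with_ghost_neighbors();
  //
  //   for (const auto &entry : vertex_neighbors)
  //     std::cout << "vertex " << entry.first << " is shared with "
  //               << entry.second.size() << " neighboring rank(s)" << std::endl;
  // @endcode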

} // end namespace parallel




/*-------------- Explicit Instantiations -------------------------------*/
#include "tria_base.inst"

DEAL_II_NAMESPACE_CLOSE