Reference documentation for deal.II version 9.0.0
shared_tria.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2015 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 #include <deal.II/base/utilities.h>
17 #include <deal.II/base/mpi.h>
18 #include <deal.II/grid/tria.h>
19 #include <deal.II/grid/tria_accessor.h>
20 #include <deal.II/grid/tria_iterator.h>
21 #include <deal.II/grid/grid_tools.h>
22 #include <deal.II/grid/filtered_iterator.h>
23 #include <deal.II/lac/sparsity_tools.h>
24 #include <deal.II/distributed/shared_tria.h>
25 #include <deal.II/distributed/tria.h>
26 
27 
28 DEAL_II_NAMESPACE_OPEN
29 
30 #ifdef DEAL_II_WITH_MPI
31 namespace parallel
32 {
33  namespace shared
34  {
35 
36  template <int dim, int spacedim>
37  Triangulation<dim,spacedim>::Triangulation (MPI_Comm mpi_communicator,
38 const typename dealii::Triangulation<dim,spacedim>::MeshSmoothing smooth_grid,
39  const bool allow_artificial_cells,
40  const Settings settings):
41 dealii::parallel::Triangulation<dim,spacedim>(mpi_communicator,smooth_grid,false),
42  settings (settings),
43  allow_artificial_cells(allow_artificial_cells)
44  {
45 const auto partition_settings
46 = (partition_zoltan | partition_metis |
47 partition_zorder | partition_custom_signal) & settings;
48 (void)partition_settings;
49  Assert(partition_settings == partition_auto ||
50  partition_settings == partition_metis ||
51  partition_settings == partition_zoltan ||
52  partition_settings == partition_zorder ||
53  partition_settings == partition_custom_signal,
54  ExcMessage ("Settings must contain exactly one type of the active cell partitioning scheme."))
55 
56 if (settings & construct_multigrid_hierarchy)
57 Assert(allow_artificial_cells,
58 ExcMessage ("construct_multigrid_hierarchy requires allow_artificial_cells to be set to true."))
59  }
60 
61 
62 
63 template <int dim, int spacedim>
64 void Triangulation<dim,spacedim>::partition()
65 {
66 #ifdef DEBUG
67  // Check that all meshes are the same (or at least have the same
68  // total number of active cells):
69  const unsigned int max_active_cells
70  = Utilities::MPI::max(this->n_active_cells(), this->get_communicator());
71  Assert(max_active_cells == this->n_active_cells(),
72 ExcMessage("A parallel::shared::Triangulation needs to be refined in the same "
73  "way on all processors, but the participating processors don't "
74  "agree on the number of active cells."))
75 #endif
76 
77  auto partition_settings
78  = (partition_zoltan | partition_metis |
79  partition_zorder | partition_custom_signal) & settings;
80  if (partition_settings == partition_auto)
81 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
82  partition_settings = partition_zoltan;
83 #elif defined DEAL_II_WITH_METIS
84  partition_settings = partition_metis;
85 #else
86  partition_settings = partition_zorder;
87 #endif
88 
89  if (partition_settings == partition_zoltan)
90  {
91 #ifndef DEAL_II_TRILINOS_WITH_ZOLTAN
92  AssertThrow (false,
93  ExcMessage("Choosing 'partition_zoltan' requires the library "
94  "to be compiled with support for Zoltan! "
95  "Instead, you might use 'partition_auto' to select "
96  "a partitioning algorithm that is supported "
97  "by your current configuration."));
98 #else
99 GridTools::partition_triangulation (this->n_subdomains, *this,
100 SparsityTools::Partitioner::zoltan);
101 #endif
102  }
103  else if (partition_settings == partition_metis)
104  {
105 #ifndef DEAL_II_WITH_METIS
106  AssertThrow (false,
107  ExcMessage("Choosing 'partition_metis' requires the library "
108  "to be compiled with support for METIS! "
109  "Instead, you might use 'partition_auto' to select "
110  "a partitioning algorithm that is supported "
111  "by your current configuration."));
112 #else
113 GridTools::partition_triangulation (this->n_subdomains, *this,
114 SparsityTools::Partitioner::metis);
115 #endif
116  }
117  else if (partition_settings == partition_zorder)
118  {
119  GridTools::partition_triangulation_zorder (this->n_subdomains, *this);
120  }
121  else if (partition_settings == partition_custom_signal)
122  {
123  // User partitions mesh manually
124  }
125  else
126  {
127  AssertThrow(false, ExcInternalError())
128  }
129 
130  // do not partition multigrid levels if user is
131  // defining a custom partition
132  if ((settings & construct_multigrid_hierarchy) && !(settings & partition_custom_signal))
133 dealii::GridTools::partition_multigrid_levels(*this);
134 
135  true_subdomain_ids_of_cells.resize(this->n_active_cells());
136 
137  // loop over all cells and mark artificial:
138  typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
139  cell = this->begin_active(),
140  endc = this->end();
141 
142  if (allow_artificial_cells)
143  {
144  // get active halo layer of (ghost) cells
145  // parallel::shared::Triangulation<dim>::
146  std::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator &)> predicate
147  = IteratorFilters::SubdomainEqualTo(this->my_subdomain);
148 
149  const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator>
150 active_halo_layer_vector = dealii::GridTools::compute_active_cell_halo_layer (*this, predicate);
151  std::set<typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator>
152  active_halo_layer(active_halo_layer_vector.begin(), active_halo_layer_vector.end());
153 
154  for (unsigned int index=0; cell != endc; cell++, index++)
155  {
156  // store original/true subdomain ids:
157  true_subdomain_ids_of_cells[index] = cell->subdomain_id();
158 
159  if (cell->is_locally_owned() == false &&
160  active_halo_layer.find(cell) == active_halo_layer.end())
161  cell->set_subdomain_id(numbers::artificial_subdomain_id);
162  }
163 
164  // loop over all cells in multigrid hierarchy and mark artificial:
165  if (settings & construct_multigrid_hierarchy)
166  {
167  true_level_subdomain_ids_of_cells.resize(this->n_levels());
168 
169 std::function<bool (const typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator &)> predicate
170 = IteratorFilters::LocallyOwnedLevelCell();
171 for (unsigned int lvl=0; lvl<this->n_levels(); ++lvl)
172  {
173  true_level_subdomain_ids_of_cells[lvl].resize(this->n_cells(lvl));
174 
175  const std::vector<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
176 level_halo_layer_vector = dealii::GridTools::compute_cell_halo_layer_on_level (*this, predicate, lvl);
177  std::set<typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator>
178  level_halo_layer(level_halo_layer_vector.begin(), level_halo_layer_vector.end());
179 
180  typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
181  cell = this->begin(lvl),
182  endc = this->end(lvl);
183  for (unsigned int index=0; cell != endc; cell++, index++)
184  {
185  // Store true level subdomain IDs before setting artificial
186  true_level_subdomain_ids_of_cells[lvl][index] = cell->level_subdomain_id();
187 
188  // for active cells, we must have knowledge of level subdomain ids of
189  // all neighbors to our subdomain, not just neighbors on the same level.
190 // if the cell's subdomain id was not set to artificial above, we will
191  // also keep its level subdomain id since it is either owned by this processor
192  // or in the ghost layer of the active mesh.
193  if (!cell->has_children() && cell->subdomain_id() != numbers::artificial_subdomain_id)
194  continue;
195 
196  // we must have knowledge of our parent in the hierarchy
197  if (cell->has_children())
198  {
199  bool keep_cell = false;
200  for (unsigned int c=0; c<GeometryInfo<dim>::max_children_per_cell; ++c)
201  if (cell->child(c)->level_subdomain_id() == this->my_subdomain)
202  {
203  keep_cell = true;
204  break;
205  }
206  if (keep_cell)
207  continue;
208  }
209 
210  // we must have knowledge of our neighbors on the same level
211  if (!cell->is_locally_owned_on_level() &&
212  level_halo_layer.find(cell) != level_halo_layer.end())
213  continue;
214 
215 // mark all other cells as artificial
216  cell->set_level_subdomain_id(numbers::artificial_subdomain_id);
217  }
218  }
219  }
220  }
221  else
222  {
223  // just store true subdomain ids
224  for (unsigned int index=0; cell != endc; cell++, index++)
225  true_subdomain_ids_of_cells[index] = cell->subdomain_id();
226  }
227 
228 #ifdef DEBUG
229  {
230  // Assert that each cell is owned by a processor
231  unsigned int n_my_cells = 0;
232  typename parallel::shared::Triangulation<dim,spacedim>::active_cell_iterator
233  cell = this->begin_active(),
234  endc = this->end();
235  for (; cell!=endc; ++cell)
236  if (cell->is_locally_owned())
237  n_my_cells += 1;
238 
239  const unsigned int total_cells
240  = Utilities::MPI::sum(n_my_cells, this->get_communicator());
241  Assert(total_cells == this->n_active_cells(),
242  ExcMessage("Not all cells are assigned to a processor."))
243  }
244 
245  // If running with multigrid, assert that each level
246  // cell is owned by a processor
247  if (settings & construct_multigrid_hierarchy)
248  {
249  unsigned int n_my_cells = 0;
250  typename parallel::shared::Triangulation<dim,spacedim>::cell_iterator
251  cell = this->begin(),
252  endc = this->end();
253  for (; cell!=endc; ++cell)
254  if (cell->is_locally_owned_on_level())
255  n_my_cells += 1;
256 
257  const unsigned int total_cells
258  = Utilities::MPI::sum(n_my_cells, this->get_communicator());
259  Assert(total_cells == this->n_cells(),
260  ExcMessage("Not all cells are assigned to a processor."))
261  }
262 #endif
263  }
264 
265 
266 
267  template <int dim, int spacedim>
268 bool
269 Triangulation<dim,spacedim>::with_artificial_cells() const
270 {
271  return allow_artificial_cells;
272  }
273 
274 
275 
276  template <int dim, int spacedim>
277 const std::vector<types::subdomain_id> &
278 Triangulation<dim,spacedim>::get_true_subdomain_ids_of_cells () const
279 {
280  return true_subdomain_ids_of_cells;
281  }
282 
283 
284 
285  template <int dim, int spacedim>
286 const std::vector<types::subdomain_id> &
287 Triangulation<dim,spacedim>::get_true_level_subdomain_ids_of_cells (const unsigned int level) const
288 {
289  return true_level_subdomain_ids_of_cells[level];
290  }
291 
292 
293 
294  template <int dim, int spacedim>
295 void
296 Triangulation<dim,spacedim>::execute_coarsening_and_refinement ()
297 {
298 dealii::Triangulation<dim,spacedim>::execute_coarsening_and_refinement ();
299 partition();
300  this->update_number_cache ();
301  }
302 
303 
304 
305  template <int dim, int spacedim>
306 void
307 Triangulation<dim,spacedim>::create_triangulation (const std::vector< Point<spacedim> > &vertices,
308 const std::vector< CellData< dim > > &cells,
309  const SubCellData &subcelldata)
310  {
311  try
312 {
313 dealii::Triangulation<dim,spacedim>::
314 create_triangulation (vertices, cells, subcelldata);
315  }
316 catch (const typename dealii::Triangulation<dim,spacedim>::DistortedCellList &)
317  {
318  // the underlying triangulation should not be checking for distorted
319  // cells
320  Assert (false, ExcInternalError());
321  }
322  partition();
323  this->update_number_cache ();
324  }
325 
326 
327 
328  template <int dim, int spacedim>
329 void
330 Triangulation<dim,spacedim>::
331 copy_triangulation (const dealii::Triangulation<dim, spacedim> &other_tria)
332  {
333 Assert ((dynamic_cast<const dealii::parallel::distributed::Triangulation<dim,spacedim> *>(&other_tria) == nullptr),
334  ExcMessage("Cannot use this function on parallel::distributed::Triangulation."));
335 
336 dealii::parallel::Triangulation<dim,spacedim>::copy_triangulation (other_tria);
337 partition();
338  this->update_number_cache ();
339  }
340 
341 
342 
343  template <int dim, int spacedim>
344 void
345 Triangulation<dim,spacedim>::
346 update_number_cache ()
347 {
348 parallel::Triangulation<dim,spacedim>::update_number_cache();
349 
350 if (settings & construct_multigrid_hierarchy)
351 parallel::Triangulation<dim,spacedim>::fill_level_ghost_owners();
352 }
353  }
354 }
355 
356 #else
357 
358 namespace parallel
359 {
360  namespace shared
361  {
362  template <int dim, int spacedim>
363 bool
364 Triangulation<dim,spacedim>::with_artificial_cells() const
365 {
366  Assert (false, ExcNotImplemented());
367  return true;
368  }
369 
370 
371 
372  template <int dim, int spacedim>
373 const std::vector<unsigned int> &
374 Triangulation<dim,spacedim>::get_true_subdomain_ids_of_cells () const
375 {
376  Assert (false, ExcNotImplemented());
377  return true_subdomain_ids_of_cells;
378  }
379 
380 
381 
382  template <int dim, int spacedim>
383 const std::vector<unsigned int> &
384 Triangulation<dim,spacedim>::get_true_level_subdomain_ids_of_cells () const
385 {
386  Assert (false, ExcNotImplemented());
387  return true_level_subdomain_ids_of_cells;
388  }
389  }
390 }
391 
392 
393 #endif
394 
395 
396 /*-------------- Explicit Instantiations -------------------------------*/
397 #include "shared_tria.inst"
398 
399 DEAL_II_NAMESPACE_CLOSE
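
The listing above is the implementation; a short usage sketch may help put it in context. The following example is not part of shared_tria.cc, and the hyper_cube mesh, refinement depth, and dimension are assumptions made only for illustration: every MPI rank stores the complete mesh, the Settings argument selects the partitioner that partition() runs, and get_true_subdomain_ids_of_cells() returns each active cell's owner as it was before cells outside the local subdomain and its ghost layer were marked artificial.

// Minimal usage sketch (illustrative only, not code from this file).
#include <deal.II/base/mpi.h>
#include <deal.II/distributed/shared_tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria.h>

using namespace dealii;

int main (int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

  // partition_auto falls back from Zoltan to METIS to z-order ordering,
  // depending on how the library was configured (see partition() above).
  parallel::shared::Triangulation<2> triangulation
    (MPI_COMM_WORLD,
     Triangulation<2>::none,
     /*allow_artificial_cells=*/true,
     parallel::shared::Triangulation<2>::partition_auto);

  GridGenerator::hyper_cube (triangulation);
  triangulation.refine_global (3);

  // The "true" owners remain available even though cells outside the
  // locally owned subdomain and its ghost layer now carry
  // numbers::artificial_subdomain_id.
  const std::vector<types::subdomain_id> &true_owners
    = triangulation.get_true_subdomain_ids_of_cells ();
  (void)true_owners;

  return 0;
}

If partition_custom_signal is chosen instead, partition() leaves the subdomain ids untouched; per the class documentation, the user is then expected to assign them, typically from a slot connected to the triangulation's post_refinement signal, so that they are in place before the number cache is updated.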