Reference documentation for deal.II version GIT 35969cdc9b 2023-12-09 01:10:02+00:00
repartitioning_policy_tools.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2021 - 2022 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_consensus_algorithms.h>

#include <deal.II/distributed/repartitioning_policy_tools.h>
#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/cell_id_translator.h>
#include <deal.II/grid/filtered_iterator.h>

#include <deal.II/lac/la_parallel_vector.h>

DEAL_II_NAMESPACE_OPEN

namespace RepartitioningPolicyTools
{
  namespace
  {
    template <int dim, int spacedim>
    void
    add_indices_recursively_for_first_child_policy(
      const TriaIterator<CellAccessor<dim, spacedim>> &cell,
      const internal::CellIDTranslator<dim>           &cell_id_translator,
      IndexSet                                        &is_fine)
    {
      // add the index of the given cell, then walk up the parent chain
      // for as long as the current cell is the first child of its parent
      is_fine.add_index(cell_id_translator.translate(cell));

      if (cell->level() > 0 && cell->parent()->child(0) == cell)
        add_indices_recursively_for_first_child_policy(cell->parent(),
                                                       cell_id_translator,
                                                       is_fine);
    }
  } // namespace


  template <int dim, int spacedim>
  DefaultPolicy<dim, spacedim>::DefaultPolicy(const bool tighten)
    : tighten(tighten)
  {}

  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  DefaultPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    if (tighten == false)
      return {}; // nothing to do

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    if (tria == nullptr)
      return {}; // nothing to do, since serial triangulation

#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
    return {};
#else

    const auto comm = tria->get_communicator();

    const unsigned int process_has_active_locally_owned_cells =
      tria->n_locally_owned_active_cells() > 0;
    const unsigned int n_processes_with_active_locally_owned_cells =
      Utilities::MPI::sum(process_has_active_locally_owned_cells, comm);

    if (n_processes_with_active_locally_owned_cells ==
        Utilities::MPI::n_mpi_processes(comm))
      return {}; // nothing to do, since all processes have cells

    // compute the exclusive prefix sum over the 0/1 ownership flags so
    // that the ranks that do own cells are numbered contiguously
    unsigned int offset = 0;

    const int ierr =
      MPI_Exscan(&process_has_active_locally_owned_cells,
                 &offset,
                 1,
                 Utilities::MPI::mpi_type_id_for_type<
                   decltype(process_has_active_locally_owned_cells)>,
                 MPI_SUM,
                 comm);
    AssertThrowMPI(ierr);

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    partition = offset;

    return partition;
#endif
  }
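
  // Usage sketch (editor's illustration, not part of the library source):
  // the vector returned by partition() stores, for every locally owned
  // active cell, the rank that is to own it after repartitioning; an empty
  // vector means "nothing to do". For DefaultPolicy with tighten = true,
  // ranks without cells are squeezed out by the exclusive prefix sum above:
  // with cell-ownership flags (1, 0, 1, 1) on four ranks, MPI_Exscan yields
  // offsets (0, 1, 1, 2), so the three ranks that own cells become ranks
  // 0, 1, and 2 of the tightened partition. The result is typically fed to
  // TriangulationDescription::Utilities::create_description_from_triangulation():
  //
  // @code
  // RepartitioningPolicyTools::DefaultPolicy<dim> policy(true);
  // const auto partition = policy.partition(tria);
  //
  // if (partition.size() > 0)
  //   {
  //     const auto description = TriangulationDescription::Utilities::
  //       create_description_from_triangulation(tria, partition);
  //
  //     parallel::fullydistributed::Triangulation<dim> tria_pft(comm);
  //     tria_pft.create_triangulation(description);
  //   }
  // @endcode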



  template <int dim, int spacedim>
  FirstChildPolicy<dim, spacedim>::FirstChildPolicy(
    const Triangulation<dim, spacedim> &tria_fine)
    : n_coarse_cells(tria_fine.n_global_coarse_cells())
    , n_global_levels(tria_fine.n_global_levels())
  {
    Assert(
      tria_fine.all_reference_cells_are_hyper_cube(),
      ExcMessage(
        "FirstChildPolicy is only working for pure hex meshes at the moment."));

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);
    is_level_partitions.set_size(cell_id_translator.size());

    for (const auto &cell : tria_fine.active_cell_iterators())
      if (cell->is_locally_owned())
        add_indices_recursively_for_first_child_policy(cell,
                                                       cell_id_translator,
                                                       is_level_partitions);
  }



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  FirstChildPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_coarse_in) const
  {
    const auto communicator = tria_coarse_in.get_communicator();

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);

    IndexSet is_coarse(cell_id_translator.size());

    for (const auto &cell : tria_coarse_in.active_cell_iterators())
      if (cell->is_locally_owned())
        is_coarse.add_index(cell_id_translator.translate(cell));

    // determine, via a consensus algorithm, which rank owns the first
    // child (on the fine mesh) of each locally owned coarse cell
    std::vector<unsigned int> owning_ranks_of_coarse_cells(
      is_coarse.n_elements());
    {
      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
        process(is_level_partitions,
                is_coarse,
                communicator,
                owning_ranks_of_coarse_cells,
                false);

      Utilities::MPI::ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_cell_index, types::global_cell_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, communicator);
    }

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_coarse_in);

    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    for (const auto &cell : tria_coarse_in.active_cell_iterators() |
                              IteratorFilters::LocallyOwnedCell())
      partition[cell->global_active_cell_index()] =
        owning_ranks_of_coarse_cells[is_coarse.index_within_set(
          cell_id_translator.translate(cell))];

    return partition;
  }
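
  // Usage sketch (editor's illustration): FirstChildPolicy targets the
  // construction of the coarser meshes of a global-coarsening multigrid
  // hierarchy. Constructed from the fine mesh, it assigns each coarse cell
  // to the rank that owns the cell's first child on the finer level, which
  // keeps the data exchange in the level transfer local:
  //
  // @code
  // RepartitioningPolicyTools::FirstChildPolicy<dim> policy(tria_fine);
  // const auto partition = policy.partition(tria_coarse);
  // @endcode
  //
  // A policy object like this can also be handed to
  // MGTransferGlobalCoarseningTools::create_geometric_coarsening_sequence()
  // to build the whole mesh hierarchy in one go.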



  template <int dim, int spacedim>
  MinimalGranularityPolicy<dim, spacedim>::MinimalGranularityPolicy(
    const unsigned int n_min_cells)
    : n_min_cells(n_min_cells)
  {}



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  MinimalGranularityPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    // step 1) check if all processes have enough cells

    const unsigned int n_locally_owned_active_cells =
      std::count_if(tria_in.begin_active(),
                    typename Triangulation<dim, spacedim>::
                      active_cell_iterator(tria_in.end()),
                    [](const auto &cell) { return cell.is_locally_owned(); });

    const auto comm = tria_in.get_communicator();

    if (Utilities::MPI::min(n_locally_owned_active_cells, comm) >= n_min_cells)
      return {}; // all processes have enough cells

    // step 2) there are processes that do not have enough cells, so
    // a repartitioning kicks in with the aim that all processes that own
    // cells have at least the specified number of cells

    const types::global_cell_index n_global_active_cells =
      tria_in.n_global_active_cells();

    const unsigned int n_partitions =
      std::max<unsigned int>(1,
                             std::min<types::global_cell_index>(
                               n_global_active_cells / n_min_cells,
                               Utilities::MPI::n_mpi_processes(comm)));

    const unsigned int min_cells = n_global_active_cells / n_partitions;

    const auto convert = [&](const unsigned int i) {
      // determine the owner of a given index: we try to assign each process
      // the same number of cells; if there is a remainder, we assign the
      // first processes an additional cell (so that the difference in the
      // number of locally owned cells between processes is never larger
      // than one)
      const unsigned int n_partitions_with_additional_cell =
        n_global_active_cells - min_cells * n_partitions;

      const unsigned int rank =
        (i < (min_cells + 1) * n_partitions_with_additional_cell) ?
          (i / (min_cells + 1)) :
          ((i - n_partitions_with_additional_cell) / min_cells);

      AssertIndexRange(rank, n_partitions);

      return rank;
    };

    for (const auto i : partition.locally_owned_elements())
      partition[i] = convert(i);

    return partition;
  }
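
  // Worked example (editor's illustration) for the index-to-rank map above:
  // for n_global_active_cells = 11, n_min_cells = 4, and 8 MPI processes,
  // one obtains n_partitions = max(1, min(11 / 4, 8)) = 2 and
  // min_cells = 11 / 2 = 5, leaving
  // n_partitions_with_additional_cell = 11 - 5 * 2 = 1. Cell indices 0..5
  // then map to rank 0 (6 cells) and indices 6..10 to rank 1 (5 cells);
  // the remaining six processes end up without cells, but every process
  // that owns cells has at least n_min_cells of them.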



  template <int dim, int spacedim>
  CellWeightPolicy<dim, spacedim>::CellWeightPolicy(
    const std::function<
      unsigned int(const typename Triangulation<dim, spacedim>::cell_iterator &,
                   const CellStatus)> &weighting_function)
    : weighting_function(weighting_function)
  {}



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  CellWeightPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
#ifndef DEAL_II_WITH_MPI
    (void)tria_in;
    return {};
#else

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    const auto partitioner =
      tria->global_active_cell_index_partitioner().lock();

    std::vector<unsigned int> weights(partitioner->locally_owned_size());

    const auto mpi_communicator = tria_in.get_communicator();
    const auto n_subdomains = Utilities::MPI::n_mpi_processes(mpi_communicator);

    // determine the weight of each locally owned cell
    for (const auto &cell : tria_in.active_cell_iterators() |
                              IteratorFilters::LocallyOwnedCell())
      weights[partitioner->global_to_local(cell->global_active_cell_index())] =
        weighting_function(cell, CellStatus::cell_will_persist);

    // determine weight of all the cells locally owned by this process
    std::uint64_t process_local_weight = 0;
    for (const auto &weight : weights)
      process_local_weight += weight;

    // determine partial sum of weights of this process
    std::uint64_t process_local_weight_offset = 0;

    int ierr = MPI_Exscan(
      &process_local_weight,
      &process_local_weight_offset,
      1,
      Utilities::MPI::mpi_type_id_for_type<decltype(process_local_weight)>,
      MPI_SUM,
      tria->get_communicator());
    AssertThrowMPI(ierr);

    // total weight of all processes, determined on the last rank of the
    // prefix sum and broadcast to all others
    std::uint64_t total_weight =
      process_local_weight_offset + process_local_weight;

    ierr =
      MPI_Bcast(&total_weight,
                1,
                Utilities::MPI::mpi_type_id_for_type<decltype(total_weight)>,
                n_subdomains - 1,
                mpi_communicator);
    AssertThrowMPI(ierr);

    // set up partition: assign each cell to the rank in whose share of the
    // global weight prefix sum it falls
    LinearAlgebra::distributed::Vector<double> partition(partitioner);

    for (std::uint64_t i = 0, weight = process_local_weight_offset;
         i < partition.locally_owned_size();
         weight += weights[i], ++i)
      partition.local_element(i) =
        static_cast<double>(weight * n_subdomains / total_weight);

    return partition;
#endif
  }
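
  // Usage sketch (editor's illustration): the weighting function supplies a
  // nonnegative weight per cell, and each rank then receives a contiguous
  // range of the global weight prefix sum of roughly
  // total_weight / n_subdomains. For instance, to weight cells by their
  // refinement level:
  //
  // @code
  // RepartitioningPolicyTools::CellWeightPolicy<dim> policy(
  //   [](const auto &cell, const CellStatus) -> unsigned int {
  //     return 1000 * (cell->level() + 1);
  //   });
  // const auto partition = policy.partition(tria);
  // @endcode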


} // namespace RepartitioningPolicyTools



/*-------------- Explicit Instantiations -------------------------------*/
#include "repartitioning_policy_tools.inst"

DEAL_II_NAMESPACE_CLOSE