repartitioning_policy_tools.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2021 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/base/mpi.h>

#include <deal.II/distributed/repartitioning_policy_tools.h>
#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/cell_id_translator.h>
#include <deal.II/grid/filtered_iterator.h>

DEAL_II_NAMESPACE_OPEN

namespace RepartitioningPolicyTools
{
  namespace
  {
    // Add the index of the given cell to the index set and, as long as the
    // cell is the first child of its parent, recurse to the parent: the
    // owner of a first child thereby also claims all of its ancestors.
    template <int dim, int spacedim>
    void
    add_indices_recursively_for_first_child_policy(
      const TriaIterator<CellAccessor<dim, spacedim>> &cell,
      const internal::CellIDTranslator<dim>           &cell_id_translator,
      IndexSet                                        &is_fine)
    {
      is_fine.add_index(cell_id_translator.translate(cell));

      if (cell->level() > 0 && cell->parent()->child(0) == cell)
        add_indices_recursively_for_first_child_policy(cell->parent(),
                                                       cell_id_translator,
                                                       is_fine);
    }
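
    // Illustrative note (not part of the original source): for a cell on
    // level 2 that is the first child of its parent, which is in turn the
    // first child of a coarse cell, the recursion registers the cell, its
    // parent, and the coarse ancestor, so the owner of the fine cell is
    // also recorded as the designated owner of all of these ancestors.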
  } // namespace


  template <int dim, int spacedim>
  DefaultPolicy<dim, spacedim>::DefaultPolicy(const bool tighten)
    : tighten(tighten)
  {}

  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  DefaultPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    if (tighten == false)
      return {}; // nothing to do

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    if (tria == nullptr)
      return {}; // nothing to do, since this is a serial triangulation

#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
    return {};
#else

    const auto comm = tria->get_mpi_communicator();

    const unsigned int process_has_active_locally_owned_cells =
      tria->n_locally_owned_active_cells() > 0;
    const unsigned int n_processes_with_active_locally_owned_cells =
      Utilities::MPI::sum(process_has_active_locally_owned_cells, comm);

    if (n_processes_with_active_locally_owned_cells ==
        Utilities::MPI::n_mpi_processes(comm))
      return {}; // nothing to do, since all processes have cells

    // compute the exclusive prefix sum of the "has cells" flags: this gives
    // each non-empty process its rank in a contiguous renumbering of the
    // non-empty processes
    unsigned int offset = 0;

    const int ierr =
      MPI_Exscan(&process_has_active_locally_owned_cells,
                 &offset,
                 1,
                 Utilities::MPI::mpi_type_id_for_type<
                   decltype(process_has_active_locally_owned_cells)>,
                 MPI_SUM,
                 comm);
    AssertThrowMPI(ierr);

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    partition = offset;

    return partition;
#endif
  }
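
  // Worked example (illustrative, not in the original source): with four
  // ranks owning (8, 0, 5, 3) active cells, the flags are (1, 0, 1, 1) and
  // MPI_Exscan yields offsets (0, 1, 1, 2). All cells of a process are
  // assigned its offset, so the non-empty ranks are renumbered contiguously
  // to 0, 1, 2 and the empty rank drops out.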



  template <int dim, int spacedim>
  FirstChildPolicy<dim, spacedim>::FirstChildPolicy(
    const Triangulation<dim, spacedim> &tria_fine)
    : n_coarse_cells(tria_fine.n_global_coarse_cells())
    , n_global_levels(tria_fine.n_global_levels())
  {
    Assert(
      tria_fine.all_reference_cells_are_hyper_cube(),
      ExcMessage(
        "FirstChildPolicy is only working for pure hex meshes at the moment."));

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);
    is_level_partitions.set_size(cell_id_translator.size());

    for (const auto &cell : tria_fine.active_cell_iterators())
      if (cell->is_locally_owned())
        add_indices_recursively_for_first_child_policy(cell,
                                                       cell_id_translator,
                                                       is_level_partitions);
  }
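
  // Illustrative note (not part of the original source): the
  // CellIDTranslator enumerates every cell that could exist on any level of
  // the refinement hierarchy, so an IndexSet of size
  // cell_id_translator.size() can record fine cells together with all of
  // their coarser ancestors.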



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  FirstChildPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_coarse_in) const
  {
    const auto communicator = tria_coarse_in.get_mpi_communicator();

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);

    IndexSet is_coarse(cell_id_translator.size());

    for (const auto &cell : tria_coarse_in.active_cell_iterators())
      if (cell->is_locally_owned())
        is_coarse.add_index(cell_id_translator.translate(cell));

    // for each locally owned coarse cell, look up which rank claimed it
    // via the first-child chains collected in the constructor
    const std::vector<unsigned int> owning_ranks_of_coarse_cells =
      Utilities::MPI::compute_index_owner(is_level_partitions,
                                          is_coarse,
                                          communicator);

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_coarse_in);
    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    for (const auto &cell : tria_coarse_in.active_cell_iterators() |
                              IteratorFilters::LocallyOwnedCell())
      partition[cell->global_active_cell_index()] =
        owning_ranks_of_coarse_cells[is_coarse.index_within_set(
          cell_id_translator.translate(cell))];

    return partition;
  }
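
  // Illustrative note (not part of the original source): if rank 2 owns a
  // fine cell that is the first child of its parent, the constructor has
  // inserted the parent's index into is_level_partitions on rank 2, so
  // compute_index_owner() reports rank 2 as the owner of that parent here,
  // and parent and first child end up on the same process.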


  template <int dim, int spacedim>
  MinimalGranularityPolicy<dim, spacedim>::MinimalGranularityPolicy(
    const unsigned int n_min_cells)
    : n_min_cells(n_min_cells)
  {}



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  MinimalGranularityPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    // step 1) check if all processes have enough cells

    const unsigned int n_locally_owned_active_cells =
      std::count_if(tria_in.begin_active(),
                    typename Triangulation<dim, spacedim>::active_cell_iterator(
                      tria_in.end()),
                    [](const auto &cell) { return cell.is_locally_owned(); });

    const auto comm = tria_in.get_mpi_communicator();

    if (Utilities::MPI::min(n_locally_owned_active_cells, comm) >= n_min_cells)
      return {}; // all processes have enough cells

    // step 2) some processes do not have enough cells, so a repartitioning
    // kicks in with the aim that all processes that own cells have at
    // least the specified number of cells

    const types::global_cell_index n_global_active_cells =
      tria_in.n_global_active_cells();

    const unsigned int n_partitions =
      std::max<unsigned int>(1,
                             std::min<types::global_cell_index>(
                               n_global_active_cells / n_min_cells,
                               Utilities::MPI::n_mpi_processes(comm)));

    const unsigned int min_cells = n_global_active_cells / n_partitions;

    const auto convert = [&](const unsigned int i) {
      // determine the owner of a given index: we try to assign each process
      // the same number of cells; if there is a remainder, we assign the
      // first processes an additional cell (so that the difference in the
      // number of locally owned cells is never larger than one between
      // processes)
      const unsigned int n_partitions_with_additional_cell =
        n_global_active_cells - min_cells * n_partitions;

      const unsigned int rank =
        (i < (min_cells + 1) * n_partitions_with_additional_cell) ?
          (i / (min_cells + 1)) :
          ((i - n_partitions_with_additional_cell) / min_cells);

      AssertIndexRange(rank, n_partitions);

      return rank;
    };

    for (const auto i : partition.locally_owned_elements())
      partition[i] = convert(i);

    return partition;
  }
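
  // Worked example (illustrative, not in the original source): 10 global
  // cells with n_min_cells = 3 on 4 ranks gives
  // n_partitions = max(1, min(10 / 3, 4)) = 3 and min_cells = 10 / 3 = 3;
  // one partition receives an extra cell, so convert() maps cells
  // 0..3 -> rank 0, 4..6 -> rank 1, and 7..9 -> rank 2.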



  template <int dim, int spacedim>
  CellWeightPolicy<dim, spacedim>::CellWeightPolicy(
    const std::function<
      unsigned int(const typename Triangulation<dim, spacedim>::cell_iterator &,
                   const CellStatus)> &weighting_function)
    : weighting_function(weighting_function)
  {}



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  CellWeightPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
#ifndef DEAL_II_WITH_MPI
    (void)tria_in;
    return {};
#else

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    const auto partitioner =
      tria->global_active_cell_index_partitioner().lock();

    std::vector<unsigned int> weights(partitioner->locally_owned_size());

    const auto mpi_communicator = tria_in.get_mpi_communicator();
    const auto n_subdomains = Utilities::MPI::n_mpi_processes(mpi_communicator);

    // determine the weight of each cell
    for (const auto &cell :
         tria->active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
      weights[partitioner->global_to_local(cell->global_active_cell_index())] =
        weighting_function(cell, CellStatus::cell_will_persist);

    // determine the weight of all the cells locally owned by this process
    std::uint64_t process_local_weight = 0;
    for (const auto &weight : weights)
      process_local_weight += weight;

    // determine the partial sum of the weights of this process, as well as
    // the total weight
    const auto [process_local_weight_offset, total_weight] =
      Utilities::MPI::partial_and_total_sum(process_local_weight,
                                            tria->get_mpi_communicator());

    // set up the partition: each cell is assigned to the rank given by its
    // global weight offset, scaled to the number of subdomains
    LinearAlgebra::distributed::Vector<double> partition(partitioner);

    for (std::uint64_t i = 0, weight = process_local_weight_offset;
         i < partition.locally_owned_size();
         weight += weights[i], ++i)
      partition.local_element(i) =
        static_cast<double>(weight * n_subdomains / total_weight);

    return partition;
#endif
  }
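
  // Worked example (illustrative, not in the original source): with two
  // ranks and local weights (1, 3) on rank 0 and (2, 2) on rank 1, the
  // exclusive prefix sums are 0 and 4 and total_weight = 8; with
  // n_subdomains = 2, the target ranks are floor(w * 2 / 8) for
  // w = 0, 1, 4, 6, i.e. (0, 0) on rank 0 and (1, 1) on rank 1.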


} // namespace RepartitioningPolicyTools



/*-------------- Explicit Instantiations -------------------------------*/
#include "repartitioning_policy_tools.inst"

DEAL_II_NAMESPACE_CLOSE
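
Usage sketch (not part of this file): a policy is typically evaluated on an existing distributed triangulation, and the resulting partition vector is handed to TriangulationDescription::Utilities::create_description_from_triangulation() to set up a repartitioned parallel::fullydistributed::Triangulation. The snippet below is a minimal sketch under that assumption; the names dim, comm, and tria (an existing distributed triangulation) are placeholders.

  // give every process at least 64 cells, using the policy defined above
  RepartitioningPolicyTools::MinimalGranularityPolicy<dim> policy(64);

  // evaluate the policy: for each locally owned active cell, the vector
  // holds the rank that is to own the cell after repartitioning (an empty
  // vector signals that no repartitioning is necessary)
  const auto partition = policy.partition(tria);

  // build the description of the repartitioned mesh and create a fully
  // distributed triangulation from it
  const auto description = TriangulationDescription::Utilities::
    create_description_from_triangulation(tria, partition);

  parallel::fullydistributed::Triangulation<dim> tria_repartitioned(comm);
  tria_repartitioned.create_triangulation(description);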