Reference documentation for deal.II version 9.6.0
repartitioning_policy_tools.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2021 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------


#include <deal.II/base/mpi_consensus_algorithms.h>

#include <deal.II/distributed/repartitioning_policy_tools.h>
#include <deal.II/distributed/tria_base.h>

#include <deal.II/grid/cell_id_translator.h>
#include <deal.II/grid/filtered_iterator.h>

#include <deal.II/lac/la_parallel_vector.h>

DEAL_II_NAMESPACE_OPEN

namespace RepartitioningPolicyTools
{
  namespace
  {
    template <int dim, int spacedim>
    void
    add_indices_recursively_for_first_child_policy(
      const TriaIterator<CellAccessor<dim, spacedim>> &cell,
      const internal::CellIDTranslator<dim>           &cell_id_translator,
      IndexSet                                        &is_fine)
    {
      is_fine.add_index(cell_id_translator.translate(cell));

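      // walk up the mesh hierarchy as long as the cell is the first child of
      // its parent; e.g., if a cell is the first child of its parent and that
      // parent is itself a first child, the indices of the cell, its parent,
      // and its grandparent all end up in is_fine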
      if (cell->level() > 0 && cell->parent()->child(0) == cell)
        add_indices_recursively_for_first_child_policy(cell->parent(),
                                                       cell_id_translator,
                                                       is_fine);
    }
  } // namespace


  template <int dim, int spacedim>
  DefaultPolicy<dim, spacedim>::DefaultPolicy(const bool tighten)
    : tighten(tighten)
  {}

  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  DefaultPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    if (tighten == false)
      return {}; // nothing to do

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    if (tria == nullptr)
      return {}; // nothing to do, since serial triangulation

#ifndef DEAL_II_WITH_MPI
    Assert(false, ExcNeedsMPI());
    return {};
#else

    const auto comm = tria->get_communicator();

    const unsigned int process_has_active_locally_owned_cells =
      tria->n_locally_owned_active_cells() > 0;
    const unsigned int n_processes_with_active_locally_owned_cells =
      Utilities::MPI::sum(process_has_active_locally_owned_cells, comm);

    if (n_processes_with_active_locally_owned_cells ==
        Utilities::MPI::n_mpi_processes(comm))
      return {}; // nothing to do, since all processes have cells

    unsigned int offset = 0;

    const int ierr =
      MPI_Exscan(&process_has_active_locally_owned_cells,
                 &offset,
                 1,
                 Utilities::MPI::mpi_type_id_for_type<
                   decltype(process_has_active_locally_owned_cells)>,
                 MPI_SUM,
                 comm);
    AssertThrowMPI(ierr);
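
    // Example: with four ranks whose ownership flags are {1, 1, 0, 1}, the
    // exclusive scan yields the offsets {0, 1, 2, 2}; every locally owned
    // cell is then labeled with its rank's offset below, so the three
    // non-empty ranks end up renumbered contiguously as 0, 1, 2.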

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    partition = offset;

    return partition;
#endif
  }



  template <int dim, int spacedim>
  FirstChildPolicy<dim, spacedim>::FirstChildPolicy(
    const Triangulation<dim, spacedim> &tria_fine)
    : n_coarse_cells(tria_fine.n_global_coarse_cells())
    , n_global_levels(tria_fine.n_global_levels())
  {
    Assert(
      tria_fine.all_reference_cells_are_hyper_cube(),
      ExcMessage(
        "FirstChildPolicy is only working for pure hex meshes at the moment."));

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);
    is_level_partitions.set_size(cell_id_translator.size());

    for (const auto &cell : tria_fine.active_cell_iterators())
      if (cell->is_locally_owned())
        add_indices_recursively_for_first_child_policy(cell,
                                                       cell_id_translator,
                                                       is_level_partitions);
  }



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  FirstChildPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_coarse_in) const
  {
    const auto communicator = tria_coarse_in.get_communicator();

    const internal::CellIDTranslator<dim> cell_id_translator(n_coarse_cells,
                                                             n_global_levels);

    IndexSet is_coarse(cell_id_translator.size());

    for (const auto &cell : tria_coarse_in.active_cell_iterators())
      if (cell->is_locally_owned())
        is_coarse.add_index(cell_id_translator.translate(cell));

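    // determine, for each locally owned coarse cell, which rank owns the
    // corresponding entry of is_level_partitions on the fine mesh; that rank
    // becomes the new owner of the coarse cell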
    std::vector<unsigned int> owning_ranks_of_coarse_cells(
      is_coarse.n_elements());
    {
      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
        process(is_level_partitions,
                is_coarse,
                communicator,
                owning_ranks_of_coarse_cells,
                false);

      Utilities::MPI::ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_cell_index, types::global_cell_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, communicator);
    }

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_coarse_in);

    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    for (const auto &cell : tria_coarse_in.active_cell_iterators() |
                              IteratorFilters::LocallyOwnedCell())
      partition[cell->global_active_cell_index()] =
        owning_ranks_of_coarse_cells[is_coarse.index_within_set(
          cell_id_translator.translate(cell))];

    return partition;
  }


  template <int dim, int spacedim>
  MinimalGranularityPolicy<dim, spacedim>::MinimalGranularityPolicy(
    const unsigned int n_min_cells)
    : n_min_cells(n_min_cells)
  {}


  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  MinimalGranularityPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    LinearAlgebra::distributed::Vector<double> partition(
      tria->global_active_cell_index_partitioner().lock());

    // step 1) check if all processes have enough cells

    const unsigned int n_locally_owned_active_cells =
      std::count_if(tria_in.begin_active(),
                    typename Triangulation<dim, spacedim>::active_cell_iterator(
                      tria_in.end()),
                    [](const auto &cell) { return cell.is_locally_owned(); });

    const auto comm = tria_in.get_communicator();

    if (Utilities::MPI::min(n_locally_owned_active_cells, comm) >= n_min_cells)
      return {}; // all processes have enough cells

    // step 2) there are processes which do not have enough cells so that
    // a repartitioning kicks in with the aim that all processes that own
    // cells have at least the specified number of cells

    const types::global_cell_index n_global_active_cells =
      tria_in.n_global_active_cells();

    const unsigned int n_partitions =
      std::max<unsigned int>(1,
                             std::min<types::global_cell_index>(
                               n_global_active_cells / n_min_cells,
                               Utilities::MPI::n_mpi_processes(comm)));

    const unsigned int min_cells = n_global_active_cells / n_partitions;

    const auto convert = [&](const unsigned int i) {
      // determine the owner of a given index: we try to assign each process
      // the same number of cells; if there is a remainder, we assign the
      // first processes an additional cell (so that the difference in the
      // number of locally owned cells between processes is never larger
      // than one).
      const unsigned int n_partitions_with_additional_cell =
        n_global_active_cells - min_cells * n_partitions;

      const unsigned int rank =
        (i < (min_cells + 1) * n_partitions_with_additional_cell) ?
          (i / (min_cells + 1)) :
          ((i - n_partitions_with_additional_cell) / min_cells);

      AssertIndexRange(rank, n_partitions);

      return rank;
    };
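
    // Example: distributing 11 cells over n_partitions = 3 gives
    // min_cells = 3 and n_partitions_with_additional_cell = 2, so cells
    // 0..3 go to rank 0, cells 4..7 to rank 1, and cells 8..10 to rank 2.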

    for (const auto i : partition.locally_owned_elements())
      partition[i] = convert(i);

    return partition;
  }



  template <int dim, int spacedim>
  CellWeightPolicy<dim, spacedim>::CellWeightPolicy(
    const std::function<
      unsigned int(const typename Triangulation<dim, spacedim>::cell_iterator &,
                   const CellStatus)> &weighting_function)
    : weighting_function(weighting_function)
  {}



  template <int dim, int spacedim>
  LinearAlgebra::distributed::Vector<double>
  CellWeightPolicy<dim, spacedim>::partition(
    const Triangulation<dim, spacedim> &tria_in) const
  {
#ifndef DEAL_II_WITH_MPI
    (void)tria_in;
    return {};
#else

    const auto tria =
      dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
        &tria_in);

    Assert(tria, ExcNotImplemented());

    const auto partitioner =
      tria->global_active_cell_index_partitioner().lock();

    std::vector<unsigned int> weights(partitioner->locally_owned_size());

    const auto mpi_communicator = tria_in.get_communicator();
    const auto n_subdomains = Utilities::MPI::n_mpi_processes(mpi_communicator);

    // determine weight of each cell
    for (const auto &cell :
         tria->active_cell_iterators() | IteratorFilters::LocallyOwnedCell())
      weights[partitioner->global_to_local(cell->global_active_cell_index())] =
        weighting_function(cell, CellStatus::cell_will_persist);

    // determine weight of all the cells locally owned by this process
    std::uint64_t process_local_weight = 0;
    for (const auto &weight : weights)
      process_local_weight += weight;

    // determine partial sum of weights of this process, as well as the total
    // weight
    const auto [process_local_weight_offset, total_weight] =
      Utilities::MPI::partial_and_total_sum(process_local_weight,
                                            tria->get_communicator());

    // set up partition
    LinearAlgebra::distributed::Vector<double> partition(partitioner);

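    // Each locally owned cell i is assigned to the subdomain given by the
    // exclusive prefix sum of weights up to i, scaled by the number of
    // subdomains. For example, with total_weight = 100 and 4 subdomains, a
    // cell whose prefix weight is 55 lands on rank 55 * 4 / 100 = 2.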
    for (std::uint64_t i = 0, weight = process_local_weight_offset;
         i < partition.locally_owned_size();
         weight += weights[i], ++i)
      partition.local_element(i) =
        static_cast<double>(weight * n_subdomains / total_weight);

    return partition;
#endif
  }


} // namespace RepartitioningPolicyTools



/*-------------- Explicit Instantiations -------------------------------*/
#include "repartitioning_policy_tools.inst"

DEAL_II_NAMESPACE_CLOSE