mpi_noncontiguous_partitioner.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2020 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.templates.h>

#include <boost/serialization/utility.hpp>


DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const IndexSet &indexset_has,
      const IndexSet &indexset_want,
      const MPI_Comm  communicator)
    {
      this->reinit(indexset_has, indexset_want, communicator);
    }



    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      this->reinit(indices_has, indices_want, communicator);
    }


    std::pair<unsigned int, unsigned int>
    NoncontiguousPartitioner::n_targets() const
    {
      return {send_ranks.size(), recv_ranks.size()};
    }



    unsigned int
    NoncontiguousPartitioner::temporary_storage_size() const
    {
      return send_ptr.back();
    }



    types::global_dof_index
    NoncontiguousPartitioner::memory_consumption()
    {
      return MemoryConsumption::memory_consumption(send_ranks) +
             MemoryConsumption::memory_consumption(send_ptr) +
             MemoryConsumption::memory_consumption(send_indices) +
             MemoryConsumption::memory_consumption(recv_ranks) +
             MemoryConsumption::memory_consumption(recv_ptr) +
             MemoryConsumption::memory_consumption(recv_indices) +
             MemoryConsumption::memory_consumption(buffers) +
             MemoryConsumption::memory_consumption(requests);
    }



    MPI_Comm
    NoncontiguousPartitioner::get_mpi_communicator() const
    {
      return communicator;
    }


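    // reinit() builds the communication pattern: for each index in
    // indexset_want, the owning rank is determined via
    // compute_index_owner_and_requesters(), and the result is stored in
    // compressed (CSR-like) form in recv_ranks/recv_ptr/recv_indices and
    // send_ranks/send_ptr/send_indices.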
    void
    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
                                     const IndexSet &indexset_want,
                                     const MPI_Comm  communicator)
    {
      this->communicator = communicator;

      // clean up
      send_ranks.clear();
      send_ptr.clear();
      send_indices.clear();
      recv_ranks.clear();
      recv_ptr.clear();
      recv_indices.clear();
      recv_indices_duplicates.clear();
      recv_indices_duplicates_ptr.clear();
      buffers.clear();
      requests.clear();

      // set up communication pattern
      const auto [owning_ranks_of_ghosts, targets_with_indexset] =
        compute_index_owner_and_requesters(indexset_has,
                                           indexset_want,
                                           communicator);

      // set up map of processes from where this rank will receive values
      {
        std::map<unsigned int, std::vector<types::global_dof_index>> recv_map;

        for (const auto &owner : owning_ranks_of_ghosts)
          recv_map[owner] = std::vector<types::global_dof_index>();

        for (types::global_dof_index i = 0; i < owning_ranks_of_ghosts.size();
             i++)
          recv_map[owning_ranks_of_ghosts[i]].push_back(i);

        recv_ptr.push_back(recv_indices.size() /*=0*/);
        for (const auto &target_with_indexset : recv_map)
          {
            recv_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              recv_indices.push_back(cell_index);

            recv_ptr.push_back(recv_indices.size());
          }
      }

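      // note that the send-side pointers below are offset by
      // recv_ptr.back(): receive ranges and send ranges address disjoint
      // sections of a single temporary buffer of total size
      // send_ptr.back() (see temporary_storage_size()).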
      {
        send_ptr.push_back(recv_ptr.back());
        for (const auto &target_with_indexset : targets_with_indexset)
          {
            send_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              send_indices.push_back(indexset_has.index_within_set(cell_index));

            send_ptr.push_back(send_indices.size() + recv_ptr.back());
          }
      }
    }


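    // The reinit() variant below accepts plain index vectors instead of
    // IndexSet objects: the indices may be unsorted, may contain
    // duplicates, and may use numbers::invalid_dof_index as padding.
    // Hypothetical illustration: with indices_has = {4, 0} and
    // indices_want = {0, 4, 4} on a single rank, element 1 of the "has"
    // array (global index 0) ends up in element 0 of the "want" array,
    // while element 0 (global index 4) is duplicated into elements 1 and
    // 2, which exercises the duplicate handling at the end of this
    // function.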
    void
    NoncontiguousPartitioner::reinit(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      // step 0) clean vectors from numbers::invalid_dof_index (indicating
      //         padding)
      std::vector<types::global_dof_index> indices_has_clean;
      indices_has_clean.reserve(indices_has.size());

      for (const auto i : indices_has)
        if (i != numbers::invalid_dof_index)
          indices_has_clean.push_back(i);

      std::vector<types::global_dof_index> indices_want_clean;
      indices_want_clean.reserve(indices_want.size());

      for (const auto i : indices_want)
        if (i != numbers::invalid_dof_index)
          indices_want_clean.push_back(i);

      // step 0) determine "number of degrees of freedom" needed for IndexSet
      const types::global_dof_index local_n_dofs_has =
        indices_has_clean.empty() ?
          0 :
          (*std::max_element(indices_has_clean.begin(),
                             indices_has_clean.end()) +
           1);

      const types::global_dof_index local_n_dofs_want =
        indices_want_clean.empty() ?
          0 :
          (*std::max_element(indices_want_clean.begin(),
                             indices_want_clean.end()) +
           1);

      const types::global_dof_index n_dofs =
        Utilities::MPI::max(std::max(local_n_dofs_has, local_n_dofs_want),
                            communicator);

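      // note on steps 1)-3): converting to IndexSet sorts and deduplicates
      // the indices, so after delegating to the IndexSet-based reinit() in
      // step 2), step 3) has to translate the internal index lists back to
      // positions in the original, unsorted vectors.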
      // step 1) convert vectors to indexsets (sorted!)
      IndexSet index_set_has(n_dofs);
      index_set_has.add_indices(indices_has_clean.begin(),
                                indices_has_clean.end());

      IndexSet index_set_want(n_dofs);
      index_set_want.add_indices(indices_want_clean.begin(),
                                 indices_want_clean.end());

      // step 2) set up internal data structures with indexset
      this->reinit(index_set_has, index_set_want, communicator);

      // step 3) fix inner data structures so that it is sorted as
      //         in the original vector
      {
        std::vector<types::global_dof_index> temp_map_send(
          index_set_has.n_elements());

        for (types::global_dof_index i = 0; i < indices_has.size(); ++i)
          if (indices_has[i] != numbers::invalid_dof_index)
            temp_map_send[index_set_has.index_within_set(indices_has[i])] = i;

        for (auto &i : send_indices)
          i = temp_map_send[i];
      }

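      // remap the receive side: if every wanted index occurs exactly once
      // in indices_want, recv_indices can be rewritten in place (fast
      // path); if some index is requested multiple times, the CSR-like
      // pair recv_indices_duplicates_ptr/recv_indices_duplicates records
      // all positions that have to be filled from the same received value.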
      {
        std::vector<std::vector<types::global_dof_index>> temp_map_recv(
          index_set_want.n_elements());

        for (types::global_dof_index i = 0; i < indices_want.size(); ++i)
          if (indices_want[i] != numbers::invalid_dof_index)
            temp_map_recv[index_set_want.index_within_set(indices_want[i])]
              .push_back(i);

        const bool use_fast_path =
          std::all_of(temp_map_recv.begin(),
                      temp_map_recv.end(),
                      [&](const auto &x) { return x.size() == 1; });

        if (use_fast_path)
          {
            for (auto &i : recv_indices)
              i = temp_map_recv[i][0];
          }
        else
          {
            recv_indices_duplicates_ptr = {0};

            for (const auto &indices : temp_map_recv)
              {
                for (const auto &index : indices)
                  recv_indices_duplicates.emplace_back(index);

                recv_indices_duplicates_ptr.push_back(
                  recv_indices_duplicates.size());
              }
          }
      }
    }
  } // namespace MPI
} // namespace Utilities

#include "base/mpi_noncontiguous_partitioner.inst"

DEAL_II_NAMESPACE_CLOSE
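// Usage sketch (not part of this translation unit; index values and
// variable names are made up for illustration): the partitioner set up by
// the reinit() functions above is typically driven through
// export_to_ghosted_array(), which copies data from the "has" layout into
// the "want" layout.
//
//   #include <deal.II/base/array_view.h>
//   #include <deal.II/base/mpi_noncontiguous_partitioner.h>
//
//   using namespace dealii;
//
//   const unsigned int my_rank =
//     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
//
//   // rank 0 owns {0,1,2,3}, rank 1 owns {4,5,6,7}; in addition, each
//   // rank wants one value owned by the other rank
//   const std::vector<types::global_dof_index> indices_has =
//     (my_rank == 0) ? std::vector<types::global_dof_index>{0, 1, 2, 3} :
//                      std::vector<types::global_dof_index>{4, 5, 6, 7};
//   const std::vector<types::global_dof_index> indices_want =
//     (my_rank == 0) ? std::vector<types::global_dof_index>{0, 1, 2, 3, 4} :
//                      std::vector<types::global_dof_index>{4, 5, 6, 7, 3};
//
//   Utilities::MPI::NoncontiguousPartitioner partitioner(indices_has,
//                                                        indices_want,
//                                                        MPI_COMM_WORLD);
//
//   const std::vector<double> src(indices_has.size(), my_rank);
//   std::vector<double>       dst(indices_want.size());
//   partitioner.export_to_ghosted_array(make_array_view(src),
//                                       make_array_view(dst));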