Reference documentation for deal.II version GIT relicensing-439-g5fda5c893d 2024-04-20 06:50:02+00:00
mpi_noncontiguous_partitioner.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2020 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.templates.h>

#include <boost/serialization/utility.hpp>


DEAL_II_NAMESPACE_OPEN
namespace Utilities
{
  namespace MPI
  {
    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const IndexSet &indexset_has,
      const IndexSet &indexset_want,
      const MPI_Comm  communicator)
    {
      this->reinit(indexset_has, indexset_want, communicator);
    }


    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      this->reinit(indices_has, indices_want, communicator);
    }


    std::pair<unsigned int, unsigned int>
    NoncontiguousPartitioner::n_targets() const
    {
      return {send_ranks.size(), recv_ranks.size()};
    }


    unsigned int
    NoncontiguousPartitioner::temporary_storage_size() const
    {
      return send_ptr.back();
    }



    types::global_dof_index
    NoncontiguousPartitioner::memory_consumption()
    {
      return MemoryConsumption::memory_consumption(send_ranks) +
             MemoryConsumption::memory_consumption(send_ptr) +
             MemoryConsumption::memory_consumption(send_indices) +
             MemoryConsumption::memory_consumption(recv_ranks) +
             MemoryConsumption::memory_consumption(recv_ptr) +
             MemoryConsumption::memory_consumption(recv_indices) +
             MemoryConsumption::memory_consumption(buffers) +
             MemoryConsumption::memory_consumption(requests);
    }



    MPI_Comm
    NoncontiguousPartitioner::get_mpi_communicator() const
    {
      return communicator;
    }
    void
    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
                                     const IndexSet &indexset_want,
                                     const MPI_Comm  communicator)
    {
      this->communicator = communicator;

      // clean up
      send_ranks.clear();
      send_ptr.clear();
      send_indices.clear();
      recv_ranks.clear();
      recv_ptr.clear();
      recv_indices.clear();
      buffers.clear();
      requests.clear();

      // set up communication pattern
      std::vector<unsigned int> owning_ranks_of_ghosts(
        indexset_want.n_elements());

      // set up dictionary
      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
        process(indexset_has,
                indexset_want,
                communicator,
                owning_ranks_of_ghosts,
                true);

      Utilities::MPI::ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, communicator);
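
      // at this point, the consensus algorithm has filled
      // owning_ranks_of_ghosts: entry i is the rank that owns the i-th
      // element of indexset_want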

      // set up map of processes from where this rank will receive values
      {
        std::map<unsigned int, std::vector<types::global_dof_index>> recv_map;

        for (const auto &owner : owning_ranks_of_ghosts)
          recv_map[owner] = std::vector<types::global_dof_index>();

        for (types::global_dof_index i = 0; i < owning_ranks_of_ghosts.size();
             i++)
          recv_map[owning_ranks_of_ghosts[i]].push_back(i);

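        // store the receive pattern in CRS format: the positions (within
        // the "want" index set) served by rank recv_ranks[r] are listed in
        // recv_indices[recv_ptr[r]] ... recv_indices[recv_ptr[r + 1] - 1]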
        recv_ptr.push_back(recv_indices.size() /*=0*/);
        for (const auto &target_with_indexset : recv_map)
          {
            recv_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              recv_indices.push_back(cell_index);

            recv_ptr.push_back(recv_indices.size());
          }
      }

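      // set up map of processes to which this rank has to send values; the
      // send offsets continue where the receive offsets ended, since both
      // halves index into one shared temporary buffer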
      {
        const auto targets_with_indexset = process.get_requesters();

        send_ptr.push_back(recv_ptr.back());
        for (const auto &target_with_indexset : targets_with_indexset)
          {
            send_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              send_indices.push_back(indexset_has.index_within_set(cell_index));

            send_ptr.push_back(send_indices.size() + recv_ptr.back());
          }
      }
    }


    void
    NoncontiguousPartitioner::reinit(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      // step 0) clean vectors from numbers::invalid_dof_index (indicating
      //         padding)
      std::vector<types::global_dof_index> indices_has_clean;
      indices_has_clean.reserve(indices_has.size());

      for (const auto i : indices_has)
        if (i != numbers::invalid_dof_index)
          indices_has_clean.push_back(i);

      std::vector<types::global_dof_index> indices_want_clean;
      indices_want_clean.reserve(indices_want.size());

      for (const auto i : indices_want)
        if (i != numbers::invalid_dof_index)
          indices_want_clean.push_back(i);

      // step 0) determine "number of degrees of freedom" needed for IndexSet
      const types::global_dof_index local_n_dofs_has =
        indices_has_clean.empty() ?
          0 :
          (*std::max_element(indices_has_clean.begin(),
                             indices_has_clean.end()) +
           1);

      const types::global_dof_index local_n_dofs_want =
        indices_want_clean.empty() ?
          0 :
          (*std::max_element(indices_want_clean.begin(),
                             indices_want_clean.end()) +
           1);

      const types::global_dof_index n_dofs =
        Utilities::MPI::max(std::max(local_n_dofs_has, local_n_dofs_want),
                            communicator);

      // step 1) convert vectors to indexsets (sorted!)
      IndexSet index_set_has(n_dofs);
      index_set_has.add_indices(indices_has_clean.begin(),
                                indices_has_clean.end());

      IndexSet index_set_want(n_dofs);
      index_set_want.add_indices(indices_want_clean.begin(),
                                 indices_want_clean.end());

      // step 2) set up internal data structures with indexset
      this->reinit(index_set_has, index_set_want, communicator);

      // step 3) fix the internal data structures so that they are ordered
      //         as in the original vectors
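      // (the IndexSet-based reinit() above numbered the entries in sorted
      // index order, whereas the user-provided arrays are ordered like
      // indices_has and indices_want, possibly with duplicates and padding)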
      {
        std::vector<types::global_dof_index> temp_map_send(
          index_set_has.n_elements());

        for (types::global_dof_index i = 0; i < indices_has.size(); ++i)
          if (indices_has[i] != numbers::invalid_dof_index)
            temp_map_send[index_set_has.index_within_set(indices_has[i])] = i;

        for (auto &i : send_indices)
          i = temp_map_send[i];
      }

      {
        std::vector<types::global_dof_index> temp_map_recv(
          index_set_want.n_elements());

        for (types::global_dof_index i = 0; i < indices_want.size(); ++i)
          if (indices_want[i] != numbers::invalid_dof_index)
            temp_map_recv[index_set_want.index_within_set(indices_want[i])] = i;

        for (auto &i : recv_indices)
          i = temp_map_recv[i];
      }
    }
  } // namespace MPI
} // namespace Utilities

#include "mpi_noncontiguous_partitioner.inst"

DEAL_II_NAMESPACE_CLOSE
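
A minimal usage sketch (editorial illustration, not part of the file above): each rank passes the global indices it owns, in whatever order its data is stored, together with the global indices it wants; export_to_ghosted_array(), declared in mpi_noncontiguous_partitioner.h and instantiated via the .templates.h/.inst files included here, then delivers each wanted value from its owning rank. The two-rank index choice below is made up for illustration.

#include <deal.II/base/array_view.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>

#include <vector>

using namespace dealii;

int main(int argc, char *argv[])
{
  Utilities::MPI::MPI_InitFinalize mpi(argc, argv);

  // hypothetical setup for exactly two ranks: rank 0 owns {0,1,2,3},
  // rank 1 owns {4,5,6,7}, and each rank wants two noncontiguous
  // indices owned by the other rank
  const unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);

  const std::vector<types::global_dof_index> indices_has =
    (rank == 0) ? std::vector<types::global_dof_index>{0, 1, 2, 3} :
                  std::vector<types::global_dof_index>{4, 5, 6, 7};
  const std::vector<types::global_dof_index> indices_want =
    (rank == 0) ? std::vector<types::global_dof_index>{7, 5} :
                  std::vector<types::global_dof_index>{2, 0};

  Utilities::MPI::NoncontiguousPartitioner partitioner(indices_has,
                                                       indices_want,
                                                       MPI_COMM_WORLD);

  // fill the owned entries with some rank-dependent data ...
  std::vector<double> src(indices_has.size());
  for (unsigned int i = 0; i < src.size(); ++i)
    src[i] = 10.0 * rank + i;

  // ... and gather the wanted entries from their owning ranks
  std::vector<double> dst(indices_want.size());
  partitioner.export_to_ghosted_array(
    ArrayView<const double>(src.data(), src.size()),
    ArrayView<double>(dst.data(), dst.size()));

  // rank 0 now holds {13., 11.} (the values at global indices 7 and 5);
  // rank 1 now holds {2., 0.} (the values at global indices 2 and 0)
}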