Reference documentation for deal.II version 9.5.0
mpi_noncontiguous_partitioner.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2020 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.templates.h>

#include <deal.II/lac/communication_pattern_base.h>

#include <boost/serialization/utility.hpp>


DEAL_II_NAMESPACE_OPEN
namespace Utilities
{
  namespace MPI
  {
    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const IndexSet &indexset_has,
      const IndexSet &indexset_want,
      const MPI_Comm  communicator)
    {
      this->reinit(indexset_has, indexset_want, communicator);
    }


    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      this->reinit(indices_has, indices_want, communicator);
    }


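    // The pair returned below contains the number of ranks this process
    // sends data to (first) and receives data from (second).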
    std::pair<unsigned int, unsigned int>
    NoncontiguousPartitioner::n_targets() const
    {
      return {send_ranks.size(), recv_ranks.size()};
    }


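    // The temporary storage size is the last entry of send_ptr; since
    // send_ptr is offset by recv_ptr.back() during reinit(), this value
    // covers the entries of both the receive and the send part of the
    // packed buffer.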
    unsigned int
    NoncontiguousPartitioner::temporary_storage_size() const
    {
      return send_ptr.back();
    }


    types::global_dof_index
    NoncontiguousPartitioner::memory_consumption()
    {
      return MemoryConsumption::memory_consumption(send_ranks) +
             MemoryConsumption::memory_consumption(send_ptr) +
             MemoryConsumption::memory_consumption(send_indices) +
             MemoryConsumption::memory_consumption(recv_ranks) +
             MemoryConsumption::memory_consumption(recv_ptr) +
             MemoryConsumption::memory_consumption(recv_indices) +
             MemoryConsumption::memory_consumption(buffers) +
             MemoryConsumption::memory_consumption(requests);
    }


    MPI_Comm
    NoncontiguousPartitioner::get_mpi_communicator() const
    {
      return communicator;
    }


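    // Given the locally owned indices (indexset_has) and the requested
    // indices (indexset_want), the following function determines which rank
    // owns each requested index and builds the packed send/receive index
    // lists used during data exchange.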
    void
    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
                                     const IndexSet &indexset_want,
                                     const MPI_Comm  communicator)
    {
      this->communicator = communicator;

      // clean up
      send_ranks.clear();
      send_ptr.clear();
      send_indices.clear();
      recv_ranks.clear();
      recv_ptr.clear();
      recv_indices.clear();
      buffers.clear();
      requests.clear();

      // set up communication pattern
      std::vector<unsigned int> owning_ranks_of_ghosts(
        indexset_want.n_elements());

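      // The dictionary set up below answers, for every element of
      // indexset_want, the question of which rank owns that index; once the
      // consensus algorithm has run, owning_ranks_of_ghosts[i] is the rank
      // owning the i-th wanted index.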
      // set up dictionary
      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
        process(indexset_has,
                indexset_want,
                communicator,
                owning_ranks_of_ghosts,
                true);

      Utilities::MPI::ConsensusAlgorithms::Selector<
        std::vector<
          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, communicator);

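      // The index lists built below use a CSR-like layout: values exchanged
      // with recv_ranks[i] correspond to the entries
      // recv_indices[recv_ptr[i]] ... recv_indices[recv_ptr[i+1]-1], and
      // analogously for the send side.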
      // set up map of processes from where this rank will receive values
      {
        std::map<unsigned int, std::vector<types::global_dof_index>> recv_map;

        for (const auto &owner : owning_ranks_of_ghosts)
          recv_map[owner] = std::vector<types::global_dof_index>();

        for (types::global_dof_index i = 0; i < owning_ranks_of_ghosts.size();
             i++)
          recv_map[owning_ranks_of_ghosts[i]].push_back(i);

        recv_ptr.push_back(recv_indices.size() /*=0*/);
        for (const auto &target_with_indexset : recv_map)
          {
            recv_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              recv_indices.push_back(cell_index);

            recv_ptr.push_back(recv_indices.size());
          }
      }

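      // set up map of processes to which this rank will send values: the
      // dictionary knows which ranks requested which locally owned indices,
      // and process.get_requesters() returns exactly this information; the
      // requested global indices are stored as positions within
      // indexset_has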
      {
        const auto targets_with_indexset = process.get_requesters();

        send_ptr.push_back(recv_ptr.back());
        for (const auto &target_with_indexset : targets_with_indexset)
          {
            send_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              send_indices.push_back(indexset_has.index_within_set(cell_index));

            send_ptr.push_back(send_indices.size() + recv_ptr.back());
          }
      }
    }


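    // In contrast to the IndexSet-based reinit() above, the following
    // overload accepts the indices in arbitrary order and allows
    // numbers::invalid_dof_index entries as padding; steps 0-3 below reduce
    // this case to the sorted IndexSet case and afterwards restore the
    // original ordering.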
    void
    NoncontiguousPartitioner::reinit(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm                              communicator)
    {
      // step 0) clean vectors from numbers::invalid_dof_index (indicating
      //         padding)
      std::vector<types::global_dof_index> indices_has_clean;
      indices_has_clean.reserve(indices_has.size());

      for (const auto i : indices_has)
        if (i != numbers::invalid_dof_index)
          indices_has_clean.push_back(i);

      std::vector<types::global_dof_index> indices_want_clean;
      indices_want_clean.reserve(indices_want.size());

      for (const auto i : indices_want)
        if (i != numbers::invalid_dof_index)
          indices_want_clean.push_back(i);

      // step 0) determine "number of degrees of freedom" needed for IndexSet
      const types::global_dof_index local_n_dofs_has =
        indices_has_clean.empty() ?
          0 :
          (*std::max_element(indices_has_clean.begin(),
                             indices_has_clean.end()) +
           1);

      const types::global_dof_index local_n_dofs_want =
        indices_want_clean.empty() ?
          0 :
          (*std::max_element(indices_want_clean.begin(),
                             indices_want_clean.end()) +
           1);

      const types::global_dof_index n_dofs =
        Utilities::MPI::max(std::max(local_n_dofs_has, local_n_dofs_want),
                            communicator);

      // step 1) convert vectors to indexsets (sorted!)
      IndexSet index_set_has(n_dofs);
      index_set_has.add_indices(indices_has_clean.begin(),
                                indices_has_clean.end());

      IndexSet index_set_want(n_dofs);
      index_set_want.add_indices(indices_want_clean.begin(),
                                 indices_want_clean.end());

      // step 2) set up internal data structures with indexset
      this->reinit(index_set_has, index_set_want, communicator);

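      // Example for the remapping below: for indices_has = {4, 1, 7}, the
      // sorted set is index_set_has = {1, 4, 7}, so the entry for global
      // index 1 has to move from sorted position 0 to original position 1,
      // index 4 from position 1 to 0, and index 7 stays at position 2.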
      // step 3) fix inner data structures so that they are sorted as
      //         in the original vectors
      {
        std::vector<types::global_dof_index> temp_map_send(
          index_set_has.n_elements());

        for (types::global_dof_index i = 0; i < indices_has.size(); ++i)
          if (indices_has[i] != numbers::invalid_dof_index)
            temp_map_send[index_set_has.index_within_set(indices_has[i])] = i;

        for (auto &i : send_indices)
          i = temp_map_send[i];
      }

      {
        std::vector<types::global_dof_index> temp_map_recv(
          index_set_want.n_elements());

        for (types::global_dof_index i = 0; i < indices_want.size(); ++i)
          if (indices_want[i] != numbers::invalid_dof_index)
            temp_map_recv[index_set_want.index_within_set(indices_want[i])] = i;

        for (auto &i : recv_indices)
          i = temp_map_recv[i];
      }
    }
  } // namespace MPI
} // namespace Utilities

#include "mpi_noncontiguous_partitioner.inst"

DEAL_II_NAMESPACE_CLOSE
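
For orientation, here is a minimal usage sketch of the class implemented above. It is not part of this file: the example() function and the concrete index values are purely illustrative, and it assumes the export_to_ghosted_array() member declared in mpi_noncontiguous_partitioner.h.

#include <deal.II/base/array_view.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>

#include <vector>

using namespace dealii;

void example(const MPI_Comm comm)
{
  // Indices this rank owns and indices it wants to read, both in
  // arbitrary, non-contiguous order (illustrative values; chosen so that
  // the example is self-consistent even on a single rank).
  const std::vector<types::global_dof_index> indices_has  = {4, 1, 7};
  const std::vector<types::global_dof_index> indices_want = {7, 1};

  Utilities::MPI::NoncontiguousPartitioner partitioner(indices_has,
                                                       indices_want,
                                                       comm);

  // src[i] holds the value for indices_has[i]; after the exchange, dst[i]
  // holds the value for indices_want[i] (here: {70., 10.}).
  std::vector<double> src = {40., 10., 70.};
  std::vector<double> dst(indices_want.size());

  partitioner.export_to_ghosted_array<double>(make_array_view(src),
                                              make_array_view(dst));
}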