Reference documentation for deal.II version 9.3.3
mpi_noncontiguous_partitioner.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/mpi_compute_index_owner_internal.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.h>
#include <deal.II/base/mpi_noncontiguous_partitioner.templates.h>

DEAL_II_NAMESPACE_OPEN

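// This file implements the NoncontiguousPartitioner class: a communication
// pattern for exchanging vector entries that are stored in arbitrary
// (possibly non-contiguous) order on different MPI ranks. (This comment was
// added for exposition and is not part of the original file.)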
namespace Utilities
{
  namespace MPI
  {
    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const IndexSet &indexset_has,
      const IndexSet &indexset_want,
      const MPI_Comm &communicator)
    {
      this->reinit(indexset_has, indexset_want, communicator);
    }



    NoncontiguousPartitioner::NoncontiguousPartitioner(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm &                            communicator)
    {
      this->reinit(indices_has, indices_want, communicator);
    }

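    // Usage sketch (illustrative only, not part of the original file; the
    // vectors `owned` and `ghost` are hypothetical): a partitioner built
    // from two arbitrarily ordered index lists can copy values from the
    // owned ordering into the ghosted ordering via export_to_ghosted_array():
    //
    //   std::vector<types::global_dof_index> owned = {4, 1, 7};
    //   std::vector<types::global_dof_index> ghost = {7, 4};
    //   NoncontiguousPartitioner partitioner(owned, ghost, MPI_COMM_WORLD);
    //
    //   std::vector<double> src(owned.size()), dst(ghost.size());
    //   partitioner.export_to_ghosted_array(make_array_view(src),
    //                                       make_array_view(dst));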

    std::pair<unsigned int, unsigned int>
    NoncontiguousPartitioner::n_targets() const
    {
      return {send_ranks.size(), recv_ranks.size()};
    }



    unsigned int
    NoncontiguousPartitioner::temporary_storage_size() const
    {
      return send_ptr.back();
    }
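    // Note (added for exposition): send_ptr is offset by the total number of
    // received elements (see reinit() below), so send_ptr.back() equals the
    // combined count of incoming and outgoing values, i.e., the number of
    // elements the temporary communication buffer must hold.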


    types::global_dof_index
    NoncontiguousPartitioner::memory_consumption()
    {
      return MemoryConsumption::memory_consumption(send_ranks) +
             MemoryConsumption::memory_consumption(send_ptr) +
             MemoryConsumption::memory_consumption(send_indices) +
             MemoryConsumption::memory_consumption(recv_ranks) +
             MemoryConsumption::memory_consumption(recv_ptr) +
             MemoryConsumption::memory_consumption(recv_indices) +
             MemoryConsumption::memory_consumption(buffers) +
             MemoryConsumption::memory_consumption(requests);
    }


    const MPI_Comm &
    NoncontiguousPartitioner::get_mpi_communicator() const
    {
      return communicator;
    }


    void
    NoncontiguousPartitioner::reinit(const IndexSet &indexset_has,
                                     const IndexSet &indexset_want,
                                     const MPI_Comm &communicator)
    {
      this->communicator = communicator;

      // clean up
      send_ranks.clear();
      send_ptr.clear();
      send_indices.clear();
      recv_ranks.clear();
      recv_ptr.clear();
      recv_indices.clear();
      buffers.clear();
      requests.clear();

      // set up the communication pattern: determine the owning rank of
      // each index this rank wants
      std::vector<unsigned int> owning_ranks_of_ghosts(
        indexset_want.n_elements());

      // set up dictionary
      Utilities::MPI::internal::ComputeIndexOwner::ConsensusAlgorithmsPayload
        process(indexset_has,
                indexset_want,
                communicator,
                owning_ranks_of_ghosts,
                true);

      Utilities::MPI::ConsensusAlgorithms::Selector<
        std::pair<types::global_dof_index, types::global_dof_index>,
        unsigned int>
        consensus_algorithm(process, communicator);
      consensus_algorithm.run();

      // set up the map of processes from which this rank will receive
      // values
      {
        std::map<unsigned int, std::vector<types::global_dof_index>> recv_map;

        for (const auto &owner : owning_ranks_of_ghosts)
          recv_map[owner] = std::vector<types::global_dof_index>();

        for (types::global_dof_index i = 0; i < owning_ranks_of_ghosts.size();
             i++)
          recv_map[owning_ranks_of_ghosts[i]].push_back(i);

        recv_ptr.push_back(recv_indices.size() /*=0*/);
        for (const auto &target_with_indexset : recv_map)
          {
            recv_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              recv_indices.push_back(cell_index);

            recv_ptr.push_back(recv_indices.size());
          }
      }

      {
        const auto targets_with_indexset = process.get_requesters();

        send_ptr.push_back(recv_ptr.back());
        for (const auto &target_with_indexset : targets_with_indexset)
          {
            send_ranks.push_back(target_with_indexset.first);

            for (const auto cell_index : target_with_indexset.second)
              send_indices.push_back(indexset_has.index_within_set(cell_index));

            send_ptr.push_back(send_indices.size() + recv_ptr.back());
          }
      }
    }
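    // Illustration (added for exposition, not part of the original file):
    // recv_ranks, recv_ptr, and recv_indices form a CSR-like structure. If
    // this rank receives three values from rank 0 and two from rank 2, then
    // after reinit()
    //   recv_ranks   = {0, 2}
    //   recv_ptr     = {0, 3, 5}
    //   recv_indices = the five local positions the incoming values go to
    // send_ptr starts at recv_ptr.back() so that the send and receive ranges
    // can share a single contiguous temporary buffer.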


    void
    NoncontiguousPartitioner::reinit(
      const std::vector<types::global_dof_index> &indices_has,
      const std::vector<types::global_dof_index> &indices_want,
      const MPI_Comm &                            communicator)
    {
      // step 0) clean the vectors of numbers::invalid_dof_index entries
      //         (which indicate padding)
      std::vector<types::global_dof_index> indices_has_clean;
      indices_has_clean.reserve(indices_has.size());

      for (const auto i : indices_has)
        if (i != numbers::invalid_dof_index)
          indices_has_clean.push_back(i);

      std::vector<types::global_dof_index> indices_want_clean;
      indices_want_clean.reserve(indices_want.size());

      for (const auto i : indices_want)
        if (i != numbers::invalid_dof_index)
          indices_want_clean.push_back(i);

      // step 1) determine the "number of degrees of freedom" needed for
      //         the IndexSets
      const types::global_dof_index local_n_dofs_has =
        indices_has_clean.empty() ?
          0 :
          (*std::max_element(indices_has_clean.begin(),
                             indices_has_clean.end()) +
           1);

      const types::global_dof_index local_n_dofs_want =
        indices_want_clean.empty() ?
          0 :
          (*std::max_element(indices_want_clean.begin(),
                             indices_want_clean.end()) +
           1);

      const types::global_dof_index n_dofs =
        Utilities::MPI::max(std::max(local_n_dofs_has, local_n_dofs_want),
                            communicator);

      // step 2) convert the vectors to (sorted!) IndexSets
      IndexSet index_set_has(n_dofs);
      index_set_has.add_indices(indices_has_clean.begin(),
                                indices_has_clean.end());

      IndexSet index_set_want(n_dofs);
      index_set_want.add_indices(indices_want_clean.begin(),
                                 indices_want_clean.end());

      // step 3) set up the internal data structures with the IndexSets
      this->reinit(index_set_has, index_set_want, communicator);

      // step 4) fix the inner data structures so that they are sorted as
      //         in the original vectors
      {
        std::vector<types::global_dof_index> temp_map_send(
          index_set_has.n_elements());

        for (types::global_dof_index i = 0; i < indices_has.size(); i++)
          if (indices_has[i] != numbers::invalid_dof_index)
            temp_map_send[index_set_has.index_within_set(indices_has[i])] = i;

        for (auto &i : send_indices)
          i = temp_map_send[i];
      }

      {
        std::vector<types::global_dof_index> temp_map_recv(
          index_set_want.n_elements());

        for (types::global_dof_index i = 0; i < indices_want.size(); i++)
          if (indices_want[i] != numbers::invalid_dof_index)
            temp_map_recv[index_set_want.index_within_set(indices_want[i])] = i;

        for (auto &i : recv_indices)
          i = temp_map_recv[i];
      }
    }
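    // Illustration (added for exposition, not part of the original file):
    // entries equal to numbers::invalid_dof_index act as padding. For
    // example, with
    //   indices_want = {7, numbers::invalid_dof_index, 3}
    // only the values of global indices 7 and 3 are requested; the padded
    // middle position of the ghost array is skipped during communication.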
  } // namespace MPI
} // namespace Utilities


#include "mpi_noncontiguous_partitioner.inst"

DEAL_II_NAMESPACE_CLOSE