Reference documentation for deal.II version 9.5.0
partitioner.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 1999 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#include <deal.II/base/partitioner.h>
18#include <deal.II/base/partitioner.templates.h>
19
20#include <boost/serialization/utility.hpp>
21
22#include <limits>
23
24DEAL_II_NAMESPACE_OPEN
25
26namespace Utilities
27{
28 namespace MPI
29 {
30 Partitioner::Partitioner()
31 : global_size(0)
32 , local_range_data(
33 std::pair<types::global_dof_index, types::global_dof_index>(0, 0))
34 , n_ghost_indices_data(0)
35 , n_import_indices_data(0)
36 , n_ghost_indices_in_larger_set(0)
37 , my_pid(0)
38 , n_procs(1)
39 , communicator(MPI_COMM_SELF)
40 , have_ghost_indices(false)
41 {}
42
43
44
45 Partitioner::Partitioner(const unsigned int size)
46 : global_size(size)
47 , locally_owned_range_data(size)
48 , local_range_data(
49 std::pair<types::global_dof_index, types::global_dof_index>(0, size))
50 , n_ghost_indices_data(0)
51 , n_import_indices_data(0)
52 , n_ghost_indices_in_larger_set(0)
53 , my_pid(0)
54 , n_procs(1)
55 , communicator(MPI_COMM_SELF)
56 , have_ghost_indices(false)
57 {
58 locally_owned_range_data.add_range(0, size);
59 locally_owned_range_data.compress();
60 ghost_indices_data.set_size(size);
61 }
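    // Usage sketch (added for illustration; not part of this file): with the
    // constructor above, every index of a vector of the given size is locally
    // owned and there are no ghost entries, so the Partitioner behaves like a
    // serial index range.
    //
    //   Utilities::MPI::Partitioner serial_part(100);
    //   // serial_part.locally_owned_size() == 100
    //   // serial_part.n_ghost_indices()    == 0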
62
63
64
65 Partitioner::Partitioner(const types::global_dof_index local_size,
66 const types::global_dof_index ghost_size,
67 const MPI_Comm communicator)
68 : global_size(Utilities::MPI::sum<types::global_dof_index>(local_size,
69 communicator))
70 , locally_owned_range_data(global_size)
71 , local_range_data{0, local_size}
72 , n_ghost_indices_data(ghost_size)
73 , n_import_indices_data(0)
74 , n_ghost_indices_in_larger_set(0)
75 , my_pid(Utilities::MPI::this_mpi_process(communicator))
76 , n_procs(Utilities::MPI::n_mpi_processes(communicator))
77 , communicator(communicator)
78 , have_ghost_indices(true)
79 {
80 types::global_dof_index prefix_sum = 0;
81
82#ifdef DEAL_II_WITH_MPI
83 const int ierr =
84 MPI_Exscan(&local_size,
85 &prefix_sum,
86 1,
87 Utilities::MPI::mpi_type_id_for_type<decltype(prefix_sum)>,
88 MPI_SUM,
89 communicator);
90 AssertThrowMPI(ierr);
91#endif
92
93 local_range_data = {prefix_sum, prefix_sum + local_size};
94
95 locally_owned_range_data.add_range(prefix_sum, prefix_sum + local_size);
96 locally_owned_range_data.compress();
97 }
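    // Worked example (added for illustration; not part of the original
    // source): MPI_Exscan computes an exclusive prefix sum over the ranks,
    // which gives each rank the first index of its contiguous locally owned
    // range. With three ranks and local sizes {4, 3, 5}:
    //
    //   rank 0: prefix_sum stays at its initial value 0  ->  owns [0, 4)
    //   rank 1: prefix_sum = 4                           ->  owns [4, 7)
    //   rank 2: prefix_sum = 4 + 3 = 7                   ->  owns [7, 12)
    //
    // and global_size = 4 + 3 + 5 = 12 on every rank.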
98
99
100
101 Partitioner::Partitioner(const IndexSet &locally_owned_indices,
102 const IndexSet &ghost_indices_in,
103 const MPI_Comm communicator_in)
104 : global_size(
105 static_cast<types::global_dof_index>(locally_owned_indices.size()))
106 , n_ghost_indices_data(0)
107 , n_import_indices_data(0)
108 , n_ghost_indices_in_larger_set(0)
109 , my_pid(0)
110 , n_procs(1)
111 , communicator(communicator_in)
112 , have_ghost_indices(false)
113 {
114 set_owned_indices(locally_owned_indices);
115 set_ghost_indices(ghost_indices_in);
116 }
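    // Usage sketch (hypothetical, assuming exactly two MPI ranks; not part of
    // this file): each rank owns one half of a vector with 20 entries and
    // additionally ghosts the entry of the other rank adjacent to its own
    // range.
    //
    //   const unsigned int rank =
    //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    //   IndexSet owned(20), ghosts(20);
    //   owned.add_range(rank == 0 ? 0 : 10, rank == 0 ? 10 : 20);
    //   ghosts.add_index(rank == 0 ? 10 : 9);
    //   Utilities::MPI::Partitioner part(owned, ghosts, MPI_COMM_WORLD);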
117
118
119
120 Partitioner::Partitioner(const IndexSet &locally_owned_indices,
121 const MPI_Comm communicator_in)
122 : global_size(
123 static_cast<types::global_dof_index>(locally_owned_indices.size()))
124 , n_ghost_indices_data(0)
125 , n_import_indices_data(0)
126 , n_ghost_indices_in_larger_set(0)
127 , my_pid(0)
128 , n_procs(1)
129 , communicator(communicator_in)
130 , have_ghost_indices(false)
131 {
132 set_owned_indices(locally_owned_indices);
133 }
134
135
136
137 void
138 Partitioner::reinit(const IndexSet &vector_space_vector_index_set,
139 const IndexSet &read_write_vector_index_set,
140 const MPI_Comm communicator_in)
141 {
142 have_ghost_indices = false;
143 communicator = communicator_in;
144 set_owned_indices(vector_space_vector_index_set);
145 set_ghost_indices(read_write_vector_index_set);
146 }
147
148
149
150 void
151 Partitioner::set_owned_indices(const IndexSet &locally_owned_indices)
152 {
153 if (Utilities::MPI::job_supports_mpi())
154 {
155 my_pid = Utilities::MPI::this_mpi_process(communicator);
156 n_procs = Utilities::MPI::n_mpi_processes(communicator);
157 }
158 else
159 {
160 my_pid = 0;
161 n_procs = 1;
162 }
163
164 // set the local range
165 Assert(locally_owned_indices.is_contiguous() == true,
166 ExcMessage("The index set specified in locally_owned_indices "
167 "is not contiguous."));
168 locally_owned_indices.compress();
169 if (locally_owned_indices.n_elements() > 0)
170 local_range_data =
171 std::pair<types::global_dof_index, types::global_dof_index>(
172 locally_owned_indices.nth_index_in_set(0),
173 locally_owned_indices.nth_index_in_set(0) +
174 locally_owned_indices.n_elements());
175 AssertThrow(
176 local_range_data.second - local_range_data.first <
177 static_cast<types::global_dof_index>(
178 std::numeric_limits<unsigned int>::max()),
180 "Index overflow: This class supports at most 2^32-1 locally owned vector entries"));
181 locally_owned_range_data.set_size(locally_owned_indices.size());
182 locally_owned_range_data.add_range(local_range_data.first,
183 local_range_data.second);
184 locally_owned_range_data.compress();
185
186 ghost_indices_data.set_size(locally_owned_indices.size());
187 }
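    // Illustration (added; not part of the original source): the locally
    // owned index set must be a single contiguous range. For an IndexSet of
    // global size 100 containing {40, ..., 59}, local_range_data becomes the
    // half-open interval [40, 60); a set such as {0, ..., 4, 7, ..., 9} would
    // trigger the contiguity assertion above.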
188
189
190
191 void
192 Partitioner::set_ghost_indices(const IndexSet &ghost_indices_in,
193 const IndexSet &larger_ghost_index_set)
194 {
195 // Set ghost indices from input. To be sure that no entries from the
196 // locally owned range are present, subtract the locally owned indices
197 // in any case.
198 Assert(ghost_indices_in.n_elements() == 0 ||
199 ghost_indices_in.size() == locally_owned_range_data.size(),
200 ExcDimensionMismatch(ghost_indices_in.size(),
201 locally_owned_range_data.size()));
202
203 ghost_indices_data = ghost_indices_in;
204 if (ghost_indices_data.size() != locally_owned_range_data.size())
205 ghost_indices_data.set_size(locally_owned_range_data.size());
206 ghost_indices_data.subtract_set(locally_owned_range_data);
207 ghost_indices_data.compress();
208 AssertThrow(
209 ghost_indices_data.n_elements() <
210 static_cast<types::global_dof_index>(
211 std::numeric_limits<unsigned int>::max()),
212 ExcMessage(
213 "Index overflow: This class supports at most 2^32-1 ghost elements"));
214 n_ghost_indices_data = ghost_indices_data.n_elements();
215
216 have_ghost_indices =
217 Utilities::MPI::max(n_ghost_indices_data, communicator) > 0;
218
219 // In the rest of this function, we determine the point-to-point
220 // communication pattern of the partitioner. We build up a list of both
221 // the processors that the ghost indices actually belong to, and the
222 // indices that are locally held but are ghost indices of other
223 // processors. This then allows us to import and export data easily.
224
225 // find out the end index for each processor and communicate it (this
226 // implies the start index for the next processor)
227#ifdef DEAL_II_WITH_MPI
228 if (n_procs < 2)
229 {
230 Assert(ghost_indices_data.n_elements() == 0, ExcInternalError());
231 Assert(n_import_indices_data == 0, ExcInternalError());
232 Assert(n_ghost_indices_data == 0, ExcInternalError());
233 return;
234 }
235
236 types::global_dof_index my_size = locally_owned_size();
237
238 // Allow non-zero start index for the vector. Part 1:
239 // Assume for now that the index set of rank 0 starts with 0
240 // and therefore has an increased size.
241 if (my_pid == 0)
242 my_size += local_range_data.first;
243
244 types::global_dof_index my_shift = 0;
245 {
246 const int ierr = MPI_Exscan(&my_size,
247 &my_shift,
248 1,
249 DEAL_II_DOF_INDEX_MPI_TYPE,
250 MPI_SUM,
251 communicator);
252 AssertThrowMPI(ierr);
253 }
254
255 // Allow non-zero start index for the vector. Part 2:
256 // We correct the assumption made above and let the
257 // index set of rank 0 actually start from the
258 // correct value, i.e. we correct the shift to
259 // its start.
260 if (my_pid == 0)
261 my_shift = local_range_data.first;
262
263 // Fix the index start in case the index set could not give us that
264 // information.
265 if (local_range_data.first == 0 && my_shift != 0)
266 {
267 const types::global_dof_index old_locally_owned_size =
268 locally_owned_size();
269 local_range_data.first = my_shift;
270 local_range_data.second = my_shift + old_locally_owned_size;
271 }
272
273 std::vector<unsigned int> owning_ranks_of_ghosts(
274 ghost_indices_data.n_elements());
275
276 // set up dictionary
277 internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
278 locally_owned_range_data,
279 ghost_indices_data,
280 communicator,
281 owning_ranks_of_ghosts,
282 /* track origins of ghosts*/ true);
283
284 // read dictionary by communicating with the process who owns the index
285 // in the static partition (i.e. in the dictionary). This process
286 // returns the actual owner of the index.
287 ConsensusAlgorithms::Selector<
288 std::vector<
289 std::pair<types::global_dof_index, types::global_dof_index>>,
290 std::vector<unsigned int>>
291 consensus_algorithm;
292 consensus_algorithm.run(process, communicator);
293
294 {
295 ghost_targets_data = {};
296
297 if (owning_ranks_of_ghosts.size() > 0)
298 {
299 ghost_targets_data.emplace_back(owning_ranks_of_ghosts[0], 0);
300 for (auto i : owning_ranks_of_ghosts)
301 {
302 Assert(i >= ghost_targets_data.back().first,
304 "Expect result of ConsensusAlgorithms::Process to be "
305 "sorted."));
306 if (i == ghost_targets_data.back().first)
307 ghost_targets_data.back().second++;
308 else
309 ghost_targets_data.emplace_back(i, 1);
310 }
311 }
312 }
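      // Worked example (added for illustration): the sorted list of owning
      // ranks is run-length compressed into (rank, number of ghosts) pairs,
      // e.g. owning_ranks_of_ghosts = {1, 1, 3, 3, 3} yields
      // ghost_targets_data = {(1, 2), (3, 3)}.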
313
314 // find out how much the individual processes want to import from me
315 std::map<unsigned int, IndexSet> import_data = process.get_requesters();
316
317 // count import requests and set up the compressed indices
318 n_import_indices_data = 0;
319 import_targets_data = {};
320 import_targets_data.reserve(import_data.size());
321 import_indices_chunks_by_rank_data = {};
322 import_indices_chunks_by_rank_data.reserve(import_data.size());
323 import_indices_chunks_by_rank_data.resize(1);
324 for (const auto &i : import_data)
325 if (i.second.n_elements() > 0)
326 {
327 import_targets_data.emplace_back(i.first, i.second.n_elements());
328 n_import_indices_data += i.second.n_elements();
329 import_indices_chunks_by_rank_data.push_back(
330 import_indices_chunks_by_rank_data.back() +
331 i.second.n_intervals());
332 }
333
334 // transform import indices to local index space
335 import_indices_data = {};
336 import_indices_data.reserve(import_indices_chunks_by_rank_data.back());
337 for (const auto &i : import_data)
338 {
339 Assert((i.second & locally_owned_range_data) == i.second,
340 ExcInternalError("Requested indices must be in local range"));
341 for (auto interval = i.second.begin_intervals();
342 interval != i.second.end_intervals();
343 ++interval)
344 import_indices_data.emplace_back(*interval->begin() -
345 local_range_data.first,
346 interval->last() + 1 -
347 local_range_data.first);
348 }
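      // Worked example (added for illustration): if this process owns the
      // global range [40, 60) and another process requests the indices
      // {42, 43, 44, 50}, the two intervals are stored in local index space
      // as the half-open pairs (2, 5) and (10, 11).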
349
350# ifdef DEBUG
351
352 // simple check: the number of processors to which we want to send
353 // ghosts and the number of processors that our ghost indices
354 // reference should be the same
355 AssertDimension(
356 Utilities::MPI::sum(import_targets_data.size(), communicator),
357 Utilities::MPI::sum(ghost_targets_data.size(), communicator));
358
359 // simple check: the number of indices to exchange should match from the
360 // ghost indices side and the import indices side
361 AssertDimension(Utilities::MPI::sum(n_import_indices_data, communicator),
362 Utilities::MPI::sum(n_ghost_indices_data, communicator));
363
364 // expensive check that the communication channel is sane -> do a ghost
365 // exchange step and see whether the ghost indices sent to us by other
366 // processes (ghost_indices) are the same as those we hold locally
367 // (ghost_indices_ref).
368 const std::vector<types::global_dof_index> ghost_indices_ref =
369 ghost_indices_data.get_index_vector();
370 AssertDimension(ghost_indices_ref.size(), n_ghost_indices());
371 std::vector<types::global_dof_index> indices_to_send(n_import_indices());
372 std::vector<types::global_dof_index> ghost_indices(n_ghost_indices());
373
374 const std::vector<types::global_dof_index> my_indices =
375 locally_owned_range_data.get_index_vector();
376 std::vector<MPI_Request> requests;
377 export_to_ghosted_array_start<types::global_dof_index>(
378 127,
379 ArrayView<const types::global_dof_index>(
380 my_indices.data(), my_indices.size()),
381 make_array_view(indices_to_send),
382 make_array_view(ghost_indices),
383 requests);
384 export_to_ghosted_array_finish(make_array_view(ghost_indices), requests);
385 int flag = 0;
386 const int ierr = MPI_Testall(requests.size(),
387 requests.data(),
388 &flag,
389 MPI_STATUSES_IGNORE);
390 AssertThrowMPI(ierr);
391 Assert(flag == 1,
393 "MPI found unfinished requests. Check communication setup"));
394
395 for (unsigned int i = 0; i < ghost_indices.size(); ++i)
396 AssertDimension(ghost_indices[i], ghost_indices_ref[i]);
397
398# endif
399
400#endif // #ifdef DEAL_II_WITH_MPI
401
402 if (larger_ghost_index_set.size() == 0)
403 {
404 ghost_indices_subset_chunks_by_rank_data.clear();
405 ghost_indices_subset_data.emplace_back(0, n_ghost_indices());
406 n_ghost_indices_in_larger_set = n_ghost_indices_data;
407 }
408 else
409 {
410 AssertDimension(larger_ghost_index_set.size(),
411 ghost_indices_data.size());
412 Assert(
413 (larger_ghost_index_set & locally_owned_range_data).n_elements() ==
414 0,
415 ExcMessage("Ghost index set should not overlap with owned set."));
416 Assert((larger_ghost_index_set & ghost_indices_data) ==
418 ExcMessage("Larger ghost index set must contain the tight "
419 "ghost index set."));
420
421 n_ghost_indices_in_larger_set = larger_ghost_index_set.n_elements();
422
423 // first translate tight ghost indices into indices within the large
424 // set:
425 std::vector<unsigned int> expanded_numbering;
426 for (const ::IndexSet::size_type index : ghost_indices_data)
427 {
428 Assert(larger_ghost_index_set.is_element(index),
429 ExcMessage("The given larger ghost index set must contain "
430 "all indices in the actual index set."));
431 Assert(
432 larger_ghost_index_set.index_within_set(index) <
433 static_cast<types::global_dof_index>(
434 std::numeric_limits<unsigned int>::max()),
436 "Index overflow: This class supports at most 2^32-1 ghost elements"));
437 expanded_numbering.push_back(
438 larger_ghost_index_set.index_within_set(index));
439 }
440
441 // now rework expanded_numbering into ranges and store in:
442 std::vector<std::pair<unsigned int, unsigned int>>
443 ghost_indices_subset;
444 ghost_indices_subset_chunks_by_rank_data.resize(
445 ghost_targets_data.size() + 1);
446 // also populate ghost_indices_subset_chunks_by_rank_data
447 ghost_indices_subset_chunks_by_rank_data[0] = 0;
448 unsigned int shift = 0;
449 for (unsigned int p = 0; p < ghost_targets_data.size(); ++p)
450 {
451 unsigned int last_index = numbers::invalid_unsigned_int - 1;
452 for (unsigned int ii = 0; ii < ghost_targets_data[p].second; ++ii)
453 {
454 const unsigned int i = shift + ii;
455 if (expanded_numbering[i] == last_index + 1)
456 // if contiguous, increment the end of last range:
457 ghost_indices_subset.back().second++;
458 else
459 // otherwise start a new range
460 ghost_indices_subset.emplace_back(expanded_numbering[i],
461 expanded_numbering[i] +
462 1);
463 last_index = expanded_numbering[i];
464 }
465 shift += ghost_targets_data[p].second;
466 ghost_indices_subset_chunks_by_rank_data[p + 1] =
467 ghost_indices_subset.size();
468 }
469 ghost_indices_subset_data = ghost_indices_subset;
470 }
471 }
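    // Worked example (added for illustration; assumes both ghost entries are
    // owned by the same remote rank): for the tight ghost set {5, 9} embedded
    // in the larger ghost set {3, 5, 8, 9}, the positions of the tight ghosts
    // within the larger set are 1 and 3, so ghost_indices_subset_data stores
    // the half-open ranges (1, 2) and (3, 4), and
    // n_ghost_indices_in_larger_set == 4.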
472
473
474
475 bool
476 Partitioner::is_compatible(const Partitioner &part) const
477 {
478 // if the given partitioner points to the same memory location as the
479 // current object, the two are trivially compatible
480 if (&part == this)
481 return true;
482#ifdef DEAL_II_WITH_MPI
483 if (Utilities::MPI::job_supports_mpi())
484 {
485 int communicators_same = 0;
486 const int ierr = MPI_Comm_compare(part.communicator,
487 communicator,
488 &communicators_same);
489 AssertThrowMPI(ierr);
490 if (!(communicators_same == MPI_IDENT ||
491 communicators_same == MPI_CONGRUENT))
492 return false;
493 }
494#endif
495 return (global_size == part.global_size &&
496 local_range_data == part.local_range_data &&
497 ghost_indices_data == part.ghost_indices_data);
498 }
499
500
501
502 bool
503 Partitioner::is_globally_compatible(const Partitioner &part) const
504 {
505 return Utilities::MPI::min(static_cast<int>(is_compatible(part)),
506 communicator) == 1;
507 }
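    // Usage sketch (hypothetical; not part of this file): is_compatible() is
    // a cheap, purely local check, whereas is_globally_compatible() performs
    // a collective reduction and must therefore be called on all ranks of the
    // communicator.
    //
    //   if (partitioner_a->is_globally_compatible(*partitioner_b))
    //     {
    //       // the index layouts agree on every process, so ghosted data can
    //       // be exchanged between vectors built on either partitioner
    //     }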
508
509
510
511 std::size_t
512 Partitioner::memory_consumption() const
513 {
514 std::size_t memory = MemoryConsumption::memory_consumption(global_size);
515 memory += locally_owned_range_data.memory_consumption();
516 memory += MemoryConsumption::memory_consumption(local_range_data);
517 memory += ghost_indices_data.memory_consumption();
518 memory += sizeof(n_ghost_indices_data);
519 memory += MemoryConsumption::memory_consumption(ghost_targets_data);
520 memory += MemoryConsumption::memory_consumption(import_indices_data);
521 memory += sizeof(import_indices_plain_dev) +
522 sizeof(*import_indices_plain_dev.begin()) *
523 import_indices_plain_dev.capacity();
524 memory += MemoryConsumption::memory_consumption(n_import_indices_data);
525 memory += MemoryConsumption::memory_consumption(import_targets_data);
526 memory += MemoryConsumption::memory_consumption(
527 import_indices_chunks_by_rank_data);
528 memory +=
529 MemoryConsumption::memory_consumption(n_ghost_indices_in_larger_set);
530 memory += MemoryConsumption::memory_consumption(
531 ghost_indices_subset_chunks_by_rank_data);
532 memory +=
533 MemoryConsumption::memory_consumption(ghost_indices_subset_data);
534 memory += MemoryConsumption::memory_consumption(my_pid);
535 memory += MemoryConsumption::memory_consumption(n_procs);
536 memory += MemoryConsumption::memory_consumption(communicator);
537 memory += MemoryConsumption::memory_consumption(have_ghost_indices);
538 return memory;
539 }
540
541
542
543 void
544 Partitioner::initialize_import_indices_plain_dev() const
545 {
546 const unsigned int n_import_targets = import_targets_data.size();
547 import_indices_plain_dev.reserve(n_import_targets);
548 for (unsigned int i = 0; i < n_import_targets; ++i)
549 {
550 // Expand the indices on the host
551 std::vector<std::pair<unsigned int, unsigned int>>::const_iterator
552 my_imports = import_indices_data.begin() +
553 import_indices_chunks_by_rank_data[i],
554 end_my_imports = import_indices_data.begin() +
555 import_indices_chunks_by_rank_data[i + 1];
556 std::vector<unsigned int> import_indices_plain_host;
557 for (; my_imports != end_my_imports; ++my_imports)
558 {
559 const unsigned int chunk_size =
560 my_imports->second - my_imports->first;
561 for (unsigned int j = 0; j < chunk_size; ++j)
562 import_indices_plain_host.push_back(my_imports->first + j);
563 }
564
565 // Move the indices to the device
566 const auto chunk_size = import_indices_plain_host.size();
567 import_indices_plain_dev.emplace_back("import_indices_plain_dev" +
568 std::to_string(i),
569 chunk_size);
570 Kokkos::deep_copy(import_indices_plain_dev.back(),
571 Kokkos::View<unsigned int *, Kokkos::HostSpace>(
572 import_indices_plain_host.data(), chunk_size));
573 }
574 }
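    // Illustration (added; not part of the original source): for each import
    // target, the half-open ranges stored in import_indices_data are expanded
    // into a flat list of local indices before being copied to the device,
    // e.g. the ranges (2, 5) and (10, 11) become the plain array {2, 3, 4, 10}.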
575
576 } // namespace MPI
577
578} // end of namespace Utilities
579
580DEAL_II_NAMESPACE_CLOSE
581
582// explicit instantiations from .templates.h file
583#include "partitioner.inst"
584