Reference documentation for deal.II version 9.5.0
mpi_compute_index_owner_internal.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2019 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16#include <deal.II/base/config.h>
17
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/mpi_compute_index_owner_internal.h>
20
21#include <boost/serialization/utility.hpp>
22
23DEAL_II_NAMESPACE_OPEN
24
25namespace Utilities
26{
27 namespace MPI
28 {
29 namespace internal
30 {
31 namespace ComputeIndexOwner
32 {
33 const FlexibleIndexStorage::index_type
34 FlexibleIndexStorage::invalid_index_value;
35
36
37
38 FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
39 : use_vector(use_vector)
40 , size(0)
41 {}
42
43
44
45 void
46 FlexibleIndexStorage::reinit(const bool use_vector,
47 const bool index_range_contiguous,
48 const std::size_t size)
49 {
50 this->use_vector = use_vector;
51 this->size = size;
52
53 data = {};
54 data_map.clear();
55
56 // in case we have contiguous indices, only fill the vector upon
57 // first request in `fill`
58 if (!index_range_contiguous)
59 data.resize(size, invalid_index_value);
60 }
61
62
63
64 void
65 FlexibleIndexStorage::fill(
66 const std::size_t start,
67 const std::size_t end,
68 const FlexibleIndexStorage::index_type &value)
69 {
70 AssertIndexRange(start, size);
71 AssertIndexRange(end, size + 1);
72
73 if (use_vector)
74 {
75 if (data.empty() && end > start)
76 {
77 // in debug mode, we want to track whether we set all
78 // indices, so we first fill the vector with an invalid
79 // index and only later the actual ones. In release mode,
80 // we simply assign the given rank to the complete vector
81 // the first time we pass through this function, to avoid
82 // touching data unnecessarily; the smaller pieces written
83 // later overwrite it (the locally owned part comes first).
84#ifdef DEBUG
85 data.resize(size, invalid_index_value);
86 std::fill(data.begin() + start, data.begin() + end, value);
87#else
88 data.resize(size, value);
89#endif
90 }
91 else
92 {
93 AssertDimension(data.size(), size);
94 std::fill(data.begin() + start, data.begin() + end, value);
95 }
96 }
97 else
98 {
99 for (auto i = start; i < end; ++i)
100 data_map[i] = value;
101 }
102 }
103
104
105
106 FlexibleIndexStorage::index_type &
107 FlexibleIndexStorage::operator[](const std::size_t index)
108 {
109 AssertIndexRange(index, size);
110
111 if (use_vector)
112 {
113 AssertDimension(data.size(), size);
114 return data[index];
115 }
116 else
117 {
118 if (data_map.find(index) == data_map.end())
119 data_map[index] = invalid_index_value;
120
121 return data_map[index];
122 }
123 }
124
125
126
127 FlexibleIndexStorage::index_type
128 FlexibleIndexStorage::operator[](const std::size_t index) const
129 {
130 AssertIndexRange(index, size);
131
132 if (use_vector)
133 {
134 AssertDimension(data.size(), size);
135 return data[index];
136 }
137 else
138 {
139 if (data_map.find(index) == data_map.end())
140 return invalid_index_value;
141
142 return data_map.at(index);
143 }
144 }
145
146
147
148 bool
149 FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
150 {
151 AssertIndexRange(index, size);
152
153 if (use_vector)
154 {
155 if (data.empty())
156 return false;
157
158 AssertDimension(data.size(), size);
159 return data[index] != invalid_index_value;
160 }
161 else
162 return data_map.find(index) != data_map.end();
163 }
164
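FlexibleIndexStorage, whose members are implemented above, switches between a dense std::vector and a sparse std::map depending on how densely the locally stored dictionary range will be populated (the caller decides this via use_vector; compare the sparsity_factor test in Dictionary::reinit() below). The stand-alone sketch that follows only illustrates the same trade-off; the class and member names are illustrative stand-ins, not the declarations from mpi_compute_index_owner_internal.h.

#include <cstddef>
#include <limits>
#include <map>
#include <vector>

// Simplified stand-in for the vector-vs-map switch used by
// FlexibleIndexStorage above; not the deal.II class itself.
class DenseOrSparseRanks
{
public:
  static constexpr unsigned int invalid_rank =
    std::numeric_limits<unsigned int>::max();

  void reinit(const bool use_vector, const std::size_t size)
  {
    this->use_vector = use_vector;
    data.clear();
    data_map.clear();
    if (use_vector)
      data.assign(size, invalid_rank); // dense: memory proportional to size
  }

  void set(const std::size_t index, const unsigned int rank)
  {
    // dense mode assumes index < size given to reinit()
    if (use_vector)
      data[index] = rank;
    else
      data_map[index] = rank; // sparse: memory proportional to entries set
  }

  unsigned int get(const std::size_t index) const
  {
    if (use_vector)
      return data[index];
    const auto it = data_map.find(index);
    return (it != data_map.end()) ? it->second : invalid_rank;
  }

private:
  bool                                use_vector = true;
  std::vector<unsigned int>           data;
  std::map<std::size_t, unsigned int> data_map;
};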
165
166
167 void
168 Dictionary::reinit(const IndexSet &owned_indices, const MPI_Comm comm)
169 {
170 // 1) set up the partition
171 this->partition(owned_indices, comm);
172
173#ifdef DEAL_II_WITH_MPI
174 unsigned int my_rank = this_mpi_process(comm);
175
176 types::global_dof_index dic_local_received = 0;
177 std::map<unsigned int,
178 std::vector<std::pair<types::global_dof_index,
179 types::global_dof_index>>>
180 buffers;
181
182 const auto owned_indices_size_actual =
183 Utilities::MPI::sum(owned_indices.n_elements(), comm);
184
185 actually_owning_ranks.reinit((owned_indices_size_actual *
186 sparsity_factor) > owned_indices.size(),
187 owned_indices_size_actual ==
188 owned_indices.size(),
189 locally_owned_size);
190
191 // 2) collect relevant processes and process local dict entries
192 for (auto interval = owned_indices.begin_intervals();
193 interval != owned_indices.end_intervals();
194 ++interval)
195 {
196 // Due to the granularity of the dictionary, the interval
197 // might be split into several ranges of processor owner
198 // ranks. Here, we process the interval by breaking it into
199 // smaller pieces in terms of the dictionary number.
200 std::pair<types::global_dof_index, types::global_dof_index>
201 index_range(*interval->begin(), interval->last() + 1);
202
203 AssertThrow(index_range.second <= size, ExcInternalError());
204
205 while (index_range.first != index_range.second)
206 {
207 Assert(index_range.first < index_range.second,
208 ExcInternalError());
209
210 const unsigned int owner =
211 dof_to_dict_rank(index_range.first);
212
213 // this explicitly picks up the formula of
214 // dof_to_dict_rank, so the two places must be in sync
215 const types::global_dof_index next_index =
216 std::min(get_index_offset(owner + 1), index_range.second);
217
218 Assert(next_index > index_range.first, ExcInternalError());
219
220# ifdef DEBUG
221 // make sure that the owner is the same on the current
222 // interval
223 for (types::global_dof_index i = index_range.first + 1;
224 i < next_index;
225 ++i)
226 AssertDimension(owner, dof_to_dict_rank(i));
227# endif
228
229 // add the interval, either to the local range or into a
230 // buffer to be sent to another processor
231 if (owner == my_rank)
232 {
233 actually_owning_ranks.fill(index_range.first -
234 local_range.first,
235 next_index - local_range.first,
236 my_rank);
237 dic_local_received += next_index - index_range.first;
238 if (actually_owning_rank_list.empty())
239 actually_owning_rank_list.push_back(my_rank);
240 }
241 else
242 buffers[owner].emplace_back(index_range.first, next_index);
243
244 index_range.first = next_index;
245 }
246 }
247
248 n_dict_procs_in_owned_indices = buffers.size();
249 std::vector<MPI_Request> request;
250
251 // Check if index set space is partitioned globally without gaps.
252 if (owned_indices_size_actual == owned_indices.size())
253 {
254 // no gaps: setup is simple! Processes send their locally owned
255 // indices to the dictionary. The dictionary stores the sending
256 // rank for each index. The dictionary knows that it is
257 // completely set up once all indices it is responsible
258 // for have been processed.
259
260 request.reserve(n_dict_procs_in_owned_indices);
261
262 // protect the following communication steps using a mutex:
263 static CollectiveMutex mutex;
264 CollectiveMutex::ScopedLock lock(mutex, comm);
265
266 const int mpi_tag =
267 Utilities::MPI::internal::Tags::dictionary_reinit;
268
269
270 // 3) send messages with local dofs to the right dict process
271 for (const auto &rank_pair : buffers)
272 {
273 request.push_back(MPI_Request());
274 const int ierr = MPI_Isend(rank_pair.second.data(),
275 rank_pair.second.size() * 2,
276 DEAL_II_DOF_INDEX_MPI_TYPE,
277 rank_pair.first,
278 mpi_tag,
279 comm,
280 &request.back());
281 AssertThrowMPI(ierr);
282 }
283
284 // 4) receive messages until all dofs in dict are processed
285 while (this->locally_owned_size != dic_local_received)
286 {
287 // wait for an incoming message
288 MPI_Status status;
289 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
290 AssertThrowMPI(ierr);
291
292 // retrieve size of incoming message
293 int number_amount;
294 ierr = MPI_Get_count(&status,
295 DEAL_II_DOF_INDEX_MPI_TYPE,
296 &number_amount);
297 AssertThrowMPI(ierr);
298
299 const auto other_rank = status.MPI_SOURCE;
300 actually_owning_rank_list.push_back(other_rank);
301
302 // receive message
303 Assert(number_amount % 2 == 0, ExcInternalError());
304 std::vector<
305 std::pair<types::global_dof_index, types::global_dof_index>>
306 buffer(number_amount / 2);
307 ierr = MPI_Recv(buffer.data(),
308 number_amount,
309 DEAL_II_DOF_INDEX_MPI_TYPE,
310 status.MPI_SOURCE,
311 status.MPI_TAG,
312 comm,
313 MPI_STATUS_IGNORE);
314 AssertThrowMPI(ierr);
315 // process message: loop over all intervals
316 for (auto interval : buffer)
317 {
318# ifdef DEBUG
319 for (types::global_dof_index i = interval.first;
320 i < interval.second;
321 i++)
322 Assert(actually_owning_ranks.entry_has_been_set(
323 i - local_range.first) == false,
324 ExcInternalError());
325 Assert(interval.first >= local_range.first &&
326 interval.first < local_range.second,
327 ExcInternalError());
328 Assert(interval.second > local_range.first &&
329 interval.second <= local_range.second,
330 ExcInternalError());
331# endif
332
333 actually_owning_ranks.fill(interval.first -
334 local_range.first,
335 interval.second -
336 local_range.first,
337 other_rank);
338 dic_local_received += interval.second - interval.first;
339 }
340 }
341 }
342 else
343 {
344 // with gap: use a ConsensusAlgorithm to determine when all
345 // dictionaries have been set up.
346
347 // 3/4) use a ConsensusAlgorithm to send messages with local
348 // dofs to the right dict process
349
350 using RequestType = std::vector<
351 std::pair<types::global_dof_index, types::global_dof_index>>;
352
353 ConsensusAlgorithms::selector<RequestType>(
354 /* targets = */
355 [&buffers]() {
356 std::vector<unsigned int> targets;
357 targets.reserve(buffers.size());
358 for (const auto &rank_pair : buffers)
359 targets.emplace_back(rank_pair.first);
360
361 return targets;
362 }(),
363
364 /* create_request = */
365 [&buffers](const unsigned int target_rank) -> RequestType {
366 return buffers.at(target_rank);
367 },
368
369 /* process_request = */
370 [&](const unsigned int source_rank,
371 const RequestType &request) -> void {
372 // process message: loop over all intervals
373 for (auto interval : request)
374 {
375# ifdef DEBUG
376 for (types::global_dof_index i = interval.first;
377 i < interval.second;
378 i++)
379 Assert(
380 actually_owning_ranks.entry_has_been_set(
381 i - local_range.first) == false,
382 ExcMessage(
383 "Multiple processes seem to own the same global index. "
384 "A possible reason is that the sets of locally owned "
385 "indices are not distinct."));
386 Assert(interval.first < interval.second,
387 ExcInternalError());
388 Assert(
389 local_range.first <= interval.first &&
390 interval.second <= local_range.second,
391 ExcMessage(
392 "The specified interval is not handled by the current process."));
393# endif
394 actually_owning_ranks.fill(interval.first -
395 local_range.first,
396 interval.second -
397 local_range.first,
398 source_rank);
399 }
400 actually_owning_rank_list.push_back(source_rank);
401 },
402
403 comm);
404 }
405
406 std::sort(actually_owning_rank_list.begin(),
407 actually_owning_rank_list.end());
408
409 for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
410 Assert(actually_owning_rank_list[i] >
411 actually_owning_rank_list[i - 1],
412 ExcInternalError());
413
414 // 5) make sure that all messages have been sent
415 if (request.size() > 0)
416 {
417 const int ierr = MPI_Waitall(request.size(),
418 request.data(),
419 MPI_STATUSES_IGNORE);
420 AssertThrowMPI(ierr);
421 }
422
423#else
424 (void)owned_indices;
425 (void)comm;
426#endif
427 }
428
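Step 2) in Dictionary::reinit() above breaks every locally owned interval at the block boundaries of the dictionary, so that each piece goes to exactly one dictionary rank. The small program below reproduces that splitting loop in isolation, assuming the simplified layout of blocks of ceil(size/n_procs) indices; the grain-size handling of the real Dictionary is omitted and all names are illustrative.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
  const std::uint64_t size             = 20; // global index space
  const unsigned int  n_procs          = 4;
  const std::uint64_t dofs_per_process = (size + n_procs - 1) / n_procs; // 5

  // one locally owned interval that crosses two dictionary block boundaries
  std::pair<std::uint64_t, std::uint64_t> range(3, 12);

  // split [3,12) at the block boundaries 5 and 10
  std::vector<std::pair<unsigned int, std::pair<std::uint64_t, std::uint64_t>>>
    pieces;
  while (range.first != range.second)
    {
      const unsigned int owner =
        static_cast<unsigned int>(range.first / dofs_per_process);
      const std::uint64_t next =
        std::min((owner + 1) * dofs_per_process, range.second);
      pieces.emplace_back(owner, std::make_pair(range.first, next));
      range.first = next;
    }

  // prints: rank 0 gets [3,5), rank 1 gets [5,10), rank 2 gets [10,12)
  for (const auto &p : pieces)
    std::cout << "dictionary rank " << p.first << " gets [" << p.second.first
              << ',' << p.second.second << ")\n";
}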
429
430
431 void
432 Dictionary::partition(const IndexSet &owned_indices,
433 const MPI_Comm comm)
434 {
435#ifdef DEAL_II_WITH_MPI
436 const unsigned int n_procs = n_mpi_processes(comm);
437 const unsigned int my_rank = this_mpi_process(comm);
438
439 size = owned_indices.size();
440
441 Assert(size > 0, ExcNotImplemented());
442
443 dofs_per_process = (size + n_procs - 1) / n_procs;
444 if (dofs_per_process < range_minimum_grain_size)
445 {
446 dofs_per_process = range_minimum_grain_size;
447 stride_small_size = dofs_per_process * n_procs / size;
448 }
449 else
450 stride_small_size = 1;
451 local_range.first = get_index_offset(my_rank);
452 local_range.second = get_index_offset(my_rank + 1);
453
454 locally_owned_size = local_range.second - local_range.first;
455#else
456 (void)owned_indices;
457 (void)comm;
458#endif
459 }
460
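Dictionary::partition() above gives every rank a contiguous block of roughly size/n_procs dictionary entries; get_index_offset() and dof_to_dict_rank() (declared in the corresponding header) invert that layout. The following sketch models the arithmetic without the range_minimum_grain_size/stride_small_size refinement for very small index sets, so it is a simplified model rather than the library's own implementation.

#include <algorithm>
#include <cstdint>

// Simplified model of the dictionary layout set up by partition():
// n_procs blocks of dofs_per_process = ceil(size / n_procs) indices each.
struct SimpleDictionaryLayout
{
  std::uint64_t size;             // total number of indices
  unsigned int  n_procs;          // number of MPI ranks
  std::uint64_t dofs_per_process; // block size per dictionary rank

  SimpleDictionaryLayout(const std::uint64_t size, const unsigned int n_procs)
    : size(size)
    , n_procs(n_procs)
    , dofs_per_process((size + n_procs - 1) / n_procs)
  {}

  // first global index handled by dictionary rank 'rank'
  std::uint64_t index_offset(const unsigned int rank) const
  {
    return std::min<std::uint64_t>(rank * dofs_per_process, size);
  }

  // dictionary rank responsible for global index i
  unsigned int dict_rank(const std::uint64_t i) const
  {
    return static_cast<unsigned int>(i / dofs_per_process);
  }
};

// Example: size = 10 and n_procs = 4 give dofs_per_process = 3, so rank 0
// handles [0,3), rank 1 handles [3,6), rank 2 [6,9), and rank 3 [9,10).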
461
462 ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
463 const IndexSet & owned_indices,
464 const IndexSet & indices_to_look_up,
465 const MPI_Comm comm,
466 std::vector<unsigned int> &owning_ranks,
467 const bool track_index_requests)
468 : owned_indices(owned_indices)
469 , indices_to_look_up(indices_to_look_up)
470 , comm(comm)
471 , my_rank(this_mpi_process(comm))
472 , n_procs(n_mpi_processes(comm))
473 , track_index_requests(track_index_requests)
474 , owning_ranks(owning_ranks)
475 {
476 dict.reinit(owned_indices, comm);
477 requesters.resize(dict.actually_owning_rank_list.size());
478 }
479
480
481
482 void
483 ConsensusAlgorithmsPayload::answer_request(
484 const unsigned int other_rank,
485 const std::vector<std::pair<types::global_dof_index,
486 types::global_dof_index>> &buffer_recv,
487 std::vector<unsigned int> & request_buffer)
488 {
489 unsigned int owner_index_guess = 0;
490 for (const auto &interval : buffer_recv)
491 for (auto i = interval.first; i < interval.second; ++i)
492 {
493 const unsigned int actual_owner =
494 dict.actually_owning_ranks[i - dict.local_range.first];
495 request_buffer.push_back(actual_owner);
496
497 if (track_index_requests)
498 append_index_origin(i - dict.local_range.first,
499 other_rank,
500 actual_owner,
501 owner_index_guess);
502 }
503 }
504
505
506
507 std::vector<unsigned int>
508 ConsensusAlgorithmsPayload::compute_targets()
509 {
510 std::vector<unsigned int> targets;
511
512 indices_to_look_up_by_dict_rank.clear();
513 unsigned int index = 0;
514 unsigned int owner_index_guess = 0;
515 for (auto i : indices_to_look_up)
516 {
517 unsigned int other_rank = dict.dof_to_dict_rank(i);
518 if (other_rank == my_rank)
519 {
520 owning_ranks[index] =
521 dict.actually_owning_ranks[i - dict.local_range.first];
522 if (track_index_requests)
523 append_index_origin(i - dict.local_range.first,
524 my_rank,
525 owning_ranks[index],
526 owner_index_guess);
527 }
528 else
529 {
530 if (targets.empty() || targets.back() != other_rank)
531 targets.push_back(other_rank);
532 auto &indices = indices_to_look_up_by_dict_rank[other_rank];
533 indices.first.push_back(i);
534 indices.second.push_back(index);
535 }
536 index++;
537 }
538
539 Assert(targets.size() == indices_to_look_up_by_dict_rank.size(),
540 ExcMessage("Size does not match!"));
541
542 return targets;
543 }
544
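answer_request(), compute_targets(), create_request() and read_answer() above are the hooks a consensus-algorithm process class overrides; for this payload they are driven from Utilities::MPI::compute_index_owner(), which is the interface application code normally uses. A minimal usage sketch, assuming deal.II was configured with MPI and MPI has been initialized (for example via Utilities::MPI::MPI_InitFinalize):

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

void example(const MPI_Comm comm)
{
  using namespace dealii;

  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // every rank owns ten consecutive indices ...
  IndexSet owned(10 * n_ranks);
  owned.add_range(10 * my_rank, 10 * (my_rank + 1));

  // ... and asks who owns global index 0
  IndexSet to_look_up(owned.size());
  to_look_up.add_index(0);

  // internally this builds the Dictionary and the payload defined above
  const std::vector<unsigned int> owners =
    Utilities::MPI::compute_index_owner(owned, to_look_up, comm);

  // owners has one entry per index in to_look_up; here owners[0] == 0
  (void)owners;
}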
545
546
547 void
548 ConsensusAlgorithmsPayload::create_request(
549 const unsigned int other_rank,
550 std::vector<std::pair<types::global_dof_index,
551 types::global_dof_index>> &send_buffer)
552 {
553 // create index set and compress data to be sent
554 auto &indices_i = indices_to_look_up_by_dict_rank[other_rank].first;
555 IndexSet is(dict.size);
556 is.add_indices(indices_i.begin(), indices_i.end());
557 is.compress();
558
559 for (auto interval = is.begin_intervals();
560 interval != is.end_intervals();
561 ++interval)
562 send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
563 }
564
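create_request() above compresses the scattered indices destined for one dictionary rank into half-open intervals by routing them through an IndexSet. The helper below performs the same compression step in isolation; the free function compress_to_intervals() is made up for illustration.

#include <deal.II/base/index_set.h>
#include <deal.II/base/types.h>

#include <utility>
#include <vector>

std::vector<std::pair<dealii::types::global_dof_index,
                      dealii::types::global_dof_index>>
compress_to_intervals(
  const std::vector<dealii::types::global_dof_index> &indices,
  const dealii::types::global_dof_index               size)
{
  // collect the indices in an IndexSet, which sorts and merges them
  dealii::IndexSet index_set(size);
  index_set.add_indices(indices.begin(), indices.end());
  index_set.compress();

  // export the merged ranges as half-open [begin, end) pairs
  std::vector<std::pair<dealii::types::global_dof_index,
                        dealii::types::global_dof_index>>
    intervals;
  for (auto interval = index_set.begin_intervals();
       interval != index_set.end_intervals();
       ++interval)
    intervals.emplace_back(*interval->begin(), interval->last() + 1);
  return intervals;
}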
565
566
567 void
568 ConsensusAlgorithmsPayload::read_answer(
569 const unsigned int other_rank,
570 const std::vector<unsigned int> &recv_buffer)
571 {
572 const auto &recv_indices =
573 indices_to_look_up_by_dict_rank[other_rank].second;
574 AssertDimension(recv_indices.size(), recv_buffer.size());
575 for (unsigned int j = 0; j < recv_indices.size(); ++j)
576 owning_ranks[recv_indices[j]] = recv_buffer[j];
577 }
578
579
580
581 std::map<unsigned int, IndexSet>
582 ConsensusAlgorithmsPayload::get_requesters()
583 {
584 Assert(track_index_requests,
585 ExcMessage("Must enable index range tracking in "
586 "constructor of ConsensusAlgorithmProcess"));
587
588 std::map<unsigned int, dealii::IndexSet> requested_indices;
589
590#ifdef DEAL_II_WITH_MPI
591
592 static CollectiveMutex mutex;
593 CollectiveMutex::ScopedLock lock(mutex, comm);
594
595 const int mpi_tag = Utilities::MPI::internal::Tags::
596 consensus_algorithm_payload_get_requesters;
597
598 // reserve enough slots for the requests ahead; depending on
599 // whether the owning rank is one of the requesters or not, we
600 // might have one request fewer to execute, so fill the requests
601 // on demand.
602 std::vector<MPI_Request> send_requests;
603 send_requests.reserve(requesters.size());
604
605 // We use an integer vector for the data exchange. Since we send
606 // data associated to intervals with different requesters, we will
607 // need to send (a) the MPI rank of the requester, (b) the number
608 // of intervals directed to this requester, and (c) a list of
609 // intervals, i.e., two integers per interval. The number of items
610 // sent in total can be deduced both via the MPI status message at
611 // the receiver side as well as by counting the buckets from
612 // different requesters.
613 std::vector<std::vector<unsigned int>> send_data(requesters.size());
614 for (unsigned int i = 0; i < requesters.size(); ++i)
615 {
616 // special code for our own indices
617 if (dict.actually_owning_rank_list[i] == my_rank)
618 {
619 for (const auto &j : requesters[i])
620 {
621 const types::global_dof_index index_offset =
622 dict.get_index_offset(my_rank);
623 IndexSet &my_index_set = requested_indices[j.first];
624 my_index_set.set_size(owned_indices.size());
625 for (const auto &interval : j.second)
626 my_index_set.add_range(index_offset + interval.first,
627 index_offset + interval.second);
628 }
629 }
630 else
631 {
632 for (const auto &j : requesters[i])
633 {
634 send_data[i].push_back(j.first);
635 send_data[i].push_back(j.second.size());
636 for (const auto &interval : j.second)
637 {
638 send_data[i].push_back(interval.first);
639 send_data[i].push_back(interval.second);
640 }
641 }
642 send_requests.push_back(MPI_Request());
643 const int ierr = MPI_Isend(send_data[i].data(),
644 send_data[i].size(),
645 MPI_UNSIGNED,
646 dict.actually_owning_rank_list[i],
647 mpi_tag,
648 comm,
649 &send_requests.back());
650 AssertThrowMPI(ierr);
651 }
652 }
653
654 // receive the data
655 for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
656 {
657 // wait for an incoming message
658 MPI_Status status;
659 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
660 AssertThrowMPI(ierr);
661
662 // retrieve size of incoming message
663 int number_amount;
664 ierr = MPI_Get_count(&status, MPI_UNSIGNED, &number_amount);
665 AssertThrowMPI(ierr);
666
667 // receive message
668 Assert(number_amount % 2 == 0, ExcInternalError());
669 std::vector<std::pair<unsigned int, unsigned int>> buffer(
670 number_amount / 2);
671 ierr = MPI_Recv(buffer.data(),
672 number_amount,
673 MPI_UNSIGNED,
674 status.MPI_SOURCE,
675 status.MPI_TAG,
676 comm,
677 &status);
678 AssertThrowMPI(ierr);
679
680 // unpack the message and translate the dictionary-local
681 // indices coming via MPI to the global index range
682 const types::global_dof_index index_offset =
683 dict.get_index_offset(status.MPI_SOURCE);
684 unsigned int offset = 0;
685 while (offset < buffer.size())
686 {
687 AssertIndexRange(offset + buffer[offset].second,
688 buffer.size());
689
690 IndexSet my_index_set(owned_indices.size());
691 for (unsigned int i = offset + 1;
692 i < offset + buffer[offset].second + 1;
693 ++i)
694 my_index_set.add_range(index_offset + buffer[i].first,
695 index_offset + buffer[i].second);
696
697 // the underlying index set is able to merge ranges coming
698 // from different ranks due to the partitioning in the
699 // dictionary
700 IndexSet &index_set = requested_indices[buffer[offset].first];
701 if (index_set.size() == 0)
702 index_set.set_size(owned_indices.size());
703 index_set.add_indices(my_index_set);
704
705 offset += buffer[offset].second + 1;
706 }
707 AssertDimension(offset, buffer.size());
708 }
709
710 if (send_requests.size() > 0)
711 {
712 const auto ierr = MPI_Waitall(send_requests.size(),
713 send_requests.data(),
714 MPI_STATUSES_IGNORE);
715 AssertThrowMPI(ierr);
716 }
717
718
719# ifdef DEBUG
720 for (const auto &it : requested_indices)
721 {
722 IndexSet copy_set = it.second;
723 copy_set.subtract_set(owned_indices);
724 Assert(copy_set.n_elements() == 0,
725 ExcMessage(
726 "The indices requested from the current "
727 "MPI rank should be locally owned here!"));
728 }
729# endif
730
731#endif // DEAL_II_WITH_MPI
732
733 return requested_indices;
734 }
735
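The send_data buffers assembled in get_requesters() above use a flat unsigned-int encoding per owning rank: for each requesting rank, first the rank itself, then the number of its intervals, then the interval bounds in dictionary-local numbering. The buffer returned below shows one possible layout, with invented values for illustration only.

#include <vector>

// One send_data[i] buffer in the format assembled above (values invented):
// rank 3 requested the dictionary-local intervals [0,2) and [5,6),
// rank 7 requested the interval [2,5).
std::vector<unsigned int> example_requester_buffer()
{
  return {3, 2, /* its intervals: */ 0, 2, 5, 6, //
          7, 1, /* its interval:  */ 2, 5};
}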
736
737
738 void
739 ConsensusAlgorithmsPayload::append_index_origin(
740 const unsigned int index_within_dict,
741 const unsigned int rank_of_request,
742 const unsigned int rank_of_owner,
743 unsigned int & owner_index_guess)
744 {
745 // remember who requested which index. We want to use an
746 // std::vector with simple addressing, via a good guess from the
747 // preceding index, rather than std::map, because this is an inner
748 // loop and it avoids the map lookup in every iteration
749 owner_index_guess =
750 dict.get_owning_rank_index(rank_of_owner, owner_index_guess);
751
752 auto &request = requesters[owner_index_guess];
753 if (request.empty() || request.back().first != rank_of_request)
754 request.emplace_back(
755 rank_of_request,
756 std::vector<std::pair<unsigned int, unsigned int>>());
757
758 auto &intervals = request.back().second;
759 if (intervals.empty() || intervals.back().second != index_within_dict)
760 intervals.emplace_back(index_within_dict, index_within_dict + 1);
761 else
762 ++intervals.back().second;
763 }
764 } // namespace ComputeIndexOwner
765 } // namespace internal
766 } // namespace MPI
767} // namespace Utilities
768
769DEAL_II_NAMESPACE_CLOSE
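For reference, append_index_origin() above records, per owning rank, which rank requested which dictionary-local indices, merging consecutive indices into half-open intervals on the fly. The helper below demonstrates just that merging step; the name append_compressed() is illustrative only.

#include <utility>
#include <vector>

void append_compressed(
  std::vector<std::pair<unsigned int, unsigned int>> &intervals,
  const unsigned int                                  index)
{
  // either extend the last interval or start a new one
  if (intervals.empty() || intervals.back().second != index)
    intervals.emplace_back(index, index + 1);
  else
    ++intervals.back().second;
}

// Feeding 4, 5, 6, 9 into append_compressed() yields {[4,7), [9,10)}.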