mpi.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/mpi.templates.h>
24
27
28#include <boost/serialization/utility.hpp>
29
30#include <iostream>
31#include <limits>
32#include <numeric>
33#include <set>
34#include <vector>
35
36#if defined(DEAL_II_WITH_MPI)
38# include <mpi.h>
40#endif
41
42
43DEAL_II_NAMESPACE_OPEN
44
45
46namespace Utilities
47{
48 IndexSet
49 create_evenly_distributed_partitioning(
50 const unsigned int my_partition_id,
51 const unsigned int n_partitions,
52 const types::global_dof_index total_size)
53 {
54 static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
55 "IndexSet::size_type must match types::global_dof_index for "
56 "using this function");
57 const unsigned int remain = total_size % n_partitions;
58
59 const IndexSet::size_type min_size = total_size / n_partitions;
60
61 const IndexSet::size_type begin =
62 min_size * my_partition_id + std::min(my_partition_id, remain);
63 const IndexSet::size_type end =
64 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
65 IndexSet result(total_size);
66 result.add_range(begin, end);
67 return result;
68 }
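For illustration only (not part of this file), a minimal sketch of calling the function defined above directly; the concrete numbers are hypothetical:

  // Split 10 global indices over 3 partitions: the sizes become 4, 3, 3.
  // Partition 1 then owns the contiguous half-open range [4, 7).
  const dealii::IndexSet middle =
    dealii::Utilities::create_evenly_distributed_partitioning(
      /*my_partition_id=*/1, /*n_partitions=*/3, /*total_size=*/10);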
69
70 namespace MPI
71 {
72 MinMaxAvg
73 min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
74 {
75 MinMaxAvg result;
76 min_max_avg(ArrayView<const double>(my_value),
77 ArrayView<MinMaxAvg>(result),
78 mpi_communicator);
79
80 return result;
81 }
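A usage sketch of the scalar overload above (not part of this file; `local_time` is a hypothetical per-rank value, and MPI is assumed to be initialized):

  const double local_time = /* some per-rank measurement */ 1.0;
  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg(local_time, MPI_COMM_WORLD);
  // stats.min, stats.max, stats.avg and the owning ranks stats.min_index /
  // stats.max_index are identical on all participating processes.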
82
83
84
85 std::vector<MinMaxAvg>
86 min_max_avg(const std::vector<double> &my_values,
87 const MPI_Comm mpi_communicator)
88 {
89 std::vector<MinMaxAvg> results(my_values.size());
90 min_max_avg(my_values, results, mpi_communicator);
91
92 return results;
93 }
94
95
96
97#ifdef DEAL_II_WITH_MPI
98 unsigned int
99 n_mpi_processes(const MPI_Comm mpi_communicator)
100 {
101 if (job_supports_mpi())
102 {
103 int n_jobs = 1;
104 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
105 AssertThrowMPI(ierr);
106 return n_jobs;
107 }
108 else
109 return 1;
110 }
111
112
113 unsigned int
114 this_mpi_process(const MPI_Comm mpi_communicator)
115 {
116 if (job_supports_mpi())
117 {
118 int rank = 0;
119 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
120 AssertThrowMPI(ierr);
121 return rank;
122 }
123 else
124 return 0;
125 }
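A usage sketch of the two query functions above (not part of this file; assumes MPI has been initialized, e.g. through MPI_InitFinalize):

  const unsigned int n_ranks =
    dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  const unsigned int my_rank =
    dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  if (my_rank == 0)
    std::cout << "Running on " << n_ranks << " MPI processes" << std::endl;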
126
127
128
129 std::vector<unsigned int>
130 mpi_processes_within_communicator(const MPI_Comm comm_large,
131 const MPI_Comm comm_small)
132 {
133 if (Utilities::MPI::job_supports_mpi() == false)
134 return std::vector<unsigned int>{0};
135
136 const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
137 const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
138
139 std::vector<unsigned int> ranks(size);
140 const int ierr = MPI_Allgather(
141 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
142 AssertThrowMPI(ierr);
143
144 return ranks;
145 }
146
147
148
149 MPI_Comm
150 duplicate_communicator(const MPI_Comm mpi_communicator)
151 {
152 MPI_Comm new_communicator;
153 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
154 AssertThrowMPI(ierr);
155 return new_communicator;
156 }
157
158
159
160 void
161 free_communicator(MPI_Comm mpi_communicator)
162 {
163 // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
164 const int ierr = MPI_Comm_free(&mpi_communicator);
165 AssertThrowMPI(ierr);
166 }
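A usage sketch pairing the two functions above (not part of this file):

  // Work on a private copy of a communicator so that message tags used here
  // cannot collide with the caller's communication, then release the copy.
  MPI_Comm my_comm =
    dealii::Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
  // ... communicate on my_comm ...
  dealii::Utilities::MPI::free_communicator(my_comm);
  // my_comm must not be used after this point.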
167
168
169
170 std::vector<IndexSet>
171 create_ascending_partitioning(
172 const MPI_Comm comm,
173 const types::global_dof_index locally_owned_size)
174 {
175 static_assert(
176 std::is_same_v<types::global_dof_index, IndexSet::size_type>,
177 "IndexSet::size_type must match types::global_dof_index for "
178 "using this function");
179 const unsigned int n_proc = n_mpi_processes(comm);
180 const std::vector<IndexSet::size_type> sizes =
181 all_gather(comm, locally_owned_size);
182 const auto total_size =
183 std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
184
185 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
186
187 IndexSet::size_type begin = 0;
188 for (unsigned int i = 0; i < n_proc; ++i)
189 {
190 res[i].add_range(begin, begin + sizes[i]);
191 begin = begin + sizes[i];
192 }
193
194 return res;
195 }
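A usage sketch (not part of this file; `n_locally_owned` is a hypothetical per-rank count):

  const dealii::types::global_dof_index n_locally_owned = 100;
  const std::vector<dealii::IndexSet> partition =
    dealii::Utilities::MPI::create_ascending_partitioning(MPI_COMM_WORLD,
                                                          n_locally_owned);
  // partition[r] is the contiguous range owned by rank r; the ranges are
  // stacked in ascending rank order and together cover all indices.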
196
197
198
199 IndexSet
200 create_evenly_distributed_partitioning(
201 const MPI_Comm comm,
202 const types::global_dof_index total_size)
203 {
204 const unsigned int this_proc = this_mpi_process(comm);
205 const unsigned int n_proc = n_mpi_processes(comm);
206
207 return Utilities::create_evenly_distributed_partitioning(this_proc,
208 n_proc,
209 total_size);
210 }
211
212
213
214 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
215 create_mpi_data_type_n_bytes(const std::size_t n_bytes)
216 {
217 MPI_Datatype result;
218 int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
219 AssertThrowMPI(ierr);
220 ierr = MPI_Type_commit(&result);
221 AssertThrowMPI(ierr);
222
223 if constexpr (running_in_debug_mode())
224 {
225 MPI_Count size64;
226 ierr = MPI_Type_size_x(result, &size64);
227 AssertThrowMPI(ierr);
228
229 Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
230 }
231
232 // Now put the new data type into a std::unique_ptr with a custom
233 // deleter. We call the std::unique_ptr constructor that as first
234 // argument takes a pointer (here, a pointer to a copy of the `result`
235 // object, and as second argument a pointer-to-function, for which
236 // we here use a lambda function without captures that acts as the
237 // 'deleter' object: it calls `MPI_Type_free` and then deletes the
238 // pointer. To avoid a compiler warning about a null this pointer
239 // in the lambda (which don't make sense: the lambda doesn't store
240 // anything), we create the deleter first.
241 auto deleter = [](MPI_Datatype *p) {
242 if (p != nullptr)
243 {
244 [[maybe_unused]] const int ierr = MPI_Type_free(p);
245
246 AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
247
248 delete p;
249 }
250 };
251
252 return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
253 new MPI_Datatype(result), deleter);
254 }
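A usage sketch of the function above (not part of this file; `buffer` is a hypothetical std::vector<char> that may hold more than 2^31 bytes):

  std::vector<char> buffer(/* possibly very large */ 1024);
  const auto mpi_type =
    dealii::Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
  // Send the whole buffer as a single element of the new datatype, which
  // sidesteps the 'int count' limit of plain MPI_BYTE sends.
  const int ierr = MPI_Send(
    buffer.data(), 1, *mpi_type, /*dest=*/1, /*tag=*/0, MPI_COMM_WORLD);
  AssertThrowMPI(ierr);
  // The datatype is freed automatically when mpi_type goes out of scope.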
255
256
257
258 std::vector<unsigned int>
259 compute_point_to_point_communication_pattern(
260 const MPI_Comm mpi_comm,
261 const std::vector<unsigned int> &destinations)
262 {
263 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
264 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
265
266 if constexpr (running_in_debug_mode())
267 {
268 for (const unsigned int destination : destinations)
269 AssertIndexRange(destination, n_procs);
270 }
271
272 // Have a little function that checks if destinations provided
273 // to the current process are unique. The way it does this is
274 // to create a sorted list of destinations and then walk through
275 // the list and look at successive elements -- if we find the
276 // same number twice, we know that the destinations were not
277 // unique
278 const bool my_destinations_are_unique = [destinations]() {
279 if (destinations.empty())
280 return true;
281 else
282 {
283 std::vector<unsigned int> my_destinations = destinations;
284 std::sort(my_destinations.begin(), my_destinations.end());
285 return (std::adjacent_find(my_destinations.begin(),
286 my_destinations.end()) ==
287 my_destinations.end());
288 }
289 }();
290
291 // If all processes report that they have unique destinations,
292 // then we can short-cut the process using a consensus algorithm (which
293 // is implemented only for the case of unique destinations):
294 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
295 1)
296 {
297 return ConsensusAlgorithms::nbx<char, char>(
298 destinations, {}, {}, {}, mpi_comm);
299 }
300
301 // So we need to run a different algorithm, specifically one that
302 // requires more memory -- MPI_Reduce_scatter_block will require memory
303 // proportional to the number of processes involved; that function is
304 // available for MPI 2.2 or later:
305 static CollectiveMutex mutex;
306 CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
307
308 const int mpi_tag =
309 internal::Tags::compute_point_to_point_communication_pattern;
310
311 // Calculate the number of messages to send to each process
312 std::vector<unsigned int> dest_vector(n_procs);
313 for (const auto &el : destinations)
314 ++dest_vector[el];
315
316 // Find how many processes will send to this one
317 // by reducing with sum and then scattering the
318 // results over all processes
319 unsigned int n_recv_from;
320 const int ierr = MPI_Reduce_scatter_block(
321 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
322
323 AssertThrowMPI(ierr);
324
325 // Send myid to every process in `destinations` vector...
326 std::vector<MPI_Request> send_requests(destinations.size());
327 for (const auto &el : destinations)
328 {
329 const int ierr =
330 MPI_Isend(&myid,
331 1,
332 MPI_UNSIGNED,
333 el,
334 mpi_tag,
335 mpi_comm,
336 send_requests.data() + (&el - destinations.data()));
337 AssertThrowMPI(ierr);
338 }
339
340
341 // Receive `n_recv_from` times from the processes
342 // who communicate with this one. Store the obtained id's
343 // in the resulting vector
344 std::vector<unsigned int> origins(n_recv_from);
345 for (auto &el : origins)
346 {
347 const int ierr = MPI_Recv(&el,
348 1,
349 MPI_UNSIGNED,
350 MPI_ANY_SOURCE,
351 mpi_tag,
352 mpi_comm,
353 MPI_STATUS_IGNORE);
354 AssertThrowMPI(ierr);
355 }
356
357 if (destinations.size() > 0)
358 {
359 const int ierr = MPI_Waitall(destinations.size(),
360 send_requests.data(),
361 MPI_STATUSES_IGNORE);
362 AssertThrowMPI(ierr);
363 }
364
365 return origins;
366 }
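A usage sketch (not part of this file; `my_rank` and `n_ranks` are assumed to have been obtained via this_mpi_process() and n_mpi_processes() as shown earlier):

  // Each rank plans to send one message to its right neighbor and asks
  // which ranks will in turn send to it (here: the left neighbor).
  const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};
  const std::vector<unsigned int> origins =
    dealii::Utilities::MPI::compute_point_to_point_communication_pattern(
      MPI_COMM_WORLD, destinations);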
367
368
369
370 unsigned int
371 compute_n_point_to_point_communications(
372 const MPI_Comm mpi_comm,
373 const std::vector<unsigned int> &destinations)
374 {
375 // Have a little function that checks if destinations provided
376 // to the current process are unique:
377 const bool my_destinations_are_unique = [destinations]() {
378 std::vector<unsigned int> my_destinations = destinations;
379 const unsigned int n_destinations = my_destinations.size();
380 std::sort(my_destinations.begin(), my_destinations.end());
381 my_destinations.erase(std::unique(my_destinations.begin(),
382 my_destinations.end()),
383 my_destinations.end());
384 return (my_destinations.size() == n_destinations);
385 }();
386
387 // If all processes report that they have unique destinations,
388 // then we can short-cut the process using a consensus algorithm:
389
390 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
391 1)
392 {
393 return ConsensusAlgorithms::nbx<char, char>(
394 destinations, {}, {}, {}, mpi_comm)
395 .size();
396 }
397 else
398 {
399 const unsigned int n_procs =
400 Utilities::MPI::n_mpi_processes(mpi_comm);
401
402 if constexpr (running_in_debug_mode())
403 {
404 for (const unsigned int destination : destinations)
405 {
406 AssertIndexRange(destination, n_procs);
407 Assert(
408 destination != Utilities::MPI::this_mpi_process(mpi_comm),
409 ExcMessage(
410 "There is no point in communicating with ourselves."));
411 }
412 }
413
414 // Calculate the number of messages to send to each process
415 std::vector<unsigned int> dest_vector(n_procs);
416 for (const auto &el : destinations)
417 ++dest_vector[el];
418
419 // Find out how many processes will send to this one
420 // MPI_Reduce_scatter(_block) does exactly this
421 unsigned int n_recv_from = 0;
422
423 const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
424 &n_recv_from,
425 1,
426 MPI_UNSIGNED,
427 MPI_SUM,
428 mpi_comm);
429
430 AssertThrowMPI(ierr);
431
432 return n_recv_from;
433 }
434 }
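A usage sketch (not part of this file; `destinations` as in the previous example):

  // Only the number of incoming messages is needed here, not the ranks
  // they come from, so the cheaper counting variant suffices.
  const unsigned int n_incoming =
    dealii::Utilities::MPI::compute_n_point_to_point_communications(
      MPI_COMM_WORLD, destinations);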
435
436
437
438 namespace
439 {
440 // custom MPI_Op for calculate_collective_mpi_min_max_avg
441 void
442 max_reduce(const void *in_lhs_,
443 void *inout_rhs_,
444 int *len,
445 MPI_Datatype *)
446 {
447 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
448 MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
449
450 for (int i = 0; i < *len; ++i)
451 {
452 inout_rhs[i].sum += in_lhs[i].sum;
453 if (inout_rhs[i].min > in_lhs[i].min)
454 {
455 inout_rhs[i].min = in_lhs[i].min;
456 inout_rhs[i].min_index = in_lhs[i].min_index;
457 }
458 else if (inout_rhs[i].min == in_lhs[i].min)
459 {
460 // choose lower cpu index when tied to make operator commutative
461 if (inout_rhs[i].min_index > in_lhs[i].min_index)
462 inout_rhs[i].min_index = in_lhs[i].min_index;
463 }
464
465 if (inout_rhs[i].max < in_lhs[i].max)
466 {
467 inout_rhs[i].max = in_lhs[i].max;
468 inout_rhs[i].max_index = in_lhs[i].max_index;
469 }
470 else if (inout_rhs[i].max == in_lhs[i].max)
471 {
472 // choose lower cpu index when tied to make operator commutative
473 if (inout_rhs[i].max_index > in_lhs[i].max_index)
474 inout_rhs[i].max_index = in_lhs[i].max_index;
475 }
476 }
477 }
478 } // namespace
479
480
481
482 void
483 min_max_avg(const ArrayView<const double> &my_values,
484 const ArrayView<MinMaxAvg> &result,
485 const MPI_Comm mpi_communicator)
486 {
487 // If MPI was not started, we have a serial computation and cannot run
488 // the other MPI commands
489 if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
490 {
491 for (unsigned int i = 0; i < my_values.size(); ++i)
492 {
493 result[i].sum = my_values[i];
494 result[i].avg = my_values[i];
495 result[i].min = my_values[i];
496 result[i].max = my_values[i];
497 result[i].min_index = 0;
498 result[i].max_index = 0;
499 }
500 return;
501 }
502
503 /*
504 * A custom MPI datatype handle describing the memory layout of the
505 * MinMaxAvg struct. Initialized the first time control reaches the
506 * static variable, so hopefully not initialized too early.
507 */
508 static MPI_Datatype type = []() {
509 MPI_Datatype type;
510
511 int lengths[] = {3, 2, 1};
512
513 MPI_Aint displacements[] = {0,
514 offsetof(MinMaxAvg, min_index),
515 offsetof(MinMaxAvg, avg)};
516
517 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
518
519 int ierr =
520 MPI_Type_create_struct(3, lengths, displacements, types, &type);
521 AssertThrowMPI(ierr);
522
523 ierr = MPI_Type_commit(&type);
524 AssertThrowMPI(ierr);
525
526 /* Ensure that we free the allocated datatype again at the end of
527 * the program run just before we call MPI_Finalize():*/
528 InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
529 int ierr = MPI_Type_free(&type);
530 AssertThrowMPI(ierr);
531 });
532
533 return type;
534 }();
535
536 /*
537 * A custom MPI op handle for our max_reduce function.
538 * Initialized the first time control reaches the static variable, so
539 * hopefully not initialized too early.
540 */
541 static MPI_Op op = []() {
542 MPI_Op op;
543
544 int ierr =
545 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
546 static_cast<int>(true),
547 &op);
548 AssertThrowMPI(ierr);
549
550 /* Ensure that we free the allocated op again at the end of the
551 * program run just before we call MPI_Finalize():*/
552 InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
553 int ierr = MPI_Op_free(&op);
554 AssertThrowMPI(ierr);
555 });
556
557 return op;
558 }();
559
560 AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
561 Utilities::MPI::max(my_values.size(), mpi_communicator));
562
563 AssertDimension(my_values.size(), result.size());
564
565 // To avoid uninitialized values on some MPI implementations, provide
566 // result with a default value already...
567 MinMaxAvg dummy = {0.,
568 std::numeric_limits<double>::max(),
569 std::numeric_limits<double>::lowest(),
570 0,
571 0,
572 0.};
573
574 for (auto &i : result)
575 i = dummy;
576
577 const unsigned int my_id =
578 ::Utilities::MPI::this_mpi_process(mpi_communicator);
579 const unsigned int numproc =
580 ::Utilities::MPI::n_mpi_processes(mpi_communicator);
581
582 std::vector<MinMaxAvg> in(my_values.size());
583
584 for (unsigned int i = 0; i < my_values.size(); ++i)
585 {
586 in[i].sum = in[i].min = in[i].max = my_values[i];
587 in[i].min_index = in[i].max_index = my_id;
588 }
589
590 int ierr = MPI_Allreduce(
591 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
592 AssertThrowMPI(ierr);
593
594 for (auto &r : result)
595 r.avg = r.sum / numproc;
596 }
597
598
599#else
600
601 unsigned int
602 n_mpi_processes(const MPI_Comm)
603 {
604 return 1;
605 }
606
607
608
609 unsigned int
610 this_mpi_process(const MPI_Comm)
611 {
612 return 0;
613 }
614
615
616
617 std::vector<unsigned int>
618 mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
619 {
620 return std::vector<unsigned int>{0};
621 }
622
623
624
625 std::vector<IndexSet>
626 create_ascending_partitioning(
627 const MPI_Comm /*comm*/,
628 const types::global_dof_index locally_owned_size)
629 {
630 return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
631 }
632
633 IndexSet
634 create_evenly_distributed_partitioning(
635 const MPI_Comm /*comm*/,
636 const types::global_dof_index total_size)
637 {
638 return complete_index_set(total_size);
639 }
640
641
642
643 MPI_Comm
644 duplicate_communicator(const MPI_Comm mpi_communicator)
645 {
646 return mpi_communicator;
647 }
648
649
650
651 void
652 free_communicator(MPI_Comm /*mpi_communicator*/)
653 {}
654
655
656
657 void
658 min_max_avg(const ArrayView<const double> &my_values,
659 const ArrayView<MinMaxAvg> &result,
660 const MPI_Comm)
661 {
662 AssertDimension(my_values.size(), result.size());
663
664 for (unsigned int i = 0; i < my_values.size(); ++i)
665 {
666 result[i].sum = my_values[i];
667 result[i].avg = my_values[i];
668 result[i].min = my_values[i];
669 result[i].max = my_values[i];
670 result[i].min_index = 0;
671 result[i].max_index = 0;
672 }
673 }
674
675#endif
676
677
678 MPI_InitFinalize::MPI_InitFinalize(int &argc,
679 char **&argv,
680 const unsigned int max_num_threads)
681 : InitFinalize(argc,
682 argv,
686 max_num_threads)
687 {}
688
689
690
691 bool
692 job_supports_mpi()
693 {
694#ifdef DEAL_II_WITH_MPI
695 int MPI_has_been_started = 0;
696 const int ierr = MPI_Initialized(&MPI_has_been_started);
697 AssertThrowMPI(ierr);
698
699 return (MPI_has_been_started > 0);
700#else
701 return false;
702#endif
703 }
704
705
706
707 namespace
708 {
713 namespace ComputeIndexOwner
714 {
715 class FlexibleIndexStorage
716 {
717 public:
718 using index_type = unsigned int;
719 static const index_type invalid_index_value =
720 numbers::invalid_unsigned_int;
721
722 FlexibleIndexStorage(const bool use_vector = true);
723
724 void
725 reinit(const bool use_vector,
726 const bool index_range_contiguous,
727 const std::size_t size);
728
729 void
730 fill(const std::size_t start,
731 const std::size_t end,
732 const index_type &value);
733
734 index_type &
735 operator[](const std::size_t index);
736
737 index_type
738 operator[](const std::size_t index) const;
739
740 bool
741 entry_has_been_set(const std::size_t index) const;
742
743 private:
744 bool use_vector;
745 std::size_t size;
746 std::vector<index_type> data;
747 std::map<std::size_t, index_type> data_map;
748 };
749
750
751
757 struct Dictionary
758 {
780 static constexpr unsigned int range_minimum_grain_size = 64;
781
788 static constexpr unsigned int sparsity_factor = 4;
789
790
796 Dictionary(const IndexSet &owned_indices, const MPI_Comm comm);
797
804 FlexibleIndexStorage actually_owning_ranks;
805
810 std::vector<unsigned int> actually_owning_rank_list;
811
818 types::global_dof_index dofs_per_process;
819
825 std::pair<types::global_dof_index, types::global_dof_index>
826 local_range;
827
833 types::global_dof_index locally_owned_size;
834
838 types::global_dof_index size;
839
844 unsigned int n_dict_procs_in_owned_indices;
845
851 unsigned int stride_small_size;
852
858 unsigned int
859 dof_to_dict_rank(const types::global_dof_index i);
860
865 types::global_dof_index
866 get_index_offset(const unsigned int rank);
867
873 unsigned int
874 get_owning_rank_index(const unsigned int rank_in_owned_indices,
875 const unsigned int guess = 0);
876
877 private:
882 void
883 partition(const IndexSet &owned_indices, const MPI_Comm comm);
884 };
885
886
887
894 class ConsensusAlgorithmsPayload
895 {
896 public:
897 using RequestType = std::vector<
898 std::pair<types::global_dof_index, types::global_dof_index>>;
899 using AnswerType = std::vector<unsigned int>;
900
904 ConsensusAlgorithmsPayload(const IndexSet &owned_indices,
905 const IndexSet &indices_to_look_up,
906 const MPI_Comm comm,
907 std::vector<unsigned int> &owning_ranks,
908 const bool track_index_requesters = false);
909
913 const IndexSet &owned_indices;
914
919 const IndexSet &indices_to_look_up;
920
924 const MPI_Comm comm;
925
929 const unsigned int my_rank;
930
935 const unsigned int n_procs;
936
943 const bool track_index_requesters;
944
950 std::vector<unsigned int> &owning_ranks;
951
955 Dictionary dict;
956
966 std::vector<std::vector<
967 std::pair<unsigned int,
968 std::vector<std::pair<types::global_dof_index,
969 types::global_dof_index>>>>>
970 requesters;
971
977 std::map<unsigned int,
978 std::pair<std::vector<types::global_dof_index>,
979 std::vector<unsigned int>>>
980 indices_to_look_up_by_dict_rank;
981
985 std::vector<unsigned int>
986 compute_targets();
987
991 std::vector<
992 std::pair<types::global_dof_index, types::global_dof_index>>
993 create_request(const unsigned int other_rank);
994
998 std::vector<unsigned int>
999 answer_request(
1000 const unsigned int other_rank,
1001 const std::vector<std::pair<types::global_dof_index,
1002 types::global_dof_index>> &buffer_recv);
1003
1008 void
1009 process_answer(const unsigned int other_rank,
1010 const std::vector<unsigned int> &recv_buffer);
1011
1027 std::map<unsigned int, IndexSet>
1028 get_requesters();
1029
1030 private:
1044 void
1045 append_index_origin(
1046 const types::global_dof_index index_within_dictionary,
1047 const unsigned int rank_of_request,
1048 const unsigned int rank_of_owner,
1049 unsigned int &owner_index_guess);
1050 };
1051
1052 /* ------------------------- inline functions ----------------------- */
1053
1054 inline unsigned int
1055 Dictionary::dof_to_dict_rank(const types::global_dof_index i)
1056 {
1057 // note: this formula is also explicitly used in
1058 // get_index_offset(), so keep the two in sync
1059 return (i / dofs_per_process) * stride_small_size;
1060 }
1061
1062
1063 inline types::global_dof_index
1064 Dictionary::get_index_offset(const unsigned int rank)
1065 {
1066 return std::min(dofs_per_process *
1067 static_cast<types::global_dof_index>(
1068 (rank + stride_small_size - 1) /
1069 stride_small_size),
1070 size);
1071 }
1072
1073
1074
1075 inline unsigned int
1076 Dictionary::get_owning_rank_index(
1077 const unsigned int rank_in_owned_indices,
1078 const unsigned int guess)
1079 {
1080 AssertIndexRange(guess, actually_owning_rank_list.size());
1081 if (actually_owning_rank_list[guess] == rank_in_owned_indices)
1082 return guess;
1083 else
1084 {
1085 auto it = std::lower_bound(actually_owning_rank_list.begin(),
1086 actually_owning_rank_list.end(),
1087 rank_in_owned_indices);
1088 Assert(it != actually_owning_rank_list.end(), ExcInternalError());
1089 Assert(*it == rank_in_owned_indices, ExcInternalError());
1090 return it - actually_owning_rank_list.begin();
1091 }
1092 }
1093
1094
1095 const FlexibleIndexStorage::index_type
1096 FlexibleIndexStorage::invalid_index_value;
1097
1098
1099
1100 FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
1101 : use_vector(use_vector)
1102 , size(0)
1103 {}
1104
1105
1106
1107 void
1108 FlexibleIndexStorage::reinit(const bool use_vector,
1109 const bool index_range_contiguous,
1110 const std::size_t size)
1111 {
1112 this->use_vector = use_vector;
1113 this->size = size;
1114
1115 data = {};
1116 data_map.clear();
1117
1118 // in case we have contiguous indices, only fill the vector upon
1119 // first request in `fill`
1120 if (use_vector && !index_range_contiguous)
1121 data.resize(size, invalid_index_value);
1122 }
1123
1124
1125
1126 void
1127 FlexibleIndexStorage::fill(
1128 const std::size_t start,
1129 const std::size_t end,
1130 const FlexibleIndexStorage::index_type &value)
1131 {
1132 AssertIndexRange(start, size);
1133 AssertIndexRange(end, size + 1);
1134
1135 if (use_vector)
1136 {
1137 if (data.empty() && end > start)
1138 {
1139 // in debug mode, we want to track whether we set all
1140 // indices, so we first fill an invalid index and only later
1141 // the actual ones, whereas we simply assign the given rank
1142 // to the complete vector the first time we pass around in
1143 // this function in release mode to avoid touching data
1144 // unnecessarily (and overwrite the smaller pieces), as the
1145 // locally owned part comes first
1146 if constexpr (running_in_debug_mode())
1147 {
1148 data.resize(size, invalid_index_value);
1149 std::fill(data.begin() + start,
1150 data.begin() + end,
1151 value);
1152 }
1153 else
1154 {
1155 data.resize(size, value);
1156 }
1157 }
1158 else
1159 {
1160 AssertDimension(data.size(), size);
1161 std::fill(data.begin() + start, data.begin() + end, value);
1162 }
1163 }
1164 else
1165 {
1166 for (auto i = start; i < end; ++i)
1167 data_map[i] = value;
1168 }
1169 }
1170
1171
1172
1173 FlexibleIndexStorage::index_type &
1174 FlexibleIndexStorage::operator[](const std::size_t index)
1175 {
1176 AssertIndexRange(index, size);
1177
1178 if (use_vector)
1179 {
1180 AssertDimension(data.size(), size);
1181 return data[index];
1182 }
1183 else
1184 {
1185 return data_map.try_emplace(index, invalid_index_value)
1186 .first->second;
1187 }
1188 }
1189
1190
1191
1192 inline bool
1193 FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
1194 {
1195 AssertIndexRange(index, size);
1196
1197 if (use_vector)
1198 {
1199 if (data.empty())
1200 return false;
1201
1202 AssertDimension(data.size(), size);
1203 return data[index] != invalid_index_value;
1204 }
1205 else
1206 return data_map.find(index) != data_map.end();
1207 }
1208
1209
1210
1211 Dictionary::Dictionary(const IndexSet &owned_indices,
1212 const MPI_Comm comm)
1213 {
1214 // 1) set up the partition
1215 partition(owned_indices, comm);
1216
1217 unsigned int my_rank = this_mpi_process(comm);
1218
1219 types::global_dof_index dic_local_received = 0;
1220 std::map<unsigned int,
1221 std::vector<std::pair<types::global_dof_index,
1222 types::global_dof_index>>>
1223 buffers;
1224
1225 const auto owned_indices_size_actual =
1226 Utilities::MPI::sum(owned_indices.n_elements(), comm);
1227
1228 actually_owning_ranks.reinit((owned_indices_size_actual *
1229 sparsity_factor) > owned_indices.size(),
1230 owned_indices_size_actual ==
1231 owned_indices.size(),
1232 locally_owned_size);
1233
1234 // 2) collect relevant processes and process local dict entries
1235 for (auto interval = owned_indices.begin_intervals();
1236 interval != owned_indices.end_intervals();
1237 ++interval)
1238 {
1239 // Due to the granularity of the dictionary, the interval
1240 // might be split into several ranges of processor owner
1241 // ranks. Here, we process the interval by breaking into
1242 // smaller pieces in terms of the dictionary number.
1243 std::pair<types::global_dof_index, types::global_dof_index>
1244 index_range(*interval->begin(), interval->last() + 1);
1245
1246 AssertThrow(index_range.second <= size, ExcInternalError());
1247
1248 while (index_range.first != index_range.second)
1249 {
1250 Assert(index_range.first < index_range.second,
1251 ExcInternalError());
1252
1253 const unsigned int owner =
1254 dof_to_dict_rank(index_range.first);
1255
1256 // this explicitly picks up the formula of
1257 // dof_to_dict_rank, so the two places must be in sync
1258 const types::global_dof_index next_index =
1259 std::min(get_index_offset(owner + 1), index_range.second);
1260
1261 Assert(next_index > index_range.first, ExcInternalError());
1262
1263 if constexpr (running_in_debug_mode())
1264 {
1265 // make sure that the owner is the same on the current
1266 // interval
1267 for (types::global_dof_index i = index_range.first + 1;
1268 i < next_index;
1269 ++i)
1270 AssertDimension(owner, dof_to_dict_rank(i));
1271 }
1272
1273 // add the interval, either to the local range or into a
1274 // buffer to be sent to another processor
1275 if (owner == my_rank)
1276 {
1277 actually_owning_ranks.fill(index_range.first -
1278 local_range.first,
1279 next_index - local_range.first,
1280 my_rank);
1281 dic_local_received += next_index - index_range.first;
1282 if (actually_owning_rank_list.empty())
1283 actually_owning_rank_list.push_back(my_rank);
1284 }
1285 else
1286 buffers[owner].emplace_back(index_range.first, next_index);
1287
1288 index_range.first = next_index;
1289 }
1290 }
1291
1292#ifdef DEAL_II_WITH_MPI
1293 n_dict_procs_in_owned_indices = buffers.size();
1294 std::vector<MPI_Request> request;
1295
1296 // Check if index set space is partitioned globally without gaps.
1297 if (owned_indices_size_actual == owned_indices.size())
1298 {
1299 // no gaps: setup is simple! Processes send their locally owned
1300 // indices to the dictionary. The dictionary stores the sending
1301 // rank for each index. The dictionary knows exactly
1302 // when it is set up when all indices it is responsible for
1303 // have been processed.
1304
1305 request.reserve(n_dict_procs_in_owned_indices);
1306
1307 // protect the following communication steps using a mutex:
1308 static CollectiveMutex mutex;
1309 CollectiveMutex::ScopedLock lock(mutex, comm);
1310
1311 const int mpi_tag =
1312 Utilities::MPI::internal::Tags::dictionary_reinit;
1313
1314
1315 // 3) send messages with local dofs to the right dict process
1316 for (const auto &rank_pair : buffers)
1317 {
1318 request.push_back(MPI_Request());
1319 const int ierr = MPI_Isend(rank_pair.second.data(),
1320 rank_pair.second.size() * 2,
1321 DEAL_II_DOF_INDEX_MPI_TYPE,
1322 rank_pair.first,
1323 mpi_tag,
1324 comm,
1325 &request.back());
1326 AssertThrowMPI(ierr);
1327 }
1328
1329 // 4) receive messages until all dofs in dict are processed
1330 while (this->locally_owned_size != dic_local_received)
1331 {
1332 // wait for an incoming message
1333 MPI_Status status;
1334 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1335 AssertThrowMPI(ierr);
1336
1337 // retrieve size of incoming message
1338 int number_amount;
1339 ierr = MPI_Get_count(&status,
1340 DEAL_II_DOF_INDEX_MPI_TYPE,
1341 &number_amount);
1342 AssertThrowMPI(ierr);
1343
1344 const auto other_rank = status.MPI_SOURCE;
1345 actually_owning_rank_list.push_back(other_rank);
1346
1347 // receive message
1348 Assert(number_amount % 2 == 0, ExcInternalError());
1349 std::vector<
1350 std::pair<types::global_dof_index, types::global_dof_index>>
1351 buffer(number_amount / 2);
1352 ierr = MPI_Recv(buffer.data(),
1353 number_amount,
1354 DEAL_II_DOF_INDEX_MPI_TYPE,
1355 status.MPI_SOURCE,
1356 status.MPI_TAG,
1357 comm,
1358 MPI_STATUS_IGNORE);
1359 AssertThrowMPI(ierr);
1360 // process message: loop over all intervals
1361 for (auto interval : buffer)
1362 {
1363 if constexpr (library_build_mode ==
1364 LibraryBuildMode::debug)
1365 {
1366 for (types::global_dof_index i = interval.first;
1367 i < interval.second;
1368 i++)
1369 Assert(actually_owning_ranks.entry_has_been_set(
1370 i - local_range.first) == false,
1371 ExcInternalError());
1372 Assert(interval.first >= local_range.first &&
1373 interval.first < local_range.second,
1374 ExcInternalError());
1375 Assert(interval.second > local_range.first &&
1376 interval.second <= local_range.second,
1377 ExcInternalError());
1378 }
1379
1380 actually_owning_ranks.fill(interval.first -
1381 local_range.first,
1382 interval.second -
1383 local_range.first,
1384 other_rank);
1385 dic_local_received += interval.second - interval.first;
1386 }
1387 }
1388 }
1389 else
1390 {
1391 // with gap: use a ConsensusAlgorithm to determine when all
1392 // dictionaries have been set up.
1393
1394 // 3/4) use a ConsensusAlgorithm to send messages with local
1395 // dofs to the right dict process
1396
1397 using RequestType = std::vector<
1398 std::pair<types::global_dof_index, types::global_dof_index>>;
1399
1400 ConsensusAlgorithms::selector<RequestType>(
1401 /* targets = */
1402 [&buffers]() {
1403 std::vector<unsigned int> targets;
1404 targets.reserve(buffers.size());
1405 for (const auto &rank_pair : buffers)
1406 targets.emplace_back(rank_pair.first);
1407
1408 return targets;
1409 }(),
1410
1411 /* create_request = */
1412 [&buffers](const unsigned int target_rank) -> RequestType {
1413 return buffers.at(target_rank);
1414 },
1415
1416 /* process_request = */
1417 [&](const unsigned int source_rank,
1418 const RequestType &request) -> void {
1419 // process message: loop over all intervals
1420 for (auto interval : request)
1421 {
1422 if constexpr (library_build_mode ==
1423 LibraryBuildMode::debug)
1424 {
1425 for (types::global_dof_index i = interval.first;
1426 i < interval.second;
1427 i++)
1428 Assert(
1429 actually_owning_ranks.entry_has_been_set(
1430 i - local_range.first) == false,
1431 ExcMessage(
1432 "Multiple processes seem to own the same global index. "
1433 "A possible reason is that the sets of locally owned "
1434 "indices are not distinct."));
1435 Assert(interval.first < interval.second,
1436 ExcInternalError());
1437 Assert(
1438 local_range.first <= interval.first &&
1439 interval.second <= local_range.second,
1440 ExcMessage(
1441 "The specified interval is not handled by the current process."));
1442 }
1443 actually_owning_ranks.fill(interval.first -
1444 local_range.first,
1445 interval.second -
1446 local_range.first,
1447 source_rank);
1448 }
1449 actually_owning_rank_list.push_back(source_rank);
1450 },
1451
1452 comm);
1453 }
1454
1455 std::sort(actually_owning_rank_list.begin(),
1456 actually_owning_rank_list.end());
1457
1458 for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
1459 Assert(actually_owning_rank_list[i] >
1460 actually_owning_rank_list[i - 1],
1461 ExcInternalError());
1462
1463 // 5) make sure that all messages have been sent
1464 if (request.size() > 0)
1465 {
1466 const int ierr = MPI_Waitall(request.size(),
1467 request.data(),
1468 MPI_STATUSES_IGNORE);
1469 AssertThrowMPI(ierr);
1470 }
1471
1472#else
1473 Assert(buffers.empty(), ExcInternalError());
1474 (void)comm;
1475 (void)dic_local_received;
1476#endif
1477 }
1478
1479
1480
1481 void
1482 Dictionary::partition(const IndexSet &owned_indices,
1483 const MPI_Comm comm)
1484 {
1485 const unsigned int n_procs = n_mpi_processes(comm);
1486 const unsigned int my_rank = this_mpi_process(comm);
1487
1488 size = owned_indices.size();
1489
1491
1492 dofs_per_process =
1493 std::max<types::global_dof_index>((size + n_procs - 1) / n_procs,
1494 range_minimum_grain_size);
1495
1496 stride_small_size =
1497 std::max<unsigned int>(dofs_per_process * n_procs / size, 1);
1498
1499 local_range.first = get_index_offset(my_rank);
1500 local_range.second = get_index_offset(my_rank + 1);
1501
1502 locally_owned_size = local_range.second - local_range.first;
1503 }
1504
1505
1506 ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
1507 const IndexSet &owned_indices,
1508 const IndexSet &indices_to_look_up,
1509 const MPI_Comm comm,
1510 std::vector<unsigned int> &owning_ranks,
1511 const bool track_index_requesters)
1512 : owned_indices(owned_indices)
1513 , indices_to_look_up(indices_to_look_up)
1514 , comm(comm)
1515 , my_rank(this_mpi_process(comm))
1516 , n_procs(n_mpi_processes(comm))
1517 , track_index_requesters(track_index_requesters)
1518 , owning_ranks(owning_ranks)
1519 , dict(owned_indices, comm)
1520 , requesters(dict.actually_owning_rank_list.size())
1521 {}
1522
1523
1524
1525 std::vector<unsigned int>
1526 ConsensusAlgorithmsPayload::compute_targets()
1527 {
1528 std::vector<unsigned int> targets;
1529
1531 unsigned int index = 0;
1532 unsigned int owner_index_guess = 0;
1533 for (auto i : indices_to_look_up)
1534 {
1535 unsigned int other_rank = dict.dof_to_dict_rank(i);
1536 if (other_rank == my_rank)
1537 {
1538 owning_ranks[index] =
1539 dict.actually_owning_ranks[i - dict.local_range.first];
1540 if (track_index_requesters)
1541 append_index_origin(i - dict.local_range.first,
1542 my_rank,
1543 owning_ranks[index],
1544 owner_index_guess);
1545 }
1546 else
1547 {
1548 if (targets.empty() || targets.back() != other_rank)
1549 targets.push_back(other_rank);
1550 auto &indices = indices_to_look_up_by_dict_rank[other_rank];
1551 indices.first.push_back(i);
1552 indices.second.push_back(index);
1553 }
1554 ++index;
1555 }
1556
1557 Assert(targets.size() == indices_to_look_up_by_dict_rank.size(),
1558 ExcMessage("Size does not match!"));
1559
1560 return targets;
1561 }
1562
1563
1564
1565 std::vector<std::pair<types::global_dof_index, types::global_dof_index>>
1566 ConsensusAlgorithmsPayload::create_request(
1567 const unsigned int other_rank)
1568 {
1569 std::vector<
1570 std::pair<types::global_dof_index, types::global_dof_index>>
1571 send_buffer;
1572
1573 // create index set and compress data to be sent
1574 auto &indices_i = indices_to_look_up_by_dict_rank[other_rank].first;
1575 IndexSet is(dict.size);
1576 is.add_indices(indices_i.begin(), indices_i.end());
1577 is.compress();
1578
1579 for (auto interval = is.begin_intervals();
1580 interval != is.end_intervals();
1581 ++interval)
1582 send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
1583
1584 return send_buffer;
1585 }
1586
1587
1588
1589 std::vector<unsigned int>
1590 ConsensusAlgorithmsPayload::answer_request(
1591 const unsigned int other_rank,
1592 const std::vector<std::pair<types::global_dof_index,
1593 types::global_dof_index>> &buffer_recv)
1594 {
1595 std::vector<unsigned int> request_buffer;
1596
1597 unsigned int owner_index_guess = 0;
1598 for (const auto &interval : buffer_recv)
1599 for (auto i = interval.first; i < interval.second; ++i)
1600 {
1601 const unsigned int actual_owner =
1602 dict.actually_owning_ranks[i - dict.local_range.first];
1603 request_buffer.push_back(actual_owner);
1604
1605 if (track_index_requesters)
1606 append_index_origin(i - dict.local_range.first,
1607 other_rank,
1608 actual_owner,
1609 owner_index_guess);
1610 }
1611
1612 return request_buffer;
1613 }
1614
1615
1616
1617 void
1618 ConsensusAlgorithmsPayload::process_answer(
1619 const unsigned int other_rank,
1620 const std::vector<unsigned int> &recv_buffer)
1621 {
1622 const auto &recv_indices =
1623 indices_to_look_up_by_dict_rank[other_rank].second;
1624 AssertDimension(recv_indices.size(), recv_buffer.size());
1625 for (unsigned int j = 0; j < recv_indices.size(); ++j)
1626 owning_ranks[recv_indices[j]] = recv_buffer[j];
1627 }
1628
1629
1630
1631 std::map<unsigned int, IndexSet>
1632 ConsensusAlgorithmsPayload::get_requesters()
1633 {
1634 Assert(track_index_requesters,
1635 ExcMessage("Must enable index range tracking in "
1636 "constructor of ConsensusAlgorithmProcess"));
1637
1638 std::map<unsigned int, ::IndexSet> requested_indices;
1639
1640#ifdef DEAL_II_WITH_MPI
1641
1642 static CollectiveMutex mutex;
1643 CollectiveMutex::ScopedLock lock(mutex, comm);
1644
1645 const int mpi_tag = Utilities::MPI::internal::Tags::
1646 consensus_algorithm_payload_get_requesters;
1647
1648 // reserve enough slots for the requests ahead; depending on
1649 // whether the owning rank is one of the requesters or not, we
1650 // might have one less requests to execute, so fill the requests
1651 // on demand.
1652 std::vector<MPI_Request> send_requests;
1653 send_requests.reserve(requesters.size());
1654
1655 // We use an integer vector for the data exchange. Since we send
1656 // data associated to intervals with different requesters, we will
1657 // need to send (a) the MPI rank of the requester, (b) the number
1658 // of intervals directed to this requester, and (c) a list of
1659 // intervals, i.e., two integers per interval. The number of items
1660 // sent in total can be deduced both via the MPI status message at
1661 // the receiver side as well as by counting the buckets from
1662 // different requesters.
1663 std::vector<std::vector<types::global_dof_index>> send_data(
1664 requesters.size());
1665 for (unsigned int i = 0; i < requesters.size(); ++i)
1666 {
1667 // special code for our own indices
1668 if (dict.actually_owning_rank_list[i] == my_rank)
1669 {
1670 for (const auto &j : requesters[i])
1671 {
1672 const types::global_dof_index index_offset =
1673 dict.get_index_offset(my_rank);
1674 IndexSet &my_index_set = requested_indices[j.first];
1675 my_index_set.set_size(owned_indices.size());
1676 for (const auto &interval : j.second)
1677 my_index_set.add_range(index_offset + interval.first,
1678 index_offset + interval.second);
1679 }
1680 }
1681 else
1682 {
1683 for (const auto &j : requesters[i])
1684 {
1685 send_data[i].push_back(j.first);
1686 send_data[i].push_back(j.second.size());
1687 for (const auto &interval : j.second)
1688 {
1689 send_data[i].push_back(interval.first);
1690 send_data[i].push_back(interval.second);
1691 }
1692 }
1693 send_requests.push_back(MPI_Request());
1694 const int ierr =
1695 MPI_Isend(send_data[i].data(),
1696 send_data[i].size(),
1697 Utilities::MPI::mpi_type_id_for_type<
1698 types::global_dof_index>,
1699 dict.actually_owning_rank_list[i],
1700 mpi_tag,
1701 comm,
1702 &send_requests.back());
1703 AssertThrowMPI(ierr);
1704 }
1705 }
1706
1707 // receive the data
1708 for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
1709 {
1710 // wait for an incoming message
1711 MPI_Status status;
1712 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1713 AssertThrowMPI(ierr);
1714
1715 // retrieve size of incoming message
1716 int number_amount;
1717 ierr = MPI_Get_count(
1718 &status,
1719 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
1720 &number_amount);
1721 AssertThrowMPI(ierr);
1722
1723 // receive message
1724 Assert(number_amount % 2 == 0, ExcInternalError());
1725 std::vector<
1726 std::pair<types::global_dof_index, types::global_dof_index>>
1727 buffer(number_amount / 2);
1728 ierr = MPI_Recv(
1729 buffer.data(),
1730 number_amount,
1731 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
1732 status.MPI_SOURCE,
1733 status.MPI_TAG,
1734 comm,
1735 &status);
1736 AssertThrowMPI(ierr);
1737
1738 // unpack the message and translate the dictionary-local
1739 // indices coming via MPI to the global index range
1740 const types::global_dof_index index_offset =
1741 dict.get_index_offset(status.MPI_SOURCE);
1742 unsigned int offset = 0;
1743 while (offset < buffer.size())
1744 {
1745 AssertIndexRange(offset + buffer[offset].second,
1746 buffer.size());
1747
1748 IndexSet my_index_set(owned_indices.size());
1749 for (unsigned int i = offset + 1;
1750 i < offset + buffer[offset].second + 1;
1751 ++i)
1752 my_index_set.add_range(index_offset + buffer[i].first,
1753 index_offset + buffer[i].second);
1754
1755 // the underlying index set is able to merge ranges coming
1756 // from different ranks due to the partitioning in the
1757 // dictionary
1758 IndexSet &index_set = requested_indices[buffer[offset].first];
1759 if (index_set.size() == 0)
1760 index_set.set_size(owned_indices.size());
1761 index_set.add_indices(my_index_set);
1762
1763 offset += buffer[offset].second + 1;
1764 }
1765 AssertDimension(offset, buffer.size());
1766 }
1767
1768 if (send_requests.size() > 0)
1769 {
1770 const auto ierr = MPI_Waitall(send_requests.size(),
1771 send_requests.data(),
1772 MPI_STATUSES_IGNORE);
1773 AssertThrowMPI(ierr);
1774 }
1775
1776
1777 if constexpr (running_in_debug_mode())
1778 {
1779 for (const auto &it : requested_indices)
1780 {
1781 IndexSet copy_set = it.second;
1782 copy_set.subtract_set(owned_indices);
1783 Assert(copy_set.n_elements() == 0,
1785 "The indices requested from the current "
1786 "MPI rank should be locally owned here!"));
1787 }
1788 }
1789
1790#endif // DEAL_II_WITH_MPI
1791
1792 return requested_indices;
1793 }
1794
1795
1796
1797 void
1798 ConsensusAlgorithmsPayload::append_index_origin(
1799 const types::global_dof_index index_within_dict,
1800 const unsigned int rank_of_request,
1801 const unsigned int rank_of_owner,
1802 unsigned int &owner_index_guess)
1803 {
1804 // remember who requested which index. We want to use an
1805 // std::vector with simple addressing, via a good guess from the
1806 // preceding index, rather than std::map, because this is an inner
1807 // loop and it avoids the map lookup in every iteration
1808 owner_index_guess =
1809 dict.get_owning_rank_index(rank_of_owner, owner_index_guess);
1810
1811 auto &request = requesters[owner_index_guess];
1812 if (request.empty() || request.back().first != rank_of_request)
1813 request.emplace_back(
1814 rank_of_request,
1815 std::vector<
1816 std::pair<types::global_dof_index, types::global_dof_index>>());
1817
1818 auto &intervals = request.back().second;
1819 if (intervals.empty() || intervals.back().second != index_within_dict)
1820 intervals.emplace_back(index_within_dict, index_within_dict + 1);
1821 else
1822 ++intervals.back().second;
1823 }
1824
1825 } // namespace ComputeIndexOwner
1826 } // namespace
1827
1828
1829
1830 std::vector<unsigned int>
1831 compute_index_owner(const IndexSet &owned_indices,
1832 const IndexSet &indices_to_look_up,
1833 const MPI_Comm comm)
1834 {
1835 Assert(owned_indices.size() == indices_to_look_up.size(),
1836 ExcMessage("IndexSets have to have the same sizes."));
1837
1838 Assert(
1839 owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1840 ExcMessage("IndexSets have to have the same size on all processes."));
1841
1842 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1843
1844 // Step 1: setup dictionary
1845 // The input owned_indices can be partitioned arbitrarily. In the
1846 // dictionary, the index set is statically repartitioned among the
1847 // processes again and extended with information about the actual
1848 // owner of that index.
1849 ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1850 owned_indices,
1851 indices_to_look_up,
1852 comm,
1853 owning_ranks,
1854 /* keep track of requesters = */ false);
1855
1856 // Step 2: read dictionary
1857 // Communicate with the process who owns the index in the static
1858 // partition (i.e. in the dictionary). This process returns the actual
1859 // owner of the index.
1860 using RequestType =
1861 ComputeIndexOwner::ConsensusAlgorithmsPayload::RequestType;
1862 using AnswerType =
1863 ComputeIndexOwner::ConsensusAlgorithmsPayload::AnswerType;
1864 ConsensusAlgorithms::selector<RequestType, AnswerType>(
1865 process.compute_targets(),
1866 [&process](const unsigned int other_rank) -> RequestType {
1867 return process.create_request(other_rank);
1868 },
1869 [&process](const unsigned int other_rank, const RequestType &r)
1870 -> AnswerType { return process.answer_request(other_rank, r); },
1871 [&process](const unsigned int other_rank, const AnswerType &a) -> void {
1872 process.process_answer(other_rank, a);
1873 },
1874 comm);
1875
1876 return owning_ranks;
1877 }
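A usage sketch of compute_index_owner() (not part of this file; `locally_owned` and `ghost_indices` are hypothetical IndexSet objects of the same global size):

  const std::vector<unsigned int> owners =
    dealii::Utilities::MPI::compute_index_owner(locally_owned,
                                                ghost_indices,
                                                MPI_COMM_WORLD);
  // owners[k] is the rank that owns the k-th element of ghost_indices.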
1878
1879
1880
1881 std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
1882 compute_index_owner_and_requesters(const IndexSet &owned_indices,
1883 const IndexSet &indices_to_look_up,
1884 const MPI_Comm &comm)
1885 {
1886 Assert(owned_indices.size() == indices_to_look_up.size(),
1887 ExcMessage("IndexSets have to have the same sizes."));
1888
1889 Assert(
1890 owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1891 ExcMessage("IndexSets have to have the same size on all processes."));
1892
1893 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1894
1895 // Step 1: setup dictionary
1896 // The input owned_indices can be partitioned arbitrarily. In the
1897 // dictionary, the index set is statically repartitioned among the
1898 // processes again and extended with information about the actual
1899 // owner of that index.
1900 ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1901 owned_indices, indices_to_look_up, comm, owning_ranks, true);
1902
1903 // Step 2: read dictionary
1904 // Communicate with the process who owns the index in the static
1905 // partition (i.e. in the dictionary). This process returns the actual
1906 // owner of the index.
1907 using RequestType =
1908 ComputeIndexOwner::ConsensusAlgorithmsPayload::RequestType;
1909 using AnswerType =
1910 ComputeIndexOwner::ConsensusAlgorithmsPayload::AnswerType;
1911 ConsensusAlgorithms::selector<RequestType, AnswerType>(
1912 process.compute_targets(),
1913 [&process](const unsigned int other_rank) -> RequestType {
1914 return process.create_request(other_rank);
1915 },
1916 [&process](const unsigned int other_rank, const RequestType &r)
1917 -> AnswerType { return process.answer_request(other_rank, r); },
1918 [&process](const unsigned int other_rank, const AnswerType &a) -> void {
1919 process.process_answer(other_rank, a);
1920 },
1921 comm);
1922
1923 return {owning_ranks, process.get_requesters()};
1924 }
1925
1926
1927
1928 namespace internal
1929 {
1930 namespace CollectiveMutexImplementation
1931 {
1936 void
1937 check_exception()
1938 {
1939#ifdef DEAL_II_WITH_MPI
1940 if (std::uncaught_exceptions() > 0)
1941 {
1942 std::cerr
1943 << "---------------------------------------------------------\n"
1944 << "An exception was thrown inside a section of the program\n"
1945 << "guarded by a CollectiveMutex.\n"
1946 << "Because a CollectiveMutex guards critical communication\n"
1947 << "handling the exception would likely\n"
1948 << "deadlock because only the current process is aware of the\n"
1949 << "exception. To prevent this deadlock, the program will be\n"
1950 << "aborted.\n"
1951 << "---------------------------------------------------------"
1952 << std::endl;
1953
1954 MPI_Abort(MPI_COMM_WORLD, 1);
1955 }
1956#endif
1957 }
1958 } // namespace CollectiveMutexImplementation
1959 } // namespace internal
1960
1961
1962
1963 CollectiveMutex::CollectiveMutex()
1964 : locked(false)
1965 , request(MPI_REQUEST_NULL)
1966 {
1967 InitFinalize::register_request(request);
1968 }
1969
1970
1971
1972 CollectiveMutex::~CollectiveMutex()
1973 {
1974 // First check if this destructor is called during exception handling
1975 // if so, abort.
1976 internal::CollectiveMutexImplementation::check_exception();
1977
1978 Assert(
1979 !locked,
1980 ExcMessage(
1981 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1982
1983 InitFinalize::unregister_request(request);
1984 }
1985
1986
1987
1988 void
1989 CollectiveMutex::lock(const MPI_Comm comm)
1990 {
1991 Assert(
1992 !locked,
1993 ExcMessage(
1994 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1995
1996#ifdef DEAL_II_WITH_MPI
1997
1998 if (job_supports_mpi())
1999 {
2000 // TODO: For now, we implement this mutex with a blocking barrier in
2001 // the lock and unlock. It needs to be tested, if we can move to a
2002 // nonblocking barrier (code disabled below).
2003
2004 const int ierr = MPI_Barrier(comm);
2005 AssertThrowMPI(ierr);
2006
2007# if 0
2008 // wait for non-blocking barrier to finish. This is a noop the
2009 // first time we lock().
2010 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2011 AssertThrowMPI(ierr);
2012# else
2013 // nothing to do as blocking barrier already completed
2014# endif
2015 }
2016#else
2017 (void)comm;
2018#endif
2019
2020 locked = true;
2021 }
2022
2023
2024
2025 void
2026 CollectiveMutex::unlock(const MPI_Comm comm)
2027 {
2028 // First check if this function is called during exception handling
2029 // if so, abort. This can happen if a ScopedLock is destroyed.
2030 internal::CollectiveMutexImplementation::check_exception();
2031
2032 Assert(
2033 locked,
2034 ExcMessage(
2035 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
2036
2037#ifdef DEAL_II_WITH_MPI
2038
2039 if (job_supports_mpi())
2040 {
2041 // TODO: For now, we implement this mutex with a blocking barrier
2042 // in the lock and unlock. It needs to be tested, if we can move
2043 // to a nonblocking barrier (code disabled below):
2044# if 0
2045 const int ierr = MPI_Ibarrier(comm, &request);
2046 AssertThrowMPI(ierr);
2047# else
2048 const int ierr = MPI_Barrier(comm);
2049 AssertThrowMPI(ierr);
2050# endif
2051 }
2052#else
2053 (void)comm;
2054#endif
2055
2056 locked = false;
2057 }
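A usage sketch of the mutex whose lock()/unlock() are defined above (not part of this file; `comm` is an assumed communicator). The ScopedLock acquires the mutex collectively on construction and releases it on destruction:

  static dealii::Utilities::MPI::CollectiveMutex mutex;
  {
    dealii::Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
    // ... wildcard (MPI_ANY_SOURCE) communication on comm, which must not
    // interleave with another critical section on the same communicator ...
  } // unlock() runs here, collectively on all ranks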
2058
2059
2060#ifndef DOXYGEN
2061 // explicit instantiations
2062
2063 // booleans aren't in MPI_SCALARS
2064 template bool
2065 reduce(const bool &,
2066 const MPI_Comm,
2067 const std::function<bool(const bool &, const bool &)> &,
2068 const unsigned int);
2069
2070 template std::vector<bool>
2071 reduce(const std::vector<bool> &,
2072 const MPI_Comm,
2073 const std::function<std::vector<bool>(const std::vector<bool> &,
2074 const std::vector<bool> &)> &,
2075 const unsigned int);
2076
2077 template bool
2078 all_reduce(const bool &,
2079 const MPI_Comm,
2080 const std::function<bool(const bool &, const bool &)> &);
2081
2082 template std::vector<bool>
2083 all_reduce(
2084 const std::vector<bool> &,
2085 const MPI_Comm,
2086 const std::function<std::vector<bool>(const std::vector<bool> &,
2087 const std::vector<bool> &)> &);
2088
2089 // We need an explicit instantiation of this for the same reason as the
2090 // other types described in mpi.inst.in
2091 template void
2092 internal::all_reduce<bool>(const MPI_Op &,
2093 const ArrayView<const bool> &,
2094 const MPI_Comm,
2095 const ArrayView<bool> &);
2096
2097
2098 template bool
2099 logical_or<bool>(const bool &, const MPI_Comm);
2100
2101
2102 template void
2103 logical_or<bool>(const ArrayView<const bool> &,
2104 const MPI_Comm,
2105 const ArrayView<bool> &);
2106
2107
2108 template std::vector<unsigned int>
2109 compute_set_union(const std::vector<unsigned int> &vec,
2110 const MPI_Comm comm);
2111
2112
2113 template std::set<unsigned int>
2114 compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
2115#endif
2116
2117#include "base/mpi.inst"
2118 } // end of namespace MPI
2119} // end of namespace Utilities
2120