deal.II version GIT relicensing-2287-g6548a49e0a 2024-12-20 18:30:00+00:00
mpi.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/mpi.templates.h>
24
27
28#include <boost/serialization/utility.hpp>
29
30#include <iostream>
31#include <limits>
32#include <numeric>
33#include <set>
34#include <vector>
35
37DEAL_II_NAMESPACE_OPEN
38
39namespace Utilities
40{
41 IndexSet
42 create_evenly_distributed_partitioning(
43 const unsigned int my_partition_id,
44 const unsigned int n_partitions,
45 const types::global_dof_index total_size)
46 {
47 static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
48 "IndexSet::size_type must match types::global_dof_index for "
49 "using this function");
50 const unsigned int remain = total_size % n_partitions;
51
52 const IndexSet::size_type min_size = total_size / n_partitions;
53
54 const IndexSet::size_type begin =
55 min_size * my_partition_id + std::min(my_partition_id, remain);
56 const IndexSet::size_type end =
57 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
58 IndexSet result(total_size);
59 result.add_range(begin, end);
60 return result;
61 }
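 // A small worked example of the splitting above (illustrative only, not
 // part of the library source): for total_size = 10 and n_partitions = 3,
 // min_size = 3 and remain = 1, so the first partition gets one extra
 // index and the resulting ranges are
 //   partition 0: [0, 4)   partition 1: [4, 7)   partition 2: [7, 10)
 // A hypothetical call for partition 1 would look like
 //   const IndexSet owned =
 //     Utilities::create_evenly_distributed_partitioning(1, 3, 10);
 //   // owned now holds the half-open range [4, 7)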
62
63 namespace MPI
64 {
65 MinMaxAvg
66 min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
67 {
68 MinMaxAvg result;
69 min_max_avg(ArrayView<const double>(my_value),
70 ArrayView<MinMaxAvg>(result),
71 mpi_communicator);
72
73 return result;
74 }
75
76
77
78 std::vector<MinMaxAvg>
79 min_max_avg(const std::vector<double> &my_values,
80 const MPI_Comm mpi_communicator)
81 {
82 std::vector<MinMaxAvg> results(my_values.size());
83 min_max_avg(my_values, results, mpi_communicator);
84
85 return results;
86 }
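 // Usage sketch (illustrative only): every rank contributes one value and
 // receives the same statistics back, so the call is collective.
 //   const Utilities::MPI::MinMaxAvg stats =
 //     Utilities::MPI::min_max_avg(local_timing, MPI_COMM_WORLD);
 //   // stats.min, stats.max, stats.avg, and stats.sum are identical on all
 //   // ranks; stats.min_index / stats.max_index name the ranks that
 //   // contributed the extremal values.
 // Here `local_timing` is an assumed per-rank double, e.g. a wall time.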
87
88
89
90#ifdef DEAL_II_WITH_MPI
91 unsigned int
92 n_mpi_processes(const MPI_Comm mpi_communicator)
93 {
94 if (job_supports_mpi())
95 {
96 int n_jobs = 1;
97 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
98 AssertThrowMPI(ierr);
99 return n_jobs;
100 }
101 else
102 return 1;
103 }
104
105
106 unsigned int
107 this_mpi_process(const MPI_Comm mpi_communicator)
108 {
109 if (job_supports_mpi())
110 {
111 int rank = 0;
112 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
113 AssertThrowMPI(ierr);
114 return rank;
115 }
116 else
117 return 0;
118 }
119
120
121
122 std::vector<unsigned int>
123 mpi_processes_within_communicator(const MPI_Comm comm_large,
124 const MPI_Comm comm_small)
125 {
126 if (Utilities::MPI::job_supports_mpi() == false)
127 return std::vector<unsigned int>{0};
128
129 const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
130 const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
131
132 std::vector<unsigned int> ranks(size);
133 const int ierr = MPI_Allgather(
134 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
135 AssertThrowMPI(ierr);
136
137 return ranks;
138 }
139
140
141
142 MPI_Comm
143 duplicate_communicator(const MPI_Comm mpi_communicator)
144 {
145 MPI_Comm new_communicator;
146 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
147 AssertThrowMPI(ierr);
148 return new_communicator;
149 }
150
151
152
153 void
154 free_communicator(MPI_Comm mpi_communicator)
155 {
156 // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
157 const int ierr = MPI_Comm_free(&mpi_communicator);
158 AssertThrowMPI(ierr);
159 }
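 // Usage sketch (illustrative only): duplicate_communicator() and
 // free_communicator() are meant to be used as a pair, so that internal
 // communication cannot interfere with other messages exchanged on the
 // original communicator.
 //   MPI_Comm dup = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
 //   // ... communicate on `dup` ...
 //   Utilities::MPI::free_communicator(dup);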
160
161
162
163 std::vector<IndexSet>
164 create_ascending_partitioning(
165 const MPI_Comm comm,
166 const types::global_dof_index locally_owned_size)
167 {
168 static_assert(
169 std::is_same_v<types::global_dof_index, IndexSet::size_type>,
170 "IndexSet::size_type must match types::global_dof_index for "
171 "using this function");
172 const unsigned int n_proc = n_mpi_processes(comm);
173 const std::vector<IndexSet::size_type> sizes =
174 all_gather(comm, locally_owned_size);
175 const auto total_size =
176 std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
177
178 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
179
180 IndexSet::size_type begin = 0;
181 for (unsigned int i = 0; i < n_proc; ++i)
182 {
183 res[i].add_range(begin, begin + sizes[i]);
184 begin = begin + sizes[i];
185 }
186
187 return res;
188 }
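 // Worked example (illustrative only): if three ranks own 2, 5, and 3
 // elements respectively, every rank obtains the same vector of index
 // sets { [0,2), [2,7), [7,10) }, i.e. contiguous, ascending ranges in
 // rank order.
 //   const std::vector<IndexSet> partition =
 //     Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);
 // Here `n_locally_owned` is an assumed per-rank element count.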
189
190
191
192 IndexSet
193 create_evenly_distributed_partitioning(
194 const MPI_Comm comm,
195 const types::global_dof_index total_size)
196 {
197 const unsigned int this_proc = this_mpi_process(comm);
198 const unsigned int n_proc = n_mpi_processes(comm);
199
200 return Utilities::create_evenly_distributed_partitioning(this_proc,
201 n_proc,
202 total_size);
203 }
204
205
206
207 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
208 create_mpi_data_type_n_bytes(const std::size_t n_bytes)
209 {
210 MPI_Datatype result;
211 int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
212 AssertThrowMPI(ierr);
213 ierr = MPI_Type_commit(&result);
214 AssertThrowMPI(ierr);
215
216# ifdef DEBUG
217 MPI_Count size64;
218 ierr = MPI_Type_size_x(result, &size64);
219 AssertThrowMPI(ierr);
220
221 Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
222# endif
223
224 // Now put the new data type into a std::unique_ptr with a custom
225 // deleter. We call the std::unique_ptr constructor that takes as
226 // first argument a pointer (here, a pointer to a copy of the
227 // `result` object) and as second argument a pointer-to-function,
228 // for which we here use a lambda function without captures that
229 // acts as the 'deleter' object: it calls `MPI_Type_free` and then
230 // deletes the pointer. To avoid a compiler warning about a null
231 // this pointer in the lambda (which doesn't make sense: the lambda
232 // doesn't store anything), we create the deleter first.
233 auto deleter = [](MPI_Datatype *p) {
234 if (p != nullptr)
235 {
236 [[maybe_unused]] const int ierr = MPI_Type_free(p);
237
238 AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
239
240 delete p;
241 }
242 };
243
244 return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
245 new MPI_Datatype(result), deleter);
246 }
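 // Usage sketch (illustrative only): the returned data type describes a
 // contiguous block of n_bytes bytes, so a large buffer can be sent with
 // a count of one even if its size exceeds the `int` count limit of plain
 // MPI calls.
 //   std::vector<char> buffer(n_bytes);
 //   const auto bigtype =
 //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
 //   MPI_Send(buffer.data(), 1, *bigtype, /*dest=*/1, /*tag=*/0, comm);
 //   // MPI_Type_free() is called automatically when `bigtype` goes out
 //   // of scope.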
247
248
249
250 std::vector<unsigned int>
251 compute_point_to_point_communication_pattern(
252 const MPI_Comm mpi_comm,
253 const std::vector<unsigned int> &destinations)
254 {
255 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
256 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
257
258# ifdef DEBUG
259 for (const unsigned int destination : destinations)
260 AssertIndexRange(destination, n_procs);
261# endif
262
263 // Have a little function that checks if destinations provided
264 // to the current process are unique. The way it does this is
265 // to create a sorted list of destinations and then walk through
266 // the list and look at successive elements -- if we find the
267 // same number twice, we know that the destinations were not
268 // unique
269 const bool my_destinations_are_unique = [destinations]() {
270 if (destinations.empty())
271 return true;
272 else
273 {
274 std::vector<unsigned int> my_destinations = destinations;
275 std::sort(my_destinations.begin(), my_destinations.end());
276 return (std::adjacent_find(my_destinations.begin(),
277 my_destinations.end()) ==
278 my_destinations.end());
279 }
280 }();
281
282 // If all processes report that they have unique destinations,
283 // then we can short-cut the process using a consensus algorithm (which
284 // is implemented only for the case of unique destinations):
285 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
286 1)
287 {
288 return ConsensusAlgorithms::nbx<char, char>(
289 destinations, {}, {}, {}, mpi_comm);
290 }
291
292 // So we need to run a different algorithm, specifically one that
293 // requires more memory -- MPI_Reduce_scatter_block will require memory
294 // proportional to the number of processes involved; that function is
295 // available for MPI 2.2 or later:
296 static CollectiveMutex mutex;
297 CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
298
299 const int mpi_tag =
300 internal::Tags::compute_point_to_point_communication_pattern;
301
302 // Calculate the number of messages to send to each process
303 std::vector<unsigned int> dest_vector(n_procs);
304 for (const auto &el : destinations)
305 ++dest_vector[el];
306
307 // Find how many processes will send to this one
308 // by reducing with sum and then scattering the
309 // results over all processes
310 unsigned int n_recv_from;
311 const int ierr = MPI_Reduce_scatter_block(
312 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
313
314 AssertThrowMPI(ierr);
315
316 // Send myid to every process in `destinations` vector...
317 std::vector<MPI_Request> send_requests(destinations.size());
318 for (const auto &el : destinations)
319 {
320 const int ierr =
321 MPI_Isend(&myid,
322 1,
323 MPI_UNSIGNED,
324 el,
325 mpi_tag,
326 mpi_comm,
327 send_requests.data() + (&el - destinations.data()));
328 AssertThrowMPI(ierr);
329 }
330
331
332 // Receive `n_recv_from` times from the processes
333 // who communicate with this one. Store the obtained id's
334 // in the resulting vector
335 std::vector<unsigned int> origins(n_recv_from);
336 for (auto &el : origins)
337 {
338 const int ierr = MPI_Recv(&el,
339 1,
340 MPI_UNSIGNED,
341 MPI_ANY_SOURCE,
342 mpi_tag,
343 mpi_comm,
344 MPI_STATUS_IGNORE);
345 AssertThrowMPI(ierr);
346 }
347
348 if (destinations.size() > 0)
349 {
350 const int ierr = MPI_Waitall(destinations.size(),
351 send_requests.data(),
352 MPI_STATUSES_IGNORE);
353 AssertThrowMPI(ierr);
354 }
355
356 return origins;
357 }
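 // Usage sketch (illustrative only): `destinations` lists the ranks this
 // process wants to send to; the collective call returns the ranks that
 // will send to this process, which is exactly the information needed to
 // post the matching receives.
 //   const std::vector<unsigned int> sources =
 //     Utilities::MPI::compute_point_to_point_communication_pattern(
 //       comm, destinations);
 //   // post one receive per entry of `sources`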
358
359
360
361 unsigned int
362 compute_n_point_to_point_communications(
363 const MPI_Comm mpi_comm,
364 const std::vector<unsigned int> &destinations)
365 {
366 // Have a little function that checks if destinations provided
367 // to the current process are unique:
368 const bool my_destinations_are_unique = [destinations]() {
369 std::vector<unsigned int> my_destinations = destinations;
370 const unsigned int n_destinations = my_destinations.size();
371 std::sort(my_destinations.begin(), my_destinations.end());
372 my_destinations.erase(std::unique(my_destinations.begin(),
373 my_destinations.end()),
374 my_destinations.end());
375 return (my_destinations.size() == n_destinations);
376 }();
377
378 // If all processes report that they have unique destinations,
379 // then we can short-cut the process using a consensus algorithm:
380
381 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
382 1)
383 {
384 return ConsensusAlgorithms::nbx<char, char>(
385 destinations, {}, {}, {}, mpi_comm)
386 .size();
387 }
388 else
389 {
390 const unsigned int n_procs =
391 Utilities::MPI::n_mpi_processes(mpi_comm);
392
393# ifdef DEBUG
394 for (const unsigned int destination : destinations)
395 {
396 AssertIndexRange(destination, n_procs);
397 Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
399 "There is no point in communicating with ourselves."));
400 }
401# endif
402
403 // Calculate the number of messages to send to each process
404 std::vector<unsigned int> dest_vector(n_procs);
405 for (const auto &el : destinations)
406 ++dest_vector[el];
407
408 // Find out how many processes will send to this one
409 // MPI_Reduce_scatter(_block) does exactly this
410 unsigned int n_recv_from = 0;
411
412 const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
413 &n_recv_from,
414 1,
415 MPI_UNSIGNED,
416 MPI_SUM,
417 mpi_comm);
418
419 AssertThrowMPI(ierr);
420
421 return n_recv_from;
422 }
423 }
424
425
426
427 namespace
428 {
429 // custom MPI_Op for the min_max_avg() reduction below
430 void
431 max_reduce(const void *in_lhs_,
432 void *inout_rhs_,
433 int *len,
434 MPI_Datatype *)
435 {
436 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
437 MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
438
439 for (int i = 0; i < *len; ++i)
440 {
441 inout_rhs[i].sum += in_lhs[i].sum;
442 if (inout_rhs[i].min > in_lhs[i].min)
443 {
444 inout_rhs[i].min = in_lhs[i].min;
445 inout_rhs[i].min_index = in_lhs[i].min_index;
446 }
447 else if (inout_rhs[i].min == in_lhs[i].min)
448 {
449 // choose lower cpu index when tied to make operator commutative
450 if (inout_rhs[i].min_index > in_lhs[i].min_index)
451 inout_rhs[i].min_index = in_lhs[i].min_index;
452 }
453
454 if (inout_rhs[i].max < in_lhs[i].max)
455 {
456 inout_rhs[i].max = in_lhs[i].max;
457 inout_rhs[i].max_index = in_lhs[i].max_index;
458 }
459 else if (inout_rhs[i].max == in_lhs[i].max)
460 {
461 // choose lower cpu index when tied to make operator commutative
462 if (inout_rhs[i].max_index > in_lhs[i].max_index)
463 inout_rhs[i].max_index = in_lhs[i].max_index;
464 }
465 }
466 }
467 } // namespace
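 // Illustration of the reduction semantics above (not part of the source):
 // combining {min = 1.0, min_index = 3} with {min = 1.0, min_index = 0}
 // keeps min_index = 0, because ties are broken towards the lower rank;
 // this is what makes the operation commutative and therefore safe to
 // register with MPI_Op_create() with commute set to true.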
468
469
470
471 void
472 min_max_avg(const ArrayView<const double> &my_values,
473 const ArrayView<MinMaxAvg> &result,
474 const MPI_Comm mpi_communicator)
475 {
476 // If MPI was not started, we have a serial computation and cannot run
477 // the other MPI commands
478 if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
479 {
480 for (unsigned int i = 0; i < my_values.size(); ++i)
481 {
482 result[i].sum = my_values[i];
483 result[i].avg = my_values[i];
484 result[i].min = my_values[i];
485 result[i].max = my_values[i];
486 result[i].min_index = 0;
487 result[i].max_index = 0;
488 }
489 return;
490 }
491
492 /*
493 * A custom MPI datatype handle describing the memory layout of the
494 * MinMaxAvg struct. Initialized on the first pass in which control
495 * reaches the static variable, so hopefully not initialized too early.
496 */
497 static MPI_Datatype type = []() {
498 MPI_Datatype type;
499
500 int lengths[] = {3, 2, 1};
501
502 MPI_Aint displacements[] = {0,
503 offsetof(MinMaxAvg, min_index),
504 offsetof(MinMaxAvg, avg)};
505
506 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
507
508 int ierr =
509 MPI_Type_create_struct(3, lengths, displacements, types, &type);
510 AssertThrowMPI(ierr);
511
512 ierr = MPI_Type_commit(&type);
513 AssertThrowMPI(ierr);
514
515 /* Ensure that we free the allocated datatype again at the end of
516 * the program run just before we call MPI_Finalize():*/
517 InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
518 int ierr = MPI_Type_free(&type);
519 AssertThrowMPI(ierr);
520 });
521
522 return type;
523 }();
524
525 /*
526 * A custom MPI op handle for our max_reduce function.
527 * Initialized on the first pass in which control reaches the static
528 * variable, so hopefully not initialized too early.
529 */
530 static MPI_Op op = []() {
531 MPI_Op op;
532
533 int ierr =
534 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
535 static_cast<int>(true),
536 &op);
537 AssertThrowMPI(ierr);
538
539 /* Ensure that we free the allocated op again at the end of the
540 * program run just before we call MPI_Finalize():*/
541 InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
542 int ierr = MPI_Op_free(&op);
543 AssertThrowMPI(ierr);
544 });
545
546 return op;
547 }();
548
549 AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
550 Utilities::MPI::max(my_values.size(), mpi_communicator));
551
552 AssertDimension(my_values.size(), result.size());
553
554 // To avoid uninitialized values on some MPI implementations, provide
555 // result with a default value already...
556 MinMaxAvg dummy = {0.,
557 std::numeric_limits<double>::max(),
558 std::numeric_limits<double>::lowest(),
559 0,
560 0,
561 0.};
562
563 for (auto &i : result)
564 i = dummy;
565
566 const unsigned int my_id =
567 ::Utilities::MPI::this_mpi_process(mpi_communicator);
568 const unsigned int numproc =
569 ::Utilities::MPI::n_mpi_processes(mpi_communicator);
570
571 std::vector<MinMaxAvg> in(my_values.size());
572
573 for (unsigned int i = 0; i < my_values.size(); ++i)
574 {
575 in[i].sum = in[i].min = in[i].max = my_values[i];
576 in[i].min_index = in[i].max_index = my_id;
577 }
578
579 int ierr = MPI_Allreduce(
580 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
581 AssertThrowMPI(ierr);
582
583 for (auto &r : result)
584 r.avg = r.sum / numproc;
585 }
586
587
588#else
589
590 unsigned int
591 n_mpi_processes(const MPI_Comm)
592 {
593 return 1;
594 }
595
596
597
598 unsigned int
599 this_mpi_process(const MPI_Comm)
600 {
601 return 0;
602 }
603
604
605
606 std::vector<unsigned int>
607 mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
608 {
609 return std::vector<unsigned int>{0};
610 }
611
612
613
614 std::vector<IndexSet>
615 create_ascending_partitioning(
616 const MPI_Comm /*comm*/,
617 const types::global_dof_index locally_owned_size)
618 {
619 return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
620 }
621
622 IndexSet
623 create_evenly_distributed_partitioning(
624 const MPI_Comm /*comm*/,
625 const types::global_dof_index total_size)
626 {
627 return complete_index_set(total_size);
628 }
629
630
631
632 MPI_Comm
633 duplicate_communicator(const MPI_Comm mpi_communicator)
634 {
635 return mpi_communicator;
636 }
637
638
639
640 void
641 free_communicator(MPI_Comm /*mpi_communicator*/)
642 {}
643
644
645
646 void
647 min_max_avg(const ArrayView<const double> &my_values,
648 const ArrayView<MinMaxAvg> &result,
649 const MPI_Comm)
650 {
651 AssertDimension(my_values.size(), result.size());
652
653 for (unsigned int i = 0; i < my_values.size(); ++i)
654 {
655 result[i].sum = my_values[i];
656 result[i].avg = my_values[i];
657 result[i].min = my_values[i];
658 result[i].max = my_values[i];
659 result[i].min_index = 0;
660 result[i].max_index = 0;
661 }
662 }
663
664#endif
665
666
667 MPI_InitFinalize::MPI_InitFinalize(int &argc,
668 char **&argv,
669 const unsigned int max_num_threads)
670 : InitFinalize(argc,
671 argv,
675 max_num_threads)
676 {}
677
678
679
680 bool
681 job_supports_mpi()
682 {
683#ifdef DEAL_II_WITH_MPI
684 int MPI_has_been_started = 0;
685 const int ierr = MPI_Initialized(&MPI_has_been_started);
686 AssertThrowMPI(ierr);
687
688 return (MPI_has_been_started > 0);
689#else
690 return false;
691#endif
692 }
693
694
695
696 namespace
697 {
702 namespace ComputeIndexOwner
703 {
704 class FlexibleIndexStorage
705 {
706 public:
707 using index_type = unsigned int;
708 static const index_type invalid_index_value =
709 std::numeric_limits<index_type>::max();
710
711 FlexibleIndexStorage(const bool use_vector = true);
712
713 void
714 reinit(const bool use_vector,
715 const bool index_range_contiguous,
716 const std::size_t size);
717
718 void
719 fill(const std::size_t start,
720 const std::size_t end,
721 const index_type &value);
722
723 index_type &
724 operator[](const std::size_t index);
725
726 index_type
727 operator[](const std::size_t index) const;
728
729 bool
730 entry_has_been_set(const std::size_t index) const;
731
732 private:
733 bool use_vector;
734 std::size_t size;
735 std::vector<index_type> data;
736 std::map<std::size_t, index_type> data_map;
737 };
738
739
740
746 struct Dictionary
747 {
769 static constexpr unsigned int range_minimum_grain_size = 64;
770
777 static constexpr unsigned int sparsity_factor = 4;
778
779
785 Dictionary(const IndexSet &owned_indices, const MPI_Comm comm);
786
793 FlexibleIndexStorage actually_owning_ranks;
794
799 std::vector<unsigned int> actually_owning_rank_list;
800
807 types::global_dof_index dofs_per_process;
808
814 std::pair<types::global_dof_index, types::global_dof_index>
815 local_range;
816
822 types::global_dof_index locally_owned_size;
823
827 types::global_dof_index size;
828
833 unsigned int n_dict_procs_in_owned_indices;
834
840 unsigned int stride_small_size;
841
847 unsigned int
848 dof_to_dict_rank(const types::global_dof_index i);
849
854 types::global_dof_index
855 get_index_offset(const unsigned int rank);
856
862 unsigned int
863 get_owning_rank_index(const unsigned int rank_in_owned_indices,
864 const unsigned int guess = 0);
865
866 private:
871 void
872 partition(const IndexSet &owned_indices, const MPI_Comm comm);
873 };
874
875
876
883 class ConsensusAlgorithmsPayload
884 {
885 public:
886 using RequestType = std::vector<
887 std::pair<types::global_dof_index, types::global_dof_index>>;
888 using AnswerType = std::vector<unsigned int>;
889
893 ConsensusAlgorithmsPayload(const IndexSet &owned_indices,
894 const IndexSet &indices_to_look_up,
895 const MPI_Comm comm,
896 std::vector<unsigned int> &owning_ranks,
897 const bool track_index_requesters = false);
898
902 const IndexSet &owned_indices;
903
908 const IndexSet &indices_to_look_up;
909
913 const MPI_Comm comm;
914
918 const unsigned int my_rank;
919
924 const unsigned int n_procs;
925
932 const bool track_index_requesters;
933
939 std::vector<unsigned int> &owning_ranks;
940
944 Dictionary dict;
945
955 std::vector<std::vector<
956 std::pair<unsigned int,
957 std::vector<std::pair<types::global_dof_index,
958 types::global_dof_index>>>>>
959 requesters;
960
966 std::map<unsigned int,
967 std::pair<std::vector<types::global_dof_index>,
968 std::vector<unsigned int>>>
969 indices_to_look_up_by_dict_rank;
970
974 std::vector<unsigned int>
975 compute_targets();
976
980 std::vector<
981 std::pair<types::global_dof_index, types::global_dof_index>>
982 create_request(const unsigned int other_rank);
983
987 std::vector<unsigned int>
988 answer_request(
989 const unsigned int other_rank,
990 const std::vector<std::pair<types::global_dof_index,
991 types::global_dof_index>> &buffer_recv);
992
997 void
998 process_answer(const unsigned int other_rank,
999 const std::vector<unsigned int> &recv_buffer);
1000
1016 std::map<unsigned int, IndexSet>
1017 get_requesters();
1018
1019 private:
1033 void
1034 append_index_origin(
1035 const types::global_dof_index index_within_dictionary,
1036 const unsigned int rank_of_request,
1037 const unsigned int rank_of_owner,
1038 unsigned int &owner_index_guess);
1039 };
1040
1041 /* ------------------------- inline functions ----------------------- */
1042
1043 inline unsigned int
1044 Dictionary::dof_to_dict_rank(const types::global_dof_index i)
1045 {
1046 // note: this formula is also explicitly used in
1047 // get_index_offset(), so keep the two in sync
1048 return (i / dofs_per_process) * stride_small_size;
1049 }
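 // Worked example (illustrative only): for an index space of size 1000
 // distributed over 3 ranks, partition() yields
 //   dofs_per_process  = max((1000 + 2) / 3, 64) = 334
 //   stride_small_size = max(334 * 3 / 1000, 1)  = 1
 // so dof_to_dict_rank(500) = (500 / 334) * 1 = 1. With many ranks and few
 // indices, e.g. size 128 on 16 ranks, the grain size kicks in:
 //   dofs_per_process = 64, stride_small_size = 8,
 // and only ranks 0 and 8 hold dictionary ranges.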
1050
1051
1052 types::global_dof_index
1053 Dictionary::get_index_offset(const unsigned int rank)
1054 {
1055 return std::min(dofs_per_process *
1056 static_cast<types::global_dof_index>(
1057 (rank + stride_small_size - 1) /
1058 stride_small_size),
1059 size);
1060 }
1061
1062
1063
1064 inline unsigned int
1065 Dictionary::get_owning_rank_index(
1066 const unsigned int rank_in_owned_indices,
1067 const unsigned int guess)
1068 {
1069 AssertIndexRange(guess, actually_owning_rank_list.size());
1070 if (actually_owning_rank_list[guess] == rank_in_owned_indices)
1071 return guess;
1072 else
1073 {
1074 auto it = std::lower_bound(actually_owning_rank_list.begin(),
1075 actually_owning_rank_list.end(),
1076 rank_in_owned_indices);
1078 Assert(*it == rank_in_owned_indices, ExcInternalError());
1079 return it - actually_owning_rank_list.begin();
1080 }
1081 }
1082
1083
1084 const FlexibleIndexStorage::index_type
1085 FlexibleIndexStorage::invalid_index_value;
1086
1087
1088
1089 FlexibleIndexStorage::FlexibleIndexStorage(const bool use_vector)
1090 : use_vector(use_vector)
1091 , size(0)
1092 {}
1093
1094
1095
1096 void
1097 FlexibleIndexStorage::reinit(const bool use_vector,
1098 const bool index_range_contiguous,
1099 const std::size_t size)
1100 {
1101 this->use_vector = use_vector;
1102 this->size = size;
1103
1104 data = {};
1105 data_map.clear();
1106
1107 // in case we have contiguous indices, only fill the vector upon
1108 // first request in `fill`
1109 if (use_vector && !index_range_contiguous)
1110 data.resize(size, invalid_index_value);
1111 }
1112
1113
1114
1115 void
1116 FlexibleIndexStorage::fill(
1117 const std::size_t start,
1118 const std::size_t end,
1119 const FlexibleIndexStorage::index_type &value)
1120 {
1121 AssertIndexRange(start, size);
1122 AssertIndexRange(end, size + 1);
1123
1124 if (use_vector)
1125 {
1126 if (data.empty() && end > start)
1127 {
1128 // in debug mode, we want to track whether we set all
1129 // indices, so we first fill an invalid index and only later
1130 // the actual ones, whereas we simply assign the given rank
1131 // to the complete vector the first time we pass around in
1132 // this function in release mode to avoid touching data
1133 // unnecessarily (and overwrite the smaller pieces), as the
1134 // locally owned part comes first
1135#ifdef DEBUG
1136 data.resize(size, invalid_index_value);
1137 std::fill(data.begin() + start, data.begin() + end, value);
1138#else
1139 data.resize(size, value);
1140#endif
1141 }
1142 else
1143 {
1144 AssertDimension(data.size(), size);
1145 std::fill(data.begin() + start, data.begin() + end, value);
1146 }
1147 }
1148 else
1149 {
1150 for (auto i = start; i < end; ++i)
1151 data_map[i] = value;
1152 }
1153 }
1154
1155
1156
1157 FlexibleIndexStorage::index_type &
1158 FlexibleIndexStorage::operator[](const std::size_t index)
1159 {
1160 AssertIndexRange(index, size);
1161
1162 if (use_vector)
1163 {
1164 AssertDimension(data.size(), size);
1165 return data[index];
1166 }
1167 else
1168 {
1169 return data_map.try_emplace(index, invalid_index_value)
1170 .first->second;
1171 }
1172 }
1173
1174
1175
1176 inline bool
1177 FlexibleIndexStorage::entry_has_been_set(const std::size_t index) const
1178 {
1179 AssertIndexRange(index, size);
1180
1181 if (use_vector)
1182 {
1183 if (data.empty())
1184 return false;
1185
1186 AssertDimension(data.size(), size);
1187 return data[index] != invalid_index_value;
1188 }
1189 else
1190 return data_map.find(index) != data_map.end();
1191 }
1192
1193
1194
1195 Dictionary::Dictionary(const IndexSet &owned_indices,
1196 const MPI_Comm comm)
1197 {
1198 // 1) set up the partition
1199 partition(owned_indices, comm);
1200
1201 unsigned int my_rank = this_mpi_process(comm);
1202
1203 types::global_dof_index dic_local_received = 0;
1204 std::map<unsigned int,
1205 std::vector<std::pair<types::global_dof_index,
1206 types::global_dof_index>>>
1207 buffers;
1208
1209 const auto owned_indices_size_actual =
1210 Utilities::MPI::sum(owned_indices.n_elements(), comm);
1211
1212 actually_owning_ranks.reinit((owned_indices_size_actual *
1213 sparsity_factor) > owned_indices.size(),
1214 owned_indices_size_actual ==
1215 owned_indices.size(),
1216 locally_owned_size);
1217
1218 // 2) collect relevant processes and process local dict entries
1219 for (auto interval = owned_indices.begin_intervals();
1220 interval != owned_indices.end_intervals();
1221 ++interval)
1222 {
1223 // Due to the granularity of the dictionary, the interval
1224 // might be split into several ranges of processor owner
1225 // ranks. Here, we process the interval by breaking into
1226 // smaller pieces in terms of the dictionary number.
1227 std::pair<types::global_dof_index, types::global_dof_index>
1228 index_range(*interval->begin(), interval->last() + 1);
1229
1230 AssertThrow(index_range.second <= size, ExcInternalError());
1231
1232 while (index_range.first != index_range.second)
1233 {
1234 Assert(index_range.first < index_range.second,
1235 ExcInternalError());
1236
1237 const unsigned int owner =
1238 dof_to_dict_rank(index_range.first);
1239
1240 // this explicitly picks up the formula of
1241 // dof_to_dict_rank, so the two places must be in sync
1242 const types::global_dof_index next_index =
1243 std::min(get_index_offset(owner + 1), index_range.second);
1244
1245 Assert(next_index > index_range.first, ExcInternalError());
1246
1247#ifdef DEBUG
1248 // make sure that the owner is the same on the current
1249 // interval
1250 for (types::global_dof_index i = index_range.first + 1;
1251 i < next_index;
1252 ++i)
1253 AssertDimension(owner, dof_to_dict_rank(i));
1254#endif
1255
1256 // add the interval, either to the local range or into a
1257 // buffer to be sent to another processor
1258 if (owner == my_rank)
1259 {
1260 actually_owning_ranks.fill(index_range.first -
1261 local_range.first,
1262 next_index - local_range.first,
1263 my_rank);
1264 dic_local_received += next_index - index_range.first;
1265 if (actually_owning_rank_list.empty())
1266 actually_owning_rank_list.push_back(my_rank);
1267 }
1268 else
1269 buffers[owner].emplace_back(index_range.first, next_index);
1270
1271 index_range.first = next_index;
1272 }
1273 }
1274
1275#ifdef DEAL_II_WITH_MPI
1276 n_dict_procs_in_owned_indices = buffers.size();
1277 std::vector<MPI_Request> request;
1278
1279 // Check if index set space is partitioned globally without gaps.
1280 if (owned_indices_size_actual == owned_indices.size())
1281 {
1282 // no gaps: setup is simple! Processes send their locally owned
1283 // indices to the dictionary. The dictionary stores the sending
1284 // rank for each index. The dictionary knows exactly
1285 // when it is completely set up, namely when all indices it is
1286 // responsible for have been processed.
1287
1288 request.reserve(n_dict_procs_in_owned_indices);
1289
1290 // protect the following communication steps using a mutex:
1291 static CollectiveMutex mutex;
1292 CollectiveMutex::ScopedLock lock(mutex, comm);
1293
1294 const int mpi_tag =
1295 Utilities::MPI::internal::Tags::dictionary_reinit;
1296
1297
1298 // 3) send messages with local dofs to the right dict process
1299 for (const auto &rank_pair : buffers)
1300 {
1301 request.push_back(MPI_Request());
1302 const int ierr = MPI_Isend(rank_pair.second.data(),
1303 rank_pair.second.size() * 2,
1304 DEAL_II_DOF_INDEX_MPI_TYPE,
1305 rank_pair.first,
1306 mpi_tag,
1307 comm,
1308 &request.back());
1309 AssertThrowMPI(ierr);
1310 }
1311
1312 // 4) receive messages until all dofs in dict are processed
1313 while (this->locally_owned_size != dic_local_received)
1314 {
1315 // wait for an incoming message
1316 MPI_Status status;
1317 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1318 AssertThrowMPI(ierr);
1319
1320 // retrieve size of incoming message
1321 int number_amount;
1322 ierr = MPI_Get_count(&status,
1323 DEAL_II_DOF_INDEX_MPI_TYPE,
1324 &number_amount);
1325 AssertThrowMPI(ierr);
1326
1327 const auto other_rank = status.MPI_SOURCE;
1328 actually_owning_rank_list.push_back(other_rank);
1329
1330 // receive message
1331 Assert(number_amount % 2 == 0, ExcInternalError());
1332 std::vector<
1333 std::pair<types::global_dof_index, types::global_dof_index>>
1334 buffer(number_amount / 2);
1335 ierr = MPI_Recv(buffer.data(),
1336 number_amount,
1337 DEAL_II_DOF_INDEX_MPI_TYPE,
1338 status.MPI_SOURCE,
1339 status.MPI_TAG,
1340 comm,
1341 MPI_STATUS_IGNORE);
1342 AssertThrowMPI(ierr);
1343 // process message: loop over all intervals
1344 for (auto interval : buffer)
1345 {
1346# ifdef DEBUG
1347 for (types::global_dof_index i = interval.first;
1348 i < interval.second;
1349 i++)
1350 Assert(actually_owning_ranks.entry_has_been_set(
1351 i - local_range.first) == false,
1352 ExcInternalError());
1353 Assert(interval.first >= local_range.first &&
1354 interval.first < local_range.second,
1355 ExcInternalError());
1356 Assert(interval.second > local_range.first &&
1357 interval.second <= local_range.second,
1358 ExcInternalError());
1359# endif
1360
1361 actually_owning_ranks.fill(interval.first -
1362 local_range.first,
1363 interval.second -
1364 local_range.first,
1365 other_rank);
1366 dic_local_received += interval.second - interval.first;
1367 }
1368 }
1369 }
1370 else
1371 {
1372 // with gap: use a ConsensusAlgorithm to determine when all
1373 // dictionaries have been set up.
1374
1375 // 3/4) use a ConsensusAlgorithm to send messages with local
1376 // dofs to the right dict process
1377
1378 using RequestType = std::vector<
1379 std::pair<types::global_dof_index, types::global_dof_index>>;
1380
1381 ConsensusAlgorithms::selector<RequestType>(
1382 /* targets = */
1383 [&buffers]() {
1384 std::vector<unsigned int> targets;
1385 targets.reserve(buffers.size());
1386 for (const auto &rank_pair : buffers)
1387 targets.emplace_back(rank_pair.first);
1388
1389 return targets;
1390 }(),
1391
1392 /* create_request = */
1393 [&buffers](const unsigned int target_rank) -> RequestType {
1394 return buffers.at(target_rank);
1395 },
1396
1397 /* process_request = */
1398 [&](const unsigned int source_rank,
1399 const RequestType &request) -> void {
1400 // process message: loop over all intervals
1401 for (auto interval : request)
1402 {
1403# ifdef DEBUG
1404 for (types::global_dof_index i = interval.first;
1405 i < interval.second;
1406 i++)
1407 Assert(
1408 actually_owning_ranks.entry_has_been_set(
1409 i - local_range.first) == false,
1410 ExcMessage(
1411 "Multiple processes seem to own the same global index. "
1412 "A possible reason is that the sets of locally owned "
1413 "indices are not distinct."));
1414 Assert(interval.first < interval.second,
1415 ExcInternalError());
1416 Assert(
1417 local_range.first <= interval.first &&
1418 interval.second <= local_range.second,
1419 ExcMessage(
1420 "The specified interval is not handled by the current process."));
1421# endif
1422 actually_owning_ranks.fill(interval.first -
1423 local_range.first,
1424 interval.second -
1425 local_range.first,
1426 source_rank);
1427 }
1428 actually_owning_rank_list.push_back(source_rank);
1429 },
1430
1431 comm);
1432 }
1433
1434 std::sort(actually_owning_rank_list.begin(),
1435 actually_owning_rank_list.end());
1436
1437 for (unsigned int i = 1; i < actually_owning_rank_list.size(); ++i)
1438 Assert(actually_owning_rank_list[i] >
1439 actually_owning_rank_list[i - 1],
1440 ExcInternalError());
1441
1442 // 5) make sure that all messages have been sent
1443 if (request.size() > 0)
1444 {
1445 const int ierr = MPI_Waitall(request.size(),
1446 request.data(),
1447 MPI_STATUSES_IGNORE);
1448 AssertThrowMPI(ierr);
1449 }
1450
1451#else
1452 Assert(buffers.empty(), ExcInternalError());
1453 (void)comm;
1454 (void)dic_local_received;
1455#endif
1456 }
1457
1458
1459
1460 void
1461 Dictionary::partition(const IndexSet &owned_indices,
1462 const MPI_Comm comm)
1463 {
1464 const unsigned int n_procs = n_mpi_processes(comm);
1465 const unsigned int my_rank = this_mpi_process(comm);
1466
1467 size = owned_indices.size();
1468
1469 Assert(size > 0, ExcNotImplemented());
1470
1471 dofs_per_process =
1472 std::max<types::global_dof_index>((size + n_procs - 1) / n_procs,
1473 range_minimum_grain_size);
1474
1475 stride_small_size =
1476 std::max<unsigned int>(dofs_per_process * n_procs / size, 1);
1477
1478 local_range.first = get_index_offset(my_rank);
1479 local_range.second = get_index_offset(my_rank + 1);
1480
1481 locally_owned_size = local_range.second - local_range.first;
1482 }
1483
1484
1485 ConsensusAlgorithmsPayload::ConsensusAlgorithmsPayload(
1486 const IndexSet &owned_indices,
1487 const IndexSet &indices_to_look_up,
1488 const MPI_Comm comm,
1489 std::vector<unsigned int> &owning_ranks,
1490 const bool track_index_requesters)
1491 : owned_indices(owned_indices)
1492 , indices_to_look_up(indices_to_look_up)
1493 , comm(comm)
1494 , my_rank(this_mpi_process(comm))
1495 , n_procs(n_mpi_processes(comm))
1496 , track_index_requesters(track_index_requesters)
1497 , owning_ranks(owning_ranks)
1498 , dict(owned_indices, comm)
1499 , requesters(dict.actually_owning_rank_list.size())
1500 {}
1501
1502
1503
1504 std::vector<unsigned int>
1505 ConsensusAlgorithmsPayload::compute_targets()
1506 {
1507 std::vector<unsigned int> targets;
1508
1510 unsigned int index = 0;
1511 unsigned int owner_index_guess = 0;
1512 for (auto i : indices_to_look_up)
1513 {
1514 unsigned int other_rank = dict.dof_to_dict_rank(i);
1515 if (other_rank == my_rank)
1516 {
1517 owning_ranks[index] =
1518 dict.actually_owning_ranks[i - dict.local_range.first];
1519 if (track_index_requesters)
1520 append_index_origin(i - dict.local_range.first,
1521 my_rank,
1522 owning_ranks[index],
1523 owner_index_guess);
1524 }
1525 else
1526 {
1527 if (targets.empty() || targets.back() != other_rank)
1528 targets.push_back(other_rank);
1529 auto &indices = indices_to_look_up_by_dict_rank[other_rank];
1530 indices.first.push_back(i);
1531 indices.second.push_back(index);
1532 }
1533 ++index;
1534 }
1535
1536 Assert(targets.size() == indices_to_look_up_by_dict_rank.size(),
1537 ExcMessage("Size does not match!"));
1538
1539 return targets;
1540 }
1541
1542
1543
1544 std::vector<std::pair<types::global_dof_index, types::global_dof_index>>
1545 ConsensusAlgorithmsPayload::create_request(
1546 const unsigned int other_rank)
1547 {
1548 std::vector<
1549 std::pair<types::global_dof_index, types::global_dof_index>>
1550 send_buffer;
1551
1552 // create index set and compress data to be sent
1553 auto &indices_i = indices_to_look_up_by_dict_rank[other_rank].first;
1554 IndexSet is(dict.size);
1555 is.add_indices(indices_i.begin(), indices_i.end());
1556 is.compress();
1557
1558 for (auto interval = is.begin_intervals();
1559 interval != is.end_intervals();
1560 ++interval)
1561 send_buffer.emplace_back(*interval->begin(), interval->last() + 1);
1562
1563 return send_buffer;
1564 }
1565
1566
1567
1568 std::vector<unsigned int>
1569 ConsensusAlgorithmsPayload::answer_request(
1570 const unsigned int other_rank,
1571 const std::vector<std::pair<types::global_dof_index,
1572 types::global_dof_index>> &buffer_recv)
1573 {
1574 std::vector<unsigned int> request_buffer;
1575
1576 unsigned int owner_index_guess = 0;
1577 for (const auto &interval : buffer_recv)
1578 for (auto i = interval.first; i < interval.second; ++i)
1579 {
1580 const unsigned int actual_owner =
1581 dict.actually_owning_ranks[i - dict.local_range.first];
1582 request_buffer.push_back(actual_owner);
1583
1584 if (track_index_requesters)
1585 append_index_origin(i - dict.local_range.first,
1586 other_rank,
1587 actual_owner,
1588 owner_index_guess);
1589 }
1590
1591 return request_buffer;
1592 }
1593
1594
1595
1596 void
1597 ConsensusAlgorithmsPayload::process_answer(
1598 const unsigned int other_rank,
1599 const std::vector<unsigned int> &recv_buffer)
1600 {
1601 const auto &recv_indices =
1602 indices_to_look_up_by_dict_rank[other_rank].second;
1603 AssertDimension(recv_indices.size(), recv_buffer.size());
1604 for (unsigned int j = 0; j < recv_indices.size(); ++j)
1605 owning_ranks[recv_indices[j]] = recv_buffer[j];
1606 }
1607
1608
1609
1610 std::map<unsigned int, IndexSet>
1611 ConsensusAlgorithmsPayload::get_requesters()
1612 {
1613 Assert(track_index_requesters,
1614 ExcMessage("Must enable index range tracking in "
1615 "constructor of ConsensusAlgorithmProcess"));
1616
1617 std::map<unsigned int, ::IndexSet> requested_indices;
1618
1619#ifdef DEAL_II_WITH_MPI
1620
1621 static CollectiveMutex mutex;
1622 CollectiveMutex::ScopedLock lock(mutex, comm);
1623
1624 const int mpi_tag = Utilities::MPI::internal::Tags::
1626
1627 // reserve enough slots for the requests ahead; depending on
1628 // whether the owning rank is one of the requesters or not, we
1629 // might have one fewer request to execute, so fill the requests
1630 // on demand.
1631 std::vector<MPI_Request> send_requests;
1632 send_requests.reserve(requesters.size());
1633
1634 // We use an integer vector for the data exchange. Since we send
1635 // data associated to intervals with different requesters, we will
1636 // need to send (a) the MPI rank of the requester, (b) the number
1637 // of intervals directed to this requester, and (c) a list of
1638 // intervals, i.e., two integers per interval. The number of items
1639 // sent in total can be deduced both via the MPI status message at
1640 // the receiver side as well as by counting the buckets from
1641 // different requesters.
1642 std::vector<std::vector<types::global_dof_index>> send_data(
1643 requesters.size());
1644 for (unsigned int i = 0; i < requesters.size(); ++i)
1645 {
1646 // special code for our own indices
1647 if (dict.actually_owning_rank_list[i] == my_rank)
1648 {
1649 for (const auto &j : requesters[i])
1650 {
1651 const types::global_dof_index index_offset =
1652 dict.get_index_offset(my_rank);
1653 IndexSet &my_index_set = requested_indices[j.first];
1654 my_index_set.set_size(owned_indices.size());
1655 for (const auto &interval : j.second)
1656 my_index_set.add_range(index_offset + interval.first,
1657 index_offset + interval.second);
1658 }
1659 }
1660 else
1661 {
1662 for (const auto &j : requesters[i])
1663 {
1664 send_data[i].push_back(j.first);
1665 send_data[i].push_back(j.second.size());
1666 for (const auto &interval : j.second)
1667 {
1668 send_data[i].push_back(interval.first);
1669 send_data[i].push_back(interval.second);
1670 }
1671 }
1672 send_requests.push_back(MPI_Request());
1673 const int ierr =
1674 MPI_Isend(send_data[i].data(),
1675 send_data[i].size(),
1676 Utilities::MPI::mpi_type_id_for_type<
1677 types::global_dof_index>,
1678 dict.actually_owning_rank_list[i],
1679 mpi_tag,
1680 comm,
1681 &send_requests.back());
1682 AssertThrowMPI(ierr);
1683 }
1684 }
1685
1686 // receive the data
1687 for (unsigned int c = 0; c < dict.n_dict_procs_in_owned_indices; ++c)
1688 {
1689 // wait for an incoming message
1690 MPI_Status status;
1691 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1692 AssertThrowMPI(ierr);
1693
1694 // retrieve size of incoming message
1695 int number_amount;
1696 ierr = MPI_Get_count(
1697 &status,
1698 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
1699 &number_amount);
1700 AssertThrowMPI(ierr);
1701
1702 // receive message
1703 Assert(number_amount % 2 == 0, ExcInternalError());
1704 std::vector<
1705 std::pair<types::global_dof_index, types::global_dof_index>>
1706 buffer(number_amount / 2);
1707 ierr = MPI_Recv(
1708 buffer.data(),
1709 number_amount,
1710 Utilities::MPI::mpi_type_id_for_type<types::global_dof_index>,
1711 status.MPI_SOURCE,
1712 status.MPI_TAG,
1713 comm,
1714 &status);
1715 AssertThrowMPI(ierr);
1716
1717 // unpack the message and translate the dictionary-local
1718 // indices coming via MPI to the global index range
1719 const types::global_dof_index index_offset =
1720 dict.get_index_offset(status.MPI_SOURCE);
1721 unsigned int offset = 0;
1722 while (offset < buffer.size())
1723 {
1724 AssertIndexRange(offset + buffer[offset].second,
1725 buffer.size());
1726
1727 IndexSet my_index_set(owned_indices.size());
1728 for (unsigned int i = offset + 1;
1729 i < offset + buffer[offset].second + 1;
1730 ++i)
1731 my_index_set.add_range(index_offset + buffer[i].first,
1732 index_offset + buffer[i].second);
1733
1734 // the underlying index set is able to merge ranges coming
1735 // from different ranks due to the partitioning in the
1736 // dictionary
1737 IndexSet &index_set = requested_indices[buffer[offset].first];
1738 if (index_set.size() == 0)
1739 index_set.set_size(owned_indices.size());
1740 index_set.add_indices(my_index_set);
1741
1742 offset += buffer[offset].second + 1;
1743 }
1744 AssertDimension(offset, buffer.size());
1745 }
1746
1747 if (send_requests.size() > 0)
1748 {
1749 const auto ierr = MPI_Waitall(send_requests.size(),
1750 send_requests.data(),
1751 MPI_STATUSES_IGNORE);
1752 AssertThrowMPI(ierr);
1753 }
1754
1755
1756# ifdef DEBUG
1757 for (const auto &it : requested_indices)
1758 {
1759 IndexSet copy_set = it.second;
1760 copy_set.subtract_set(owned_indices);
1761 Assert(copy_set.n_elements() == 0,
1763 "The indices requested from the current "
1764 "MPI rank should be locally owned here!"));
1765 }
1766# endif
1767
1768#endif // DEAL_II_WITH_MPI
1769
1770 return requested_indices;
1771 }
1772
1773
1774
1775 void
1776 ConsensusAlgorithmsPayload::append_index_origin(
1777 const types::global_dof_index index_within_dict,
1778 const unsigned int rank_of_request,
1779 const unsigned int rank_of_owner,
1780 unsigned int &owner_index_guess)
1781 {
1782 // remember who requested which index. We want to use an
1783 // std::vector with simple addressing, via a good guess from the
1784 // preceding index, rather than std::map, because this is an inner
1785 // loop and it avoids the map lookup in every iteration
1786 owner_index_guess =
1787 dict.get_owning_rank_index(rank_of_owner, owner_index_guess);
1788
1789 auto &request = requesters[owner_index_guess];
1790 if (request.empty() || request.back().first != rank_of_request)
1791 request.emplace_back(
1792 rank_of_request,
1793 std::vector<
1794 std::pair<types::global_dof_index, types::global_dof_index>>());
1795
1796 auto &intervals = request.back().second;
1797 if (intervals.empty() || intervals.back().second != index_within_dict)
1798 intervals.emplace_back(index_within_dict, index_within_dict + 1);
1799 else
1800 ++intervals.back().second;
1801 }
1802
1803 } // namespace ComputeIndexOwner
1804 } // namespace
1805
1806
1807
1808 std::vector<unsigned int>
1809 compute_index_owner(const IndexSet &owned_indices,
1810 const IndexSet &indices_to_look_up,
1811 const MPI_Comm comm)
1812 {
1814 ExcMessage("IndexSets have to have the same sizes."));
1815
1816 Assert(
1818 ExcMessage("IndexSets have to have the same size on all processes."));
1819
1820 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1821
1822 // Step 1: setup dictionary
1823 // The input owned_indices can be partitioned arbitrarily. In the
1824 // dictionary, the index set is statically repartitioned among the
1825 // processes again and extended with information about the actual owner
1826 // of that index.
1827 ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1828 owned_indices,
1829 indices_to_look_up,
1830 comm,
1831 owning_ranks,
1832 /* keep track of requesters = */ false);
1833
1834 // Step 2: read dictionary
1835 // Communicate with the process who owns the index in the static
1836 // partition (i.e. in the dictionary). This process returns the actual
1837 // owner of the index.
1838 using RequestType =
1839 ComputeIndexOwner::ConsensusAlgorithmsPayload::RequestType;
1840 using AnswerType =
1841 ComputeIndexOwner::ConsensusAlgorithmsPayload::AnswerType;
1842 ConsensusAlgorithms::selector<RequestType, AnswerType>(
1843 process.compute_targets(),
1844 [&process](const unsigned int other_rank) -> RequestType {
1845 return process.create_request(other_rank);
1846 },
1847 [&process](const unsigned int other_rank, const RequestType &r)
1848 -> AnswerType { return process.answer_request(other_rank, r); },
1849 [&process](const unsigned int other_rank, const AnswerType &a) -> void {
1850 process.process_answer(other_rank, a);
1851 },
1852 comm);
1853
1854 return owning_ranks;
1855 }
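 // Usage sketch (illustrative only): given the locally owned indices and a
 // set of ghost indices, the call below collectively determines, for every
 // ghost index, which rank owns it.
 //   const std::vector<unsigned int> owners =
 //     Utilities::MPI::compute_index_owner(locally_owned_dofs,
 //                                         ghost_indices,
 //                                         comm);
 //   // owners[k] is the owning rank of the k-th element of ghost_indices
 // Here `locally_owned_dofs` and `ghost_indices` are assumed IndexSets of
 // equal global size.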
1856
1857
1858
1859 std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
1860 compute_index_owner_and_requesters(const IndexSet &owned_indices,
1861 const IndexSet &indices_to_look_up,
1862 const MPI_Comm &comm)
1863 {
1865 ExcMessage("IndexSets have to have the same sizes."));
1866
1867 Assert(
1869 ExcMessage("IndexSets have to have the same size on all processes."));
1870
1871 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1872
1873 // Step 1: setup dictionary
1874 // The input owned_indices can be partitioned arbitrarily. In the
1875 // dictionary, the index set is statically repartitioned among the
1876 // processes again and extended with information about the actual owner
1877 // of that index.
1878 ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1879 owned_indices, indices_to_look_up, comm, owning_ranks, true);
1880
1881 // Step 2: read dictionary
1882 // Communicate with the process who owns the index in the static
1883 // partition (i.e. in the dictionary). This process returns the actual
1884 // owner of the index.
1885 using RequestType =
1886 ComputeIndexOwner::ConsensusAlgorithmsPayload::RequestType;
1887 using AnswerType =
1888 ComputeIndexOwner::ConsensusAlgorithmsPayload::AnswerType;
1889 ConsensusAlgorithms::selector<RequestType, AnswerType>(
1890 process.compute_targets(),
1891 [&process](const unsigned int other_rank) -> RequestType {
1892 return process.create_request(other_rank);
1893 },
1894 [&process](const unsigned int other_rank, const RequestType &r)
1895 -> AnswerType { return process.answer_request(other_rank, r); },
1896 [&process](const unsigned int other_rank, const AnswerType &a) -> void {
1897 process.process_answer(other_rank, a);
1898 },
1899 comm);
1900
1901 return {owning_ranks, process.get_requesters()};
1902 }
1903
1904
1905
1906 namespace internal
1907 {
1908 namespace CollectiveMutexImplementation
1909 {
1914 void
1915 check_exception()
1916 {
1917#ifdef DEAL_II_WITH_MPI
1918 if (std::uncaught_exceptions() > 0)
1919 {
1920 std::cerr
1921 << "---------------------------------------------------------\n"
1922 << "An exception was thrown inside a section of the program\n"
1923 << "guarded by a CollectiveMutex.\n"
1924 << "Because a CollectiveMutex guards critical communication\n"
1925 << "handling the exception would likely\n"
1926 << "deadlock because only the current process is aware of the\n"
1927 << "exception. To prevent this deadlock, the program will be\n"
1928 << "aborted.\n"
1929 << "---------------------------------------------------------"
1930 << std::endl;
1931
1932 MPI_Abort(MPI_COMM_WORLD, 1);
1933 }
1934#endif
1935 }
1936 } // namespace CollectiveMutexImplementation
1937 } // namespace internal
1938
1939
1940
1941 CollectiveMutex::CollectiveMutex()
1942 : locked(false)
1943 , request(MPI_REQUEST_NULL)
1944 {
1945 InitFinalize::register_request(request);
1946 }
1947
1948
1949
1950 CollectiveMutex::~CollectiveMutex()
1951 {
1952 // First check if this destructor is called during exception handling;
1953 // if so, abort.
1954 internal::CollectiveMutexImplementation::check_exception();
1955
1956 Assert(
1957 !locked,
1958 ExcMessage(
1959 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1960
1961 InitFinalize::unregister_request(request);
1962 }
1963
1964
1965
1966 void
1967 CollectiveMutex::lock(const MPI_Comm comm)
1968 {
1969 Assert(
1970 !locked,
1971 ExcMessage(
1972 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1973
1974#ifdef DEAL_II_WITH_MPI
1975
1976 if (job_supports_mpi())
1977 {
1978 // TODO: For now, we implement this mutex with a blocking barrier in
1979 // the lock and unlock. It needs to be tested whether we can move to a
1980 // nonblocking barrier (code disabled below).
1981
1982 const int ierr = MPI_Barrier(comm);
1983 AssertThrowMPI(ierr);
1984
1985# if 0
1986 // wait for non-blocking barrier to finish. This is a noop the
1987 // first time we lock().
1988 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1989 AssertThrowMPI(ierr);
1990# else
1991 // nothing to do as blocking barrier already completed
1992# endif
1993 }
1994#else
1995 (void)comm;
1996#endif
1997
1998 locked = true;
1999 }
2000
2001
2002
2003 void
2004 CollectiveMutex::unlock(const MPI_Comm comm)
2005 {
2006 // First check if this function is called during exception handling;
2007 // if so, abort. This can happen if a ScopedLock is destroyed.
2008 internal::CollectiveMutexImplementation::check_exception();
2009
2010 Assert(
2011 locked,
2012 ExcMessage(
2013 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
2014
2015#ifdef DEAL_II_WITH_MPI
2016
2017 if (job_supports_mpi())
2018 {
2019 // TODO: For now, we implement this mutex with a blocking barrier
2020 // in the lock and unlock. It needs to be tested whether we can move
2021 // to a nonblocking barrier (code disabled below):
2022# if 0
2023 const int ierr = MPI_Ibarrier(comm, &request);
2024 AssertThrowMPI(ierr);
2025# else
2026 const int ierr = MPI_Barrier(comm);
2027 AssertThrowMPI(ierr);
2028# endif
2029 }
2030#else
2031 (void)comm;
2032#endif
2033
2034 locked = false;
2035 }
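 // Usage sketch (illustrative only), following the pattern used earlier in
 // this file: a function-local static mutex plus a ScopedLock serialize a
 // critical point-to-point communication phase so that messages from
 // different phases cannot be confused.
 //   static Utilities::MPI::CollectiveMutex mutex;
 //   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
 //   // ... MPI_Isend / MPI_Recv exchange guarded by the mutex ...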
2036
2037
2038#ifndef DOXYGEN
2039 // explicit instantiations
2040
2041 // booleans aren't in MPI_SCALARS
2042 template bool
2043 reduce(const bool &,
2044 const MPI_Comm,
2045 const std::function<bool(const bool &, const bool &)> &,
2046 const unsigned int);
2047
2048 template std::vector<bool>
2049 reduce(const std::vector<bool> &,
2050 const MPI_Comm,
2051 const std::function<std::vector<bool>(const std::vector<bool> &,
2052 const std::vector<bool> &)> &,
2053 const unsigned int);
2054
2055 template bool
2056 all_reduce(const bool &,
2057 const MPI_Comm,
2058 const std::function<bool(const bool &, const bool &)> &);
2059
2060 template std::vector<bool>
2061 all_reduce(
2062 const std::vector<bool> &,
2063 const MPI_Comm,
2064 const std::function<std::vector<bool>(const std::vector<bool> &,
2065 const std::vector<bool> &)> &);
2066
2067 // We need an explicit instantiation of this for the same reason as the
2068 // other types described in mpi.inst.in
2069 template void
2070 internal::all_reduce<bool>(const MPI_Op &,
2071 const ArrayView<const bool> &,
2072 const MPI_Comm,
2073 const ArrayView<bool> &);
2074
2075
2076 template bool
2077 logical_or<bool>(const bool &, const MPI_Comm);
2078
2079
2080 template void
2081 logical_or<bool>(const ArrayView<const bool> &,
2082 const MPI_Comm,
2083 const ArrayView<bool> &);
2084
2085
2086 template std::vector<unsigned int>
2087 compute_set_union(const std::vector<unsigned int> &vec,
2088 const MPI_Comm comm);
2089
2090
2091 template std::set<unsigned int>
2092 compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
2093#endif
2094
2095#include "mpi.inst"
2096 } // end of namespace MPI
2097} // end of namespace Utilities
2098
2099DEAL_II_NAMESPACE_CLOSE