mpi.cc
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15
16#include <deal.II/base/exceptions.h>
17#include <deal.II/base/index_set.h>
18#include <deal.II/base/mpi.h>
19#include <deal.II/base/mpi.templates.h>
20#include <deal.II/base/mpi_compute_index_owner_internal.h>
21#include <deal.II/base/mpi_consensus_algorithms.h>
22#include <deal.II/base/mpi_large_count.h>
23#include <deal.II/base/mpi_tags.h>
24
27
28#include <boost/serialization/utility.hpp>
29
30#include <iostream>
31#include <limits>
32#include <numeric>
33#include <set>
34#include <vector>
35
37DEAL_II_NAMESPACE_OPEN
38
39namespace Utilities
40{
41 IndexSet
42 create_evenly_distributed_partitioning(
43 const unsigned int my_partition_id,
44 const unsigned int n_partitions,
45 const types::global_dof_index total_size)
46 {
47 static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
48 "IndexSet::size_type must match types::global_dof_index for "
49 "using this function");
50 const unsigned int remain = total_size % n_partitions;
51
52 const IndexSet::size_type min_size = total_size / n_partitions;
53
54 const IndexSet::size_type begin =
55 min_size * my_partition_id + std::min(my_partition_id, remain);
56 const IndexSet::size_type end =
57 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
58 IndexSet result(total_size);
59 result.add_range(begin, end);
60 return result;
61 }
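 // A minimal usage sketch (illustration only, not part of the library
 // sources): with total_size = 10 and n_partitions = 3 the remainder of
 // one index goes to the first partition, so the three partitions own
 // the ranges [0,4), [4,7), and [7,10):
 //
 //   const IndexSet owned =
 //     Utilities::create_evenly_distributed_partitioning(/*my_partition_id=*/0,
 //                                                       /*n_partitions=*/3,
 //                                                       /*total_size=*/10);
 //   // owned.n_elements() == 4, covering the indices 0..3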
62
63 namespace MPI
64 {
65 MinMaxAvg
66 min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
67 {
68 MinMaxAvg result;
69 min_max_avg(ArrayView<const double>(my_value),
70 ArrayView<MinMaxAvg>(result),
71 mpi_communicator);
72
73 return result;
74 }
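 // A minimal usage sketch (illustration only, `local_time` is a
 // hypothetical per-rank quantity): every rank contributes one value and
 // all ranks learn the extrema, the ranks that own them, and the average:
 //
 //   const Utilities::MPI::MinMaxAvg stats =
 //     Utilities::MPI::min_max_avg(local_time, mpi_communicator);
 //   // stats.max is the largest of the local values, stats.max_index the
 //   // rank that owns it, and stats.avg equals stats.sum divided by the
 //   // number of ranks.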
75
76
77
78 std::vector<MinMaxAvg>
79 min_max_avg(const std::vector<double> &my_values,
80 const MPI_Comm mpi_communicator)
81 {
82 std::vector<MinMaxAvg> results(my_values.size());
83 min_max_avg(my_values, results, mpi_communicator);
84
85 return results;
86 }
87
88
89
90#ifdef DEAL_II_WITH_MPI
91 unsigned int
92 n_mpi_processes(const MPI_Comm mpi_communicator)
93 {
94 if (job_supports_mpi())
95 {
96 int n_jobs = 1;
97 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
98 AssertThrowMPI(ierr);
99 return n_jobs;
100 }
101 else
102 return 1;
103 }
104
105
106 unsigned int
107 this_mpi_process(const MPI_Comm mpi_communicator)
108 {
109 if (job_supports_mpi())
110 {
111 int rank = 0;
112 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
113 AssertThrowMPI(ierr);
114 return rank;
115 }
116 else
117 return 0;
118 }
119
120
121
122 std::vector<unsigned int>
123 mpi_processes_within_communicator(const MPI_Comm comm_large,
124 const MPI_Comm comm_small)
125 {
126 if (Utilities::MPI::job_supports_mpi() == false)
127 return std::vector<unsigned int>{0};
128
129 const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
130 const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
131
132 std::vector<unsigned int> ranks(size);
133 const int ierr = MPI_Allgather(
134 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
135 AssertThrowMPI(ierr);
136
137 return ranks;
138 }
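 // A small illustration (not part of the library sources): if `comm_small`
 // consists of the ranks 0 and 2 of `comm_large`, the function returns the
 // vector {0, 2} on every process of `comm_small`.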
139
140
141
142 MPI_Comm
143 duplicate_communicator(const MPI_Comm mpi_communicator)
144 {
145 MPI_Comm new_communicator;
146 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
147 AssertThrowMPI(ierr);
148 return new_communicator;
149 }
150
151
152
153 void
154 free_communicator(MPI_Comm mpi_communicator)
155 {
156 // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
157 const int ierr = MPI_Comm_free(&mpi_communicator);
158 AssertThrowMPI(ierr);
159 }
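 // A minimal usage sketch (illustration only): duplicate a communicator so
 // that an object gets its own communication context, and free it again
 // once it is no longer needed:
 //
 //   MPI_Comm private_comm =
 //     Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
 //   // ... communicate on private_comm without interfering with messages
 //   //     on MPI_COMM_WORLD ...
 //   Utilities::MPI::free_communicator(private_comm);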
160
161
162
163 int
164 create_group(const MPI_Comm comm,
165 const MPI_Group &group,
166 const int tag,
167 MPI_Comm *new_comm)
168 {
169 const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
170 AssertThrowMPI(ierr);
171 return ierr;
172 }
173
174
175
176 std::vector<IndexSet>
177 create_ascending_partitioning(
178 const MPI_Comm comm,
179 const types::global_dof_index locally_owned_size)
180 {
181 static_assert(
182 std::is_same_v<types::global_dof_index, IndexSet::size_type>,
183 "IndexSet::size_type must match types::global_dof_index for "
184 "using this function");
185 const unsigned int n_proc = n_mpi_processes(comm);
186 const std::vector<IndexSet::size_type> sizes =
187 all_gather(comm, locally_owned_size);
188 const auto total_size =
189 std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
190
191 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
192
193 IndexSet::size_type begin = 0;
194 for (unsigned int i = 0; i < n_proc; ++i)
195 {
196 res[i].add_range(begin, begin + sizes[i]);
197 begin = begin + sizes[i];
198 }
199
200 return res;
201 }
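 // A minimal usage sketch (illustration only), assuming `n_locally_owned`
 // holds the number of elements this rank is to own: if three ranks pass
 // 2, 3, and 1, every rank receives the same vector of IndexSets covering
 // [0,2), [2,5), and [5,6):
 //
 //   const std::vector<IndexSet> partition =
 //     Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);
 //   // partition[Utilities::MPI::this_mpi_process(comm)] is this rank's
 //   // contiguous range of indices.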
202
203
204
205 IndexSet
206 create_evenly_distributed_partitioning(
207 const MPI_Comm comm,
208 const types::global_dof_index total_size)
209 {
210 const unsigned int this_proc = this_mpi_process(comm);
211 const unsigned int n_proc = n_mpi_processes(comm);
212
213 return Utilities::create_evenly_distributed_partitioning(this_proc,
214 n_proc,
215 total_size);
216 }
217
218
219
220 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
221 create_mpi_data_type_n_bytes(const std::size_t n_bytes)
222 {
223 MPI_Datatype result;
224 int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
225 AssertThrowMPI(ierr);
226 ierr = MPI_Type_commit(&result);
227 AssertThrowMPI(ierr);
228
229# ifdef DEBUG
230 MPI_Count size64;
231 ierr = MPI_Type_size_x(result, &size64);
232 AssertThrowMPI(ierr);
233
234 Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
235# endif
236
237 // Now put the new data type into a std::unique_ptr with a custom
238 // deleter. We call the std::unique_ptr constructor that takes as its
239 // first argument a pointer (here, a pointer to a copy of the `result`
240 // object) and as its second argument a pointer-to-function, for which
241 // we here use a capture-less lambda function that acts as the
242 // 'deleter' object: it calls `MPI_Type_free` and then deletes the
243 // pointer. To avoid a compiler warning about a null this pointer
244 // in the lambda (which doesn't make sense: the lambda doesn't store
245 // anything), we create the deleter first.
246 auto deleter = [](MPI_Datatype *p) {
247 if (p != nullptr)
248 {
249 const int ierr = MPI_Type_free(p);
250 (void)ierr;
251 AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
252
253 delete p;
254 }
255 };
256
257 return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
258 new MPI_Datatype(result), deleter);
259 }
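 // A minimal usage sketch (illustration only), assuming `buffer` is a
 // std::vector<char> and `dest` a valid rank: sending the buffer with a
 // count of 1 and the returned datatype transfers buffer.size() bytes,
 // even if that number exceeds the range of a signed int:
 //
 //   const auto bytes =
 //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
 //   const int ierr =
 //     MPI_Send(buffer.data(), 1, *bytes, dest, /*tag=*/0, comm);
 //   AssertThrowMPI(ierr);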
260
261
262
263 std::vector<unsigned int>
264 compute_point_to_point_communication_pattern(
265 const MPI_Comm mpi_comm,
266 const std::vector<unsigned int> &destinations)
267 {
268 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
269 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
270 (void)myid;
271 (void)n_procs;
272
273 for (const unsigned int destination : destinations)
274 {
275 (void)destination;
276 AssertIndexRange(destination, n_procs);
277 }
278
279
280 // Have a little function that checks if destinations provided
281 // to the current process are unique. The way it does this is
282 // to create a sorted list of destinations and then walk through
283 // the list and look at successive elements -- if we find the
284 // same number twice, we know that the destinations were not
285 // unique
286 const bool my_destinations_are_unique = [destinations]() {
287 if (destinations.empty())
288 return true;
289 else
290 {
291 std::vector<unsigned int> my_destinations = destinations;
292 std::sort(my_destinations.begin(), my_destinations.end());
293 return (std::adjacent_find(my_destinations.begin(),
294 my_destinations.end()) ==
295 my_destinations.end());
296 }
297 }();
298
299 // If all processes report that they have unique destinations,
300 // then we can short-cut the process using a consensus algorithm (which
301 // is implemented only for the case of unique destinations):
302 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
303 1)
304 {
305 return ConsensusAlgorithms::nbx<char, char>(
306 destinations, {}, {}, {}, mpi_comm);
307 }
308
309 // So we need to run a different algorithm, specifically one that
310 // requires more memory -- MPI_Reduce_scatter_block will require memory
311 // proportional to the number of processes involved; that function is
312 // available for MPI 2.2 or later:
313 static CollectiveMutex mutex;
314 CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
315
316 const int mpi_tag =
317 internal::Tags::compute_point_to_point_communication_pattern;
318
319 // Calculate the number of messages to send to each process
320 std::vector<unsigned int> dest_vector(n_procs);
321 for (const auto &el : destinations)
322 ++dest_vector[el];
323
324 // Find how many processes will send to this one
325 // by reducing with sum and then scattering the
326 // results over all processes
327 unsigned int n_recv_from;
328 const int ierr = MPI_Reduce_scatter_block(
329 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
330
331 AssertThrowMPI(ierr);
332
333 // Send myid to every process in `destinations` vector...
334 std::vector<MPI_Request> send_requests(destinations.size());
335 for (const auto &el : destinations)
336 {
337 const int ierr =
338 MPI_Isend(&myid,
339 1,
340 MPI_UNSIGNED,
341 el,
342 mpi_tag,
343 mpi_comm,
344 send_requests.data() + (&el - destinations.data()));
345 AssertThrowMPI(ierr);
346 }
347
348
349 // Receive `n_recv_from` times from the processes
350 // that communicate with this one. Store the obtained ids
351 // in the resulting vector.
352 std::vector<unsigned int> origins(n_recv_from);
353 for (auto &el : origins)
354 {
355 const int ierr = MPI_Recv(&el,
356 1,
357 MPI_UNSIGNED,
358 MPI_ANY_SOURCE,
359 mpi_tag,
360 mpi_comm,
361 MPI_STATUS_IGNORE);
362 AssertThrowMPI(ierr);
363 }
364
365 if (destinations.size() > 0)
366 {
367 const int ierr = MPI_Waitall(destinations.size(),
368 send_requests.data(),
369 MPI_STATUSES_IGNORE);
370 AssertThrowMPI(ierr);
371 }
372
373 return origins;
374 }
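 // A minimal usage sketch (illustration only), assuming more than one rank
 // and that every rank sends one message to its right neighbor on `comm`:
 //
 //   const unsigned int rank  = Utilities::MPI::this_mpi_process(comm);
 //   const unsigned int nproc = Utilities::MPI::n_mpi_processes(comm);
 //   const std::vector<unsigned int> destinations = {(rank + 1) % nproc};
 //   const std::vector<unsigned int> origins =
 //     Utilities::MPI::compute_point_to_point_communication_pattern(
 //       comm, destinations);
 //   // On every rank, origins holds the single entry
 //   // (rank + nproc - 1) % nproc.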
375
376
377
378 unsigned int
379 compute_n_point_to_point_communications(
380 const MPI_Comm mpi_comm,
381 const std::vector<unsigned int> &destinations)
382 {
383 // Have a little function that checks if destinations provided
384 // to the current process are unique:
385 const bool my_destinations_are_unique = [destinations]() {
386 std::vector<unsigned int> my_destinations = destinations;
387 const unsigned int n_destinations = my_destinations.size();
388 std::sort(my_destinations.begin(), my_destinations.end());
389 my_destinations.erase(std::unique(my_destinations.begin(),
390 my_destinations.end()),
391 my_destinations.end());
392 return (my_destinations.size() == n_destinations);
393 }();
394
395 // If all processes report that they have unique destinations,
396 // then we can short-cut the process using a consensus algorithm:
397
398 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
399 1)
400 {
401 return ConsensusAlgorithms::nbx<char, char>(
402 destinations, {}, {}, {}, mpi_comm)
403 .size();
404 }
405 else
406 {
407 const unsigned int n_procs =
408 Utilities::MPI::n_mpi_processes(mpi_comm);
409
410 for (const unsigned int destination : destinations)
411 {
412 (void)destination;
413 AssertIndexRange(destination, n_procs);
414 Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
416 "There is no point in communicating with ourselves."));
417 }
418
419 // Calculate the number of messages to send to each process
420 std::vector<unsigned int> dest_vector(n_procs);
421 for (const auto &el : destinations)
422 ++dest_vector[el];
423
424 // Find out how many processes will send to this one
425 // MPI_Reduce_scatter(_block) does exactly this
426 unsigned int n_recv_from = 0;
427
428 const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
429 &n_recv_from,
430 1,
431 MPI_UNSIGNED,
432 MPI_SUM,
433 mpi_comm);
434
435 AssertThrowMPI(ierr);
436
437 return n_recv_from;
438 }
439 }
440
441
442
443 namespace
444 {
445 // custom MPI_Op for the min_max_avg() reduction below
446 void
447 max_reduce(const void *in_lhs_,
448 void *inout_rhs_,
449 int *len,
450 MPI_Datatype *)
451 {
452 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
453 MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
454
455 for (int i = 0; i < *len; ++i)
456 {
457 inout_rhs[i].sum += in_lhs[i].sum;
458 if (inout_rhs[i].min > in_lhs[i].min)
459 {
460 inout_rhs[i].min = in_lhs[i].min;
461 inout_rhs[i].min_index = in_lhs[i].min_index;
462 }
463 else if (inout_rhs[i].min == in_lhs[i].min)
464 {
465 // choose lower cpu index when tied to make operator commutative
466 if (inout_rhs[i].min_index > in_lhs[i].min_index)
467 inout_rhs[i].min_index = in_lhs[i].min_index;
468 }
469
470 if (inout_rhs[i].max < in_lhs[i].max)
471 {
472 inout_rhs[i].max = in_lhs[i].max;
473 inout_rhs[i].max_index = in_lhs[i].max_index;
474 }
475 else if (inout_rhs[i].max == in_lhs[i].max)
476 {
477 // choose lower cpu index when tied to make operator commutative
478 if (inout_rhs[i].max_index > in_lhs[i].max_index)
479 inout_rhs[i].max_index = in_lhs[i].max_index;
480 }
481 }
482 }
483 } // namespace
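 // An illustration of the tie-breaking rule above (not part of the library
 // sources): if ranks 2 and 5 both hold the global minimum, the reduction
 // reports min_index == 2 regardless of the order in which MPI combines
 // the partial results, which is what makes the operation commutative.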
484
485
486
487 void
488 min_max_avg(const ArrayView<const double> &my_values,
489 const ArrayView<MinMaxAvg> &result,
490 const MPI_Comm mpi_communicator)
491 {
492 // If MPI was not started, we have a serial computation and cannot run
493 // the other MPI commands
494 if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
495 {
496 for (unsigned int i = 0; i < my_values.size(); ++i)
497 {
498 result[i].sum = my_values[i];
499 result[i].avg = my_values[i];
500 result[i].min = my_values[i];
501 result[i].max = my_values[i];
502 result[i].min_index = 0;
503 result[i].max_index = 0;
504 }
505 return;
506 }
507
508 /*
509 * A custom MPI datatype handle describing the memory layout of the
510 * MinMaxAvg struct. Initialized the first time control reaches the
511 * static variable, so it is not initialized too early.
512 */
513 static MPI_Datatype type = []() {
514 MPI_Datatype type;
515
516 int lengths[] = {3, 2, 1};
517
518 MPI_Aint displacements[] = {0,
519 offsetof(MinMaxAvg, min_index),
520 offsetof(MinMaxAvg, avg)};
521
522 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
523
524 int ierr =
525 MPI_Type_create_struct(3, lengths, displacements, types, &type);
526 AssertThrowMPI(ierr);
527
528 ierr = MPI_Type_commit(&type);
529 AssertThrowMPI(ierr);
530
531 /* Ensure that we free the allocated datatype again at the end of
532 * the program run just before we call MPI_Finalize():*/
533 InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
534 int ierr = MPI_Type_free(&type);
535 AssertThrowMPI(ierr);
536 });
537
538 return type;
539 }();
540
541 /*
542 * A custom MPI op handle for our max_reduce function.
543 * Initialized the first time control reaches the static variable,
544 * so it is not initialized too early.
545 */
546 static MPI_Op op = []() {
547 MPI_Op op;
548
549 int ierr =
550 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
551 static_cast<int>(true),
552 &op);
553 AssertThrowMPI(ierr);
554
555 /* Ensure that we free the allocated op again at the end of the
556 * program run just before we call MPI_Finalize():*/
557 InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
558 int ierr = MPI_Op_free(&op);
559 AssertThrowMPI(ierr);
560 });
561
562 return op;
563 }();
564
565 AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
566 Utilities::MPI::max(my_values.size(), mpi_communicator));
567
568 AssertDimension(my_values.size(), result.size());
569
570 // To avoid uninitialized values on some MPI implementations, provide
571 // result with a default value already...
572 MinMaxAvg dummy = {0.,
573 std::numeric_limits<double>::max(),
574 std::numeric_limits<double>::lowest(),
575 0,
576 0,
577 0.};
578
579 for (auto &i : result)
580 i = dummy;
581
582 const unsigned int my_id =
583 ::Utilities::MPI::this_mpi_process(mpi_communicator);
584 const unsigned int numproc =
585 ::Utilities::MPI::n_mpi_processes(mpi_communicator);
586
587 std::vector<MinMaxAvg> in(my_values.size());
588
589 for (unsigned int i = 0; i < my_values.size(); ++i)
590 {
591 in[i].sum = in[i].min = in[i].max = my_values[i];
592 in[i].min_index = in[i].max_index = my_id;
593 }
594
595 int ierr = MPI_Allreduce(
596 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
597 AssertThrowMPI(ierr);
598
599 for (auto &r : result)
600 r.avg = r.sum / numproc;
601 }
602
603
604#else
605
606 unsigned int
607 n_mpi_processes(const MPI_Comm)
608 {
609 return 1;
610 }
611
612
613
614 unsigned int
615 this_mpi_process(const MPI_Comm)
616 {
617 return 0;
618 }
619
620
621
622 std::vector<unsigned int>
623 mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
624 {
625 return std::vector<unsigned int>{0};
626 }
627
628
629
630 std::vector<IndexSet>
631 create_ascending_partitioning(
632 const MPI_Comm /*comm*/,
633 const types::global_dof_index locally_owned_size)
634 {
635 return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
636 }
637
638 IndexSet
639 create_evenly_distributed_partitioning(
640 const MPI_Comm /*comm*/,
641 const types::global_dof_index total_size)
642 {
643 return complete_index_set(total_size);
644 }
645
646
647
648 MPI_Comm
649 duplicate_communicator(const MPI_Comm mpi_communicator)
650 {
651 return mpi_communicator;
652 }
653
654
655
656 void
657 free_communicator(MPI_Comm /*mpi_communicator*/)
658 {}
659
660
661
662 void
663 min_max_avg(const ArrayView<const double> &my_values,
664 const ArrayView<MinMaxAvg> &result,
665 const MPI_Comm)
666 {
667 AssertDimension(my_values.size(), result.size());
668
669 for (unsigned int i = 0; i < my_values.size(); ++i)
670 {
671 result[i].sum = my_values[i];
672 result[i].avg = my_values[i];
673 result[i].min = my_values[i];
674 result[i].max = my_values[i];
675 result[i].min_index = 0;
676 result[i].max_index = 0;
677 }
678 }
679
680#endif
681
682
683 MPI_InitFinalize::MPI_InitFinalize(int &argc,
684 char **&argv,
685 const unsigned int max_num_threads)
686 : InitFinalize(argc,
687 argv,
691 max_num_threads)
692 {}
693
694
695
696 bool
697 job_supports_mpi()
698 {
699#ifdef DEAL_II_WITH_MPI
700 int MPI_has_been_started = 0;
701 const int ierr = MPI_Initialized(&MPI_has_been_started);
702 AssertThrowMPI(ierr);
703
704 return (MPI_has_been_started > 0);
705#else
706 return false;
707#endif
708 }
709
710
711
712 std::vector<unsigned int>
713 compute_index_owner(const IndexSet &owned_indices,
714 const IndexSet &indices_to_look_up,
715 const MPI_Comm comm)
716 {
717 Assert(owned_indices.size() == indices_to_look_up.size(),
718 ExcMessage("IndexSets have to have the same sizes."));
719
720 Assert(
721 owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
722 ExcMessage("IndexSets have to have the same size on all processes."));
723
724 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
725
726 // Step 1: setup dictionary
727 // The input owned_indices can be partitioned arbitrarily. In the
728 // dictionary, the index set is statically repartitioned among the
729 // processes again and extended with information about the actual
730 // owner of that index.
731 internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
732 owned_indices, indices_to_look_up, comm, owning_ranks);
733
734 // Step 2: read dictionary
735 // Communicate with the process who owns the index in the static
736 // partition (i.e. in the dictionary). This process returns the actual
737 // owner of the index.
738 ConsensusAlgorithms::Selector<
739 std::vector<
740 std::pair<types::global_dof_index, types::global_dof_index>>,
741 std::vector<unsigned int>>
742 consensus_algorithm;
743 consensus_algorithm.run(process, comm);
744
745 return owning_ranks;
746 }
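 // A minimal usage sketch (illustration only), assuming a two-rank
 // communicator `comm` and `rank` = this_mpi_process(comm): each rank owns
 // half of a global range of 8 indices and asks who owns one ghost index:
 //
 //   IndexSet owned(8), ghosts(8);
 //   owned.add_range(rank == 0 ? 0 : 4, rank == 0 ? 4 : 8);
 //   ghosts.add_index(rank == 0 ? 5 : 2);
 //   const std::vector<unsigned int> owners =
 //     Utilities::MPI::compute_index_owner(owned, ghosts, comm);
 //   // owners == {1} on rank 0 and owners == {0} on rank 1.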
747
748
749
750 namespace internal
751 {
752 namespace CollectiveMutexImplementation
753 {
758 void
759 check_exception()
760 {
761#ifdef DEAL_II_WITH_MPI
762# if __cpp_lib_uncaught_exceptions >= 201411
763 // std::uncaught_exception() is deprecated in c++17
764 if (std::uncaught_exceptions() != 0)
765# else
766 if (std::uncaught_exception() == true)
767# endif
768 {
769 std::cerr
770 << "---------------------------------------------------------\n"
771 << "An exception was thrown inside a section of the program\n"
772 << "guarded by a CollectiveMutex.\n"
773 << "Because a CollectiveMutex guards critical communication\n"
774 << "handling the exception would likely\n"
775 << "deadlock because only the current process is aware of the\n"
776 << "exception. To prevent this deadlock, the program will be\n"
777 << "aborted.\n"
778 << "---------------------------------------------------------"
779 << std::endl;
780
781 MPI_Abort(MPI_COMM_WORLD, 1);
782 }
783#endif
784 }
785 } // namespace CollectiveMutexImplementation
786 } // namespace internal
787
788
789
790 CollectiveMutex::CollectiveMutex()
791 : locked(false)
792 , request(MPI_REQUEST_NULL)
793 {
794 Utilities::MPI::MPI_InitFinalize::register_request(request);
795 }
796
797
798
799 CollectiveMutex::~CollectiveMutex()
800 {
801 // First check if this destructor is called during exception handling
802 // if so, abort.
803 internal::CollectiveMutexImplementation::check_exception();
804
805 Assert(
806 !locked,
808 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
809
810 Utilities::MPI::MPI_InitFinalize::unregister_request(request);
811 }
812
813
814
815 void
816 CollectiveMutex::lock(const MPI_Comm comm)
817 {
818 (void)comm;
819
820 Assert(
821 !locked,
823 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
824
825#ifdef DEAL_II_WITH_MPI
826
827 if (job_supports_mpi())
828 {
829 // TODO: For now, we implement this mutex with a blocking barrier in
830 // the lock and unlock. It needs to be tested whether we can move to
831 // a non-blocking barrier (code disabled below).
832
833 const int ierr = MPI_Barrier(comm);
834 AssertThrowMPI(ierr);
835
836# if 0
837 // wait for non-blocking barrier to finish. This is a noop the
838 // first time we lock().
839 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
840 AssertThrowMPI(ierr);
841# else
842 // nothing to do as blocking barrier already completed
843# endif
844 }
845#endif
846
847 locked = true;
848 }
849
850
851
852 void
853 CollectiveMutex::unlock(const MPI_Comm comm)
854 {
855 (void)comm;
856
857 // First check if this function is called during exception handling
858 // if so, abort. This can happen if a ScopedLock is destroyed.
859 internal::CollectiveMutexImplementation::check_exception();
860
861 Assert(
862 locked,
864 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
865
866#ifdef DEAL_II_WITH_MPI
867
868 if (job_supports_mpi())
869 {
870 // TODO: For now, we implement this mutex with a blocking barrier
871 // in the lock and unlock. It needs to be tested whether we can move
872 // to a non-blocking barrier (code disabled below):
873# if 0
874 const int ierr = MPI_Ibarrier(comm, &request);
875 AssertThrowMPI(ierr);
876# else
877 const int ierr = MPI_Barrier(comm);
878 AssertThrowMPI(ierr);
879# endif
880 }
881#endif
882
883 locked = false;
884 }
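 // A minimal usage sketch (illustration only) of the ScopedLock idiom used
 // elsewhere in this file: all ranks enter the critical section together,
 // and the mutex is released again when the scope ends:
 //
 //   static Utilities::MPI::CollectiveMutex mutex;
 //   {
 //     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
 //     // ... communication that must not interleave with other
 //     //     communication rounds on `comm` ...
 //   }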
885
886
887#ifndef DOXYGEN
888 // explicit instantiations
889
890 // booleans aren't in MPI_SCALARS
891 template bool
892 reduce(const bool &,
893 const MPI_Comm,
894 const std::function<bool(const bool &, const bool &)> &,
895 const unsigned int);
896
897 template std::vector<bool>
898 reduce(const std::vector<bool> &,
899 const MPI_Comm,
900 const std::function<std::vector<bool>(const std::vector<bool> &,
901 const std::vector<bool> &)> &,
902 const unsigned int);
903
904 template bool
905 all_reduce(const bool &,
906 const MPI_Comm,
907 const std::function<bool(const bool &, const bool &)> &);
908
909 template std::vector<bool>
910 all_reduce(
911 const std::vector<bool> &,
912 const MPI_Comm,
913 const std::function<std::vector<bool>(const std::vector<bool> &,
914 const std::vector<bool> &)> &);
915
916 // We need an explicit instantiation of this for the same reason as the
917 // other types described in mpi.inst.in
918 template void
919 internal::all_reduce<bool>(const MPI_Op &,
920 const ArrayView<const bool> &,
921 const MPI_Comm,
922 const ArrayView<bool> &);
923
924
925 template bool
926 logical_or<bool>(const bool &, const MPI_Comm);
927
928
929 template void
930 logical_or<bool>(const ArrayView<const bool> &,
931 const MPI_Comm,
932 const ArrayView<bool> &);
933
934
935 template std::vector<unsigned int>
936 compute_set_union(const std::vector<unsigned int> &vec,
937 const MPI_Comm comm);
938
939
940 template std::set<unsigned int>
941 compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
942#endif
943
944#include "mpi.inst"
945 } // end of namespace MPI
946} // end of namespace Utilities
947
948DEAL_II_NAMESPACE_CLOSE