Reference documentation for deal.II version 9.5.0
mpi.cc
Go to the documentation of this file.
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2005 - 2023 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
19#include <deal.II/base/mpi.h>
20#include <deal.II/base/mpi.templates.h>
26
32
33#include <boost/serialization/utility.hpp>
34
35#include <Kokkos_Core.hpp>
36
37#include <iostream>
38#include <limits>
39#include <numeric>
40#include <set>
41#include <vector>
42
43#ifdef DEAL_II_WITH_TRILINOS
44# ifdef DEAL_II_WITH_MPI
47
48# include <Epetra_MpiComm.h>
49# endif
50#endif
51
52#ifdef DEAL_II_WITH_PETSC
55
56# include <petscsys.h>
57#endif
58
59#ifdef DEAL_II_WITH_SLEPC
61
62# include <slepcsys.h>
63#endif
64
65#ifdef DEAL_II_WITH_P4EST
66# include <p4est_bits.h>
67#endif
68
69#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
70# include <zoltan_cpp.h>
71#endif
72
73DEAL_II_NAMESPACE_OPEN
 74
75
76namespace Utilities
77{
78 IndexSet
79 create_evenly_distributed_partitioning(
80 const unsigned int my_partition_id,
81 const unsigned int n_partitions,
82 const types::global_dof_index total_size)
83 {
84 static_assert(
85 std::is_same<types::global_dof_index, IndexSet::size_type>::value,
86 "IndexSet::size_type must match types::global_dof_index for "
87 "using this function");
88 const unsigned int remain = total_size % n_partitions;
89
90 const IndexSet::size_type min_size = total_size / n_partitions;
91
92 const IndexSet::size_type begin =
93 min_size * my_partition_id + std::min(my_partition_id, remain);
94 const IndexSet::size_type end =
95 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
96 IndexSet result(total_size);
97 result.add_range(begin, end);
98 return result;
99 }
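// [Editorial usage sketch -- not part of mpi.cc] How the even split above
// works out in practice: with total_size = 10 and n_partitions = 3, the
// remainder 10 % 3 == 1 goes to the first partition, so the ranges are
// [0,4), [4,7) and [7,10). A minimal sketch, assuming deal.II headers:
//
//   const IndexSet owned =
//     Utilities::create_evenly_distributed_partitioning(/*partition*/ 1,
//                                                       /*n_partitions*/ 3,
//                                                       /*total_size*/ 10);
//   // owned.n_elements() == 3 and owned.nth_index_in_set(0) == 4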
100
101 namespace MPI
102 {
103#ifdef DEAL_II_WITH_MPI
104 // Provide definitions of template variables for all valid instantiations.
105 template const MPI_Datatype mpi_type_id_for_type<bool>;
106 template const MPI_Datatype mpi_type_id_for_type<char>;
107 template const MPI_Datatype mpi_type_id_for_type<signed char>;
108 template const MPI_Datatype mpi_type_id_for_type<short>;
109 template const MPI_Datatype mpi_type_id_for_type<int>;
110 template const MPI_Datatype mpi_type_id_for_type<long int>;
111 template const MPI_Datatype mpi_type_id_for_type<unsigned char>;
112 template const MPI_Datatype mpi_type_id_for_type<unsigned short>;
113 template const MPI_Datatype mpi_type_id_for_type<unsigned long int>;
114 template const MPI_Datatype mpi_type_id_for_type<unsigned long long int>;
115 template const MPI_Datatype mpi_type_id_for_type<float>;
116 template const MPI_Datatype mpi_type_id_for_type<double>;
117 template const MPI_Datatype mpi_type_id_for_type<long double>;
118 template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
119 template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
120#endif
121
122
123 MinMaxAvg
124 min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
125 {
126 MinMaxAvg result;
127 min_max_avg(ArrayView<const double>(my_value),
128 ArrayView<MinMaxAvg>(result),
129 mpi_communicator);
130
131 return result;
132 }
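// [Editorial usage sketch -- not part of mpi.cc] Typical use of the scalar
// overload above: collect statistics of a per-process quantity. The name
// `local_wall_time` is a hypothetical placeholder.
//
//   const double local_wall_time = ...;
//   const Utilities::MPI::MinMaxAvg stats =
//     Utilities::MPI::min_max_avg(local_wall_time, MPI_COMM_WORLD);
//   // stats.min, stats.max, stats.avg, stats.sum hold the reduced values;
//   // stats.min_index and stats.max_index are the ranks of the extrema.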
133
134
135
136 std::vector<MinMaxAvg>
137 min_max_avg(const std::vector<double> &my_values,
138 const MPI_Comm mpi_communicator)
139 {
140 std::vector<MinMaxAvg> results(my_values.size());
141 min_max_avg(my_values, results, mpi_communicator);
142
143 return results;
144 }
145
146
147
148#ifdef DEAL_II_WITH_MPI
149 unsigned int
150 n_mpi_processes(const MPI_Comm mpi_communicator)
151 {
152 int n_jobs = 1;
153 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
154 AssertThrowMPI(ierr);
155
156 return n_jobs;
157 }
158
159
160 unsigned int
161 this_mpi_process(const MPI_Comm mpi_communicator)
162 {
163 int rank = 0;
164 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
165 AssertThrowMPI(ierr);
166
167 return rank;
168 }
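// [Editorial usage sketch -- not part of mpi.cc] The two queries above are
// the usual way to branch on the rank and report the communicator size:
//
//   const unsigned int rank = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
//   const unsigned int size = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
//   if (rank == 0)
//     std::cout << "Running on " << size << " MPI processes." << std::endl;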
169
170
171
172 const std::vector<unsigned int>
173 mpi_processes_within_communicator(const MPI_Comm comm_large,
174 const MPI_Comm comm_small)
175 {
176 if (Utilities::MPI::job_supports_mpi() == false)
177 return std::vector<unsigned int>{0};
178
179 const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
180 const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
181
182 std::vector<unsigned int> ranks(size);
183 const int ierr = MPI_Allgather(
184 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
185 AssertThrowMPI(ierr);
186
187 return ranks;
188 }
189
190
191
192 MPI_Comm
193 duplicate_communicator(const MPI_Comm mpi_communicator)
194 {
195 MPI_Comm new_communicator;
196 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
197 AssertThrowMPI(ierr);
198 return new_communicator;
199 }
200
201
202
203 void
204 free_communicator(MPI_Comm mpi_communicator)
205 {
206 // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
207 const int ierr = MPI_Comm_free(&mpi_communicator);
208 AssertThrowMPI(ierr);
209 }
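// [Editorial usage sketch -- not part of mpi.cc] duplicate_communicator()
// and free_communicator() are meant to be used as a pair, for example to
// give an object its own communicator so that its messages cannot
// interfere with other communication:
//
//   MPI_Comm my_comm = Utilities::MPI::duplicate_communicator(mpi_communicator);
//   // ... perform communication on my_comm ...
//   Utilities::MPI::free_communicator(my_comm);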
210
211
212
213 int
214 create_group(const MPI_Comm comm,
215 const MPI_Group &group,
216 const int tag,
217 MPI_Comm * new_comm)
218 {
219 const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
220 AssertThrowMPI(ierr);
221 return ierr;
222 }
223
224
225
226 std::vector<IndexSet>
227 create_ascending_partitioning(
228 const MPI_Comm comm,
229 const types::global_dof_index locally_owned_size)
230 {
231 static_assert(
232 std::is_same<types::global_dof_index, IndexSet::size_type>::value,
233 "IndexSet::size_type must match types::global_dof_index for "
234 "using this function");
235 const unsigned int n_proc = n_mpi_processes(comm);
236 const std::vector<IndexSet::size_type> sizes =
237 all_gather(comm, locally_owned_size);
238 const auto total_size =
239 std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
240
241 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
242
243 IndexSet::size_type begin = 0;
244 for (unsigned int i = 0; i < n_proc; ++i)
245 {
246 res[i].add_range(begin, begin + sizes[i]);
247 begin = begin + sizes[i];
248 }
249
250 return res;
251 }
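// [Editorial usage sketch -- not part of mpi.cc] Every process passes its
// local size and obtains the same vector of index sets; entry i describes
// what process i owns. `n_locally_owned` is a hypothetical placeholder.
//
//   const std::vector<IndexSet> partitioning =
//     Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);
//   const IndexSet &my_owned =
//     partitioning[Utilities::MPI::this_mpi_process(comm)];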
252
253
254
255 IndexSet
256 create_evenly_distributed_partitioning(
257 const MPI_Comm comm,
258 const types::global_dof_index total_size)
259 {
260 const unsigned int this_proc = this_mpi_process(comm);
261 const unsigned int n_proc = n_mpi_processes(comm);
262
263 return Utilities::create_evenly_distributed_partitioning(this_proc,
264 n_proc,
265 total_size);
266 }
267
268
269
270 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
271 create_mpi_data_type_n_bytes(const std::size_t n_bytes)
272 {
273 MPI_Datatype result;
274 int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
275 AssertThrowMPI(ierr);
276 ierr = MPI_Type_commit(&result);
277 AssertThrowMPI(ierr);
278
279# ifdef DEBUG
280 MPI_Count size64;
281 ierr = MPI_Type_size_x(result, &size64);
282 AssertThrowMPI(ierr);
283
284 Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
285# endif
286
287 // Now put the new data type into a std::unique_ptr with a custom
288 // deleter. We call the std::unique_ptr constructor that takes as its
289 // first argument a pointer (here, a pointer to a copy of the `result`
290 // object) and as its second argument a pointer-to-function, for which
291 // we here use a lambda function without captures that acts as the
292 // 'deleter' object: it calls `MPI_Type_free` and then deletes the
293 // pointer. To avoid a compiler warning about a null 'this' pointer
294 // in the lambda (which doesn't make sense: the lambda doesn't store
295 // anything), we create the deleter first.
296 auto deleter = [](MPI_Datatype *p) {
297 if (p != nullptr)
298 {
299 const int ierr = MPI_Type_free(p);
300 (void)ierr;
301 AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
302
303 delete p;
304 }
305 };
306
307 return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
308 new MPI_Datatype(result), deleter);
309 }
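// [Editorial usage sketch -- not part of mpi.cc] The data type returned
// above allows sending buffers whose size in bytes exceeds the range of
// the `int` count argument of classic MPI calls:
//
//   std::vector<char> buffer(n_bytes);   // possibly more than 2^31 bytes
//   const auto datatype =
//     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
//   const int ierr =
//     MPI_Send(buffer.data(), 1, *datatype, /*dest*/ 0, /*tag*/ 0, comm);
//   AssertThrowMPI(ierr);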
310
311
312
313 std::vector<unsigned int>
314 compute_point_to_point_communication_pattern(
315 const MPI_Comm mpi_comm,
316 const std::vector<unsigned int> &destinations)
317 {
318 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
319 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
320 (void)myid;
321 (void)n_procs;
322
323 for (const unsigned int destination : destinations)
324 {
325 (void)destination;
326 AssertIndexRange(destination, n_procs);
327 }
328
329
330 // Have a little function that checks if destinations provided
331 // to the current process are unique. The way it does this is
332 // to create a sorted list of destinations and then walk through
333 // the list and look at successive elements -- if we find the
334 // same number twice, we know that the destinations were not
335 // unique
336 const bool my_destinations_are_unique = [destinations]() {
337 if (destinations.size() == 0)
338 return true;
339 else
340 {
341 std::vector<unsigned int> my_destinations = destinations;
342 std::sort(my_destinations.begin(), my_destinations.end());
343 return (std::adjacent_find(my_destinations.begin(),
344 my_destinations.end()) ==
345 my_destinations.end());
346 }
347 }();
348
349 // If all processes report that they have unique destinations,
350 // then we can short-cut the process using a consensus algorithm (which
351 // is implemented only for the case of unique destinations):
352 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
353 1)
354 {
355 return ConsensusAlgorithms::nbx<char, char>(
356 destinations, {}, {}, {}, mpi_comm);
357 }
358
359 // So we need to run a different algorithm, specifically one that
360 // requires more memory -- MPI_Reduce_scatter_block will require memory
361 // proportional to the number of processes involved; that function is
362 // available for MPI 2.2 or later:
363 static CollectiveMutex mutex;
364 CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
365
366 const int mpi_tag =
367 internal::Tags::compute_point_to_point_communication_pattern;
368
369 // Calculate the number of messages to send to each process
370 std::vector<unsigned int> dest_vector(n_procs);
371 for (const auto &el : destinations)
372 ++dest_vector[el];
373
374 // Find how many processes will send to this one
375 // by reducing with sum and then scattering the
376 // results over all processes
377 unsigned int n_recv_from;
378 const int ierr = MPI_Reduce_scatter_block(
379 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
380
381 AssertThrowMPI(ierr);
382
383 // Send myid to every process in `destinations` vector...
384 std::vector<MPI_Request> send_requests(destinations.size());
385 for (const auto &el : destinations)
386 {
387 const int ierr =
388 MPI_Isend(&myid,
389 1,
390 MPI_UNSIGNED,
391 el,
392 mpi_tag,
393 mpi_comm,
394 send_requests.data() + (&el - destinations.data()));
395 AssertThrowMPI(ierr);
396 }
397
398
399 // Receive `n_recv_from` times from the processes
400 // who communicate with this one. Store the obtained id's
401 // in the resulting vector
402 std::vector<unsigned int> origins(n_recv_from);
403 for (auto &el : origins)
404 {
405 const int ierr = MPI_Recv(&el,
406 1,
407 MPI_UNSIGNED,
408 MPI_ANY_SOURCE,
409 mpi_tag,
410 mpi_comm,
411 MPI_STATUS_IGNORE);
412 AssertThrowMPI(ierr);
413 }
414
415 if (destinations.size() > 0)
416 {
417 const int ierr = MPI_Waitall(destinations.size(),
418 send_requests.data(),
419 MPI_STATUSES_IGNORE);
420 AssertThrowMPI(ierr);
421 }
422
423 return origins;
424 }
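// [Editorial usage sketch -- not part of mpi.cc] Each process lists the
// ranks it wants to send to; the call collectively determines who will
// send to the current process:
//
//   const std::vector<unsigned int> destinations = ...; // ranks we send to
//   const std::vector<unsigned int> sources =
//     Utilities::MPI::compute_point_to_point_communication_pattern(
//       comm, destinations);
//   // `sources` now contains the ranks we should expect messages from.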
425
426
427
428 unsigned int
429 compute_n_point_to_point_communications(
430 const MPI_Comm mpi_comm,
431 const std::vector<unsigned int> &destinations)
432 {
433 // Have a little function that checks if destinations provided
434 // to the current process are unique:
435 const bool my_destinations_are_unique = [destinations]() {
436 std::vector<unsigned int> my_destinations = destinations;
437 const unsigned int n_destinations = my_destinations.size();
438 std::sort(my_destinations.begin(), my_destinations.end());
439 my_destinations.erase(std::unique(my_destinations.begin(),
440 my_destinations.end()),
441 my_destinations.end());
442 return (my_destinations.size() == n_destinations);
443 }();
444
445 // If all processes report that they have unique destinations,
446 // then we can short-cut the process using a consensus algorithm:
447
448 if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
449 1)
450 {
451 return ConsensusAlgorithms::nbx<char, char>(
452 destinations, {}, {}, {}, mpi_comm)
453 .size();
454 }
455 else
456 {
457 const unsigned int n_procs =
458 Utilities::MPI::n_mpi_processes(mpi_comm);
459
460 for (const unsigned int destination : destinations)
461 {
462 (void)destination;
463 AssertIndexRange(destination, n_procs);
464 Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
466 "There is no point in communicating with ourselves."));
467 }
468
469 // Calculate the number of messages to send to each process
470 std::vector<unsigned int> dest_vector(n_procs);
471 for (const auto &el : destinations)
472 ++dest_vector[el];
473
474 // Find out how many processes will send to this one
475 // MPI_Reduce_scatter(_block) does exactly this
476 unsigned int n_recv_from = 0;
477
478 const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
479 &n_recv_from,
480 1,
481 MPI_UNSIGNED,
482 MPI_SUM,
483 mpi_comm);
484
485 AssertThrowMPI(ierr);
486
487 return n_recv_from;
488 }
489 }
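// [Editorial note -- not part of mpi.cc] In contrast to the function above,
// this variant only returns how many processes will send to the calling
// process, not which ones:
//
//   const unsigned int n_incoming =
//     Utilities::MPI::compute_n_point_to_point_communications(comm,
//                                                             destinations);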
490
491
492
493 namespace
494 {
495 // custom MPI_Op for calculate_collective_mpi_min_max_avg
496 void
497 max_reduce(const void *in_lhs_,
498 void * inout_rhs_,
499 int * len,
500 MPI_Datatype *)
501 {
502 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
503 MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
504
505 for (int i = 0; i < *len; ++i)
506 {
507 inout_rhs[i].sum += in_lhs[i].sum;
508 if (inout_rhs[i].min > in_lhs[i].min)
509 {
510 inout_rhs[i].min = in_lhs[i].min;
511 inout_rhs[i].min_index = in_lhs[i].min_index;
512 }
513 else if (inout_rhs[i].min == in_lhs[i].min)
514 {
515 // choose lower cpu index when tied to make operator commutative
516 if (inout_rhs[i].min_index > in_lhs[i].min_index)
517 inout_rhs[i].min_index = in_lhs[i].min_index;
518 }
519
520 if (inout_rhs[i].max < in_lhs[i].max)
521 {
522 inout_rhs[i].max = in_lhs[i].max;
523 inout_rhs[i].max_index = in_lhs[i].max_index;
524 }
525 else if (inout_rhs[i].max == in_lhs[i].max)
526 {
527 // choose lower cpu index when tied to make operator commutative
528 if (inout_rhs[i].max_index > in_lhs[i].max_index)
529 inout_rhs[i].max_index = in_lhs[i].max_index;
530 }
531 }
532 }
533 } // namespace
534
535
536
537 void
538 min_max_avg(const ArrayView<const double> &my_values,
539 const ArrayView<MinMaxAvg> & result,
540 const MPI_Comm mpi_communicator)
541 {
542 // If MPI was not started, we have a serial computation and cannot run
543 // the other MPI commands
544 if (job_supports_mpi() == false ||
545 Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
546 {
547 for (unsigned int i = 0; i < my_values.size(); ++i)
548 {
549 result[i].sum = my_values[i];
550 result[i].avg = my_values[i];
551 result[i].min = my_values[i];
552 result[i].max = my_values[i];
553 result[i].min_index = 0;
554 result[i].max_index = 0;
555 }
556 return;
557 }
558
559 /*
560 * A custom MPI datatype handle describing the memory layout of the
561 * MinMaxAvg struct. It is initialized the first time control reaches
562 * this static variable, so it is not initialized too early.
563 */
564 static MPI_Datatype type = []() {
565 MPI_Datatype type;
566
567 int lengths[] = {3, 2, 1};
568
569 MPI_Aint displacements[] = {0,
570 offsetof(MinMaxAvg, min_index),
571 offsetof(MinMaxAvg, avg)};
572
573 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
574
575 int ierr =
576 MPI_Type_create_struct(3, lengths, displacements, types, &type);
577 AssertThrowMPI(ierr);
578
579 ierr = MPI_Type_commit(&type);
580 AssertThrowMPI(ierr);
581
582 /* Ensure that we free the allocated datatype again at the end of
583 * the program run just before we call MPI_Finalize():*/
584 MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
585 int ierr = MPI_Type_free(&type);
586 AssertThrowMPI(ierr);
587 });
588
589 return type;
590 }();
591
592 /*
593 * A custom MPI op handle for our max_reduce function.
594 * It is initialized the first time control reaches this static
595 * variable, so it is not initialized too early.
596 */
597 static MPI_Op op = []() {
598 MPI_Op op;
599
600 int ierr =
601 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
602 static_cast<int>(true),
603 &op);
604 AssertThrowMPI(ierr);
605
606 /* Ensure that we free the allocated op again at the end of the
607 * program run just before we call MPI_Finalize():*/
608 MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
609 int ierr = MPI_Op_free(&op);
610 AssertThrowMPI(ierr);
611 });
612
613 return op;
614 }();
615
616 AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
617 Utilities::MPI::max(my_values.size(), mpi_communicator));
618
619 AssertDimension(my_values.size(), result.size());
620
621 // To avoid uninitialized values on some MPI implementations, provide
622 // result with a default value already...
623 MinMaxAvg dummy = {0.,
624 std::numeric_limits<double>::max(),
625 std::numeric_limits<double>::lowest(),
626 0,
627 0,
628 0.};
629
630 for (auto &i : result)
631 i = dummy;
632
633 const unsigned int my_id =
634 ::Utilities::MPI::this_mpi_process(mpi_communicator);
635 const unsigned int numproc =
636 ::Utilities::MPI::n_mpi_processes(mpi_communicator);
637
638 std::vector<MinMaxAvg> in(my_values.size());
639
640 for (unsigned int i = 0; i < my_values.size(); ++i)
641 {
642 in[i].sum = in[i].min = in[i].max = my_values[i];
643 in[i].min_index = in[i].max_index = my_id;
644 }
645
646 int ierr = MPI_Allreduce(
647 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
648 AssertThrowMPI(ierr);
649
650 for (auto &r : result)
651 r.avg = r.sum / numproc;
652 }
653
654
655#else
656
657 unsigned int
658 n_mpi_processes(const MPI_Comm)
659 {
660 return 1;
661 }
662
663
664
665 unsigned int
666 this_mpi_process(const MPI_Comm)
667 {
668 return 0;
669 }
670
671
672
673 const std::vector<unsigned int>
674 mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
675 {
676 return std::vector<unsigned int>{0};
677 }
678
679
680
681 std::vector<IndexSet>
682 create_ascending_partitioning(
683 const MPI_Comm /*comm*/,
684 const types::global_dof_index locally_owned_size)
685 {
686 return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
687 }
688
689 IndexSet
690 create_evenly_distributed_partitioning(
691 const MPI_Comm /*comm*/,
692 const types::global_dof_index total_size)
693 {
694 return complete_index_set(total_size);
695 }
696
697
698
699 MPI_Comm
700 duplicate_communicator(const MPI_Comm mpi_communicator)
701 {
702 return mpi_communicator;
703 }
704
705
706
707 void free_communicator(MPI_Comm /*mpi_communicator*/)
708 {}
709
710
711
712 void
713 min_max_avg(const ArrayView<const double> &my_values,
714 const ArrayView<MinMaxAvg> & result,
715 const MPI_Comm)
716 {
717 AssertDimension(my_values.size(), result.size());
718
719 for (unsigned int i = 0; i < my_values.size(); ++i)
720 {
721 result[i].sum = my_values[i];
722 result[i].avg = my_values[i];
723 result[i].min = my_values[i];
724 result[i].max = my_values[i];
725 result[i].min_index = 0;
726 result[i].max_index = 0;
727 }
728 }
729
730#endif
731
732 /* Force initialization of static struct: */
733 MPI_InitFinalize::Signals MPI_InitFinalize::signals =
734 MPI_InitFinalize::Signals();
735
736
737 MPI_InitFinalize::MPI_InitFinalize(int & argc,
738 char **& argv,
739 const unsigned int max_num_threads)
740 {
741 static bool constructor_has_already_run = false;
742 (void)constructor_has_already_run;
743 Assert(constructor_has_already_run == false,
744 ExcMessage("You can only create a single object of this class "
745 "in a program since it initializes the MPI system."));
746
747
748 int ierr = 0;
749#ifdef DEAL_II_WITH_MPI
750 // if we have PETSc, we will initialize it and let it handle MPI.
751 // Otherwise, we will do it.
752 int MPI_has_been_started = 0;
753 ierr = MPI_Initialized(&MPI_has_been_started);
754 AssertThrowMPI(ierr);
755 AssertThrow(MPI_has_been_started == 0,
756 ExcMessage("MPI error. You can only start MPI once!"));
757
758 int provided;
759 // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
760 // we might use several threads but never call two MPI functions at the
761 // same time. For an explanation of why we do this, see
762 // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
763 int wanted = MPI_THREAD_SERIALIZED;
764 ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
765 AssertThrowMPI(ierr);
766
767 // disable for now because at least some implementations always return
768 // MPI_THREAD_SINGLE.
769 // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
770 // ExcMessage("MPI reports that we are not allowed to use multiple
771 // threads."));
772#else
773 // make sure the compiler doesn't warn about these variables
774 (void)argc;
775 (void)argv;
776 (void)ierr;
777#endif
778
779 // Initialize Kokkos
780 {
781 // argv has argc+1 elements and the last one is a nullptr. To append
782 // one element, we thus create a new argv by copying the first argc
783 // elements, appending the new option, and then a nullptr.
784 //
785 // We do get in trouble, though, if a user program is called with
786 // '--help' as a command line argument. This '--help' gets passed on to
787 // Kokkos, which promptly responds with a lengthy message that the user
788 // likely did not intend. As a consequence, filter out this specific
789 // flag.
790 std::vector<char *> argv_new;
791 for (const auto arg : make_array_view(&argv[0], &argv[0] + argc))
792 if (strcmp(arg, "--help") != 0)
793 argv_new.push_back(arg);
794
795 std::stringstream threads_flag;
796#if KOKKOS_VERSION >= 30700
797 threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
798#else
799 threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
800#endif
801 const std::string threads_flag_string = threads_flag.str();
802 argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
803 argv_new.push_back(nullptr);
804
805 // The first argument in Kokkos::initialize is of type int&. Hence, we
806 // need to define a new variable to pass to it (instead of using argc+1
807 // inline).
808 int argc_new = argv_new.size() - 1;
809 Kokkos::initialize(argc_new, argv_new.data());
810 }
811
812 // we are allowed to call MPI_Init ourselves and PETScInitialize will
813 // detect this. This allows us to use MPI_Init_thread instead.
814#ifdef DEAL_II_WITH_PETSC
815# ifdef DEAL_II_WITH_SLEPC
816 // Initialize SLEPc (with PETSc):
817 finalize_petscslepc = SlepcInitializeCalled ? false : true;
818 ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
819 AssertThrow(ierr == 0, ExcSLEPcError(ierr));
820# else
821 // or just initialize PETSc alone:
822 finalize_petscslepc = PetscInitializeCalled ? false : true;
823 ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
824 AssertThrow(ierr == 0, ExcPETScError(ierr));
825# endif
826
827 // Disable PETSc exception handling. This just prints a large wall
828 // of text that is not particularly helpful for what we do:
829 PetscPopSignalHandler();
830#endif
831
832 // Initialize zoltan
833#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
834 float version;
835 Zoltan_Initialize(argc, argv, &version);
836#endif
837
838#ifdef DEAL_II_WITH_P4EST
839 // Initialize p4est and libsc components
840# if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
841 // This feature is broken in version 2.0.0 for calls to
842 // MPI_Comm_create_group (see cburstedde/p4est#30).
843 // Disabling it leads to more verbose p4est error messages
844 // which should be fine.
845 sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
846# endif
847 p4est_init(nullptr, SC_LP_SILENT);
848#endif
849
850 constructor_has_already_run = true;
851
852
853 // Now also see how many threads we'd like to run
854 if (max_num_threads != numbers::invalid_unsigned_int)
855 {
856 // set maximum number of threads (also respecting the environment
857 // variable that the called function evaluates) based on what the
858 // user asked
859 MultithreadInfo::set_thread_limit(max_num_threads);
860 }
861 else
862 // user wants automatic choice
863 {
864#ifdef DEAL_II_WITH_MPI
865 // we need to figure out how many MPI processes there are on the
866 // current node, as well as how many CPU cores we have. for the
867 // first task, check what get_hostname() returns and then do an
868 // allgather so each processor gets the answer
869 //
870 // in calculating the length of the string, don't forget the
871 // terminating \0 on C-style strings
872 const std::string hostname = Utilities::System::get_hostname();
873 const unsigned int max_hostname_size =
874 Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
875 std::vector<char> hostname_array(max_hostname_size);
876 std::copy(hostname.c_str(),
877 hostname.c_str() + hostname.size() + 1,
878 hostname_array.begin());
879
880 std::vector<char> all_hostnames(max_hostname_size *
881 MPI::n_mpi_processes(MPI_COMM_WORLD));
882 const int ierr = MPI_Allgather(hostname_array.data(),
883 max_hostname_size,
884 MPI_CHAR,
885 all_hostnames.data(),
886 max_hostname_size,
887 MPI_CHAR,
888 MPI_COMM_WORLD);
889 AssertThrowMPI(ierr);
890
891 // search how often our own hostname appears, and which instance
892 // of it the current process represents
893 unsigned int n_local_processes = 0;
894 unsigned int nth_process_on_host = 0;
895 for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
896 ++i)
897 if (std::string(all_hostnames.data() + i * max_hostname_size) ==
898 hostname)
899 {
900 ++n_local_processes;
901 if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
902 ++nth_process_on_host;
903 }
904 Assert(nth_process_on_host > 0, ExcInternalError());
905
906
907 // compute how many cores each process gets. if the number does not
908 // divide evenly, then we get one more core if we are among the
909 // first few processes
910 //
911 // if the number would be zero, round up to one since every process
912 // needs to have at least one thread
913 const unsigned int n_threads =
914 std::max(MultithreadInfo::n_cores() / n_local_processes +
915 (nth_process_on_host <=
916 MultithreadInfo::n_cores() % n_local_processes ?
917 1 :
918 0),
919 1U);
920#else
921 const unsigned int n_threads = MultithreadInfo::n_cores();
922#endif
923
924 // finally set this number of threads
925 MultithreadInfo::set_thread_limit(n_threads);
926 }
927
928 // As a final step call the at_mpi_init() signal handler.
929 signals.at_mpi_init();
930 }
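// [Editorial usage sketch -- not part of mpi.cc] The canonical way to use
// this class, as in the deal.II tutorial programs, is to create a single
// object at the top of main(); its destructor then takes care of
// finalizing MPI (and PETSc/SLEPc/p4est/Kokkos, if configured):
//
//   int main(int argc, char *argv[])
//   {
//     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
//     // ... run the parallel program ...
//   }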
931
932
933
934 void
935 MPI_InitFinalize::register_request(MPI_Request &request)
936 {
937 // insert if it is not in the set already:
938 requests.insert(&request);
939 }
940
941
942
943 void
944 MPI_InitFinalize::unregister_request(MPI_Request &request)
945 {
946 Assert(
947 requests.find(&request) != requests.end(),
949 "You tried to call unregister_request() with an invalid request."));
950
951 requests.erase(&request);
952 }
953
954
955
956 std::set<MPI_Request *> MPI_InitFinalize::requests;
957
958
959
960 MPI_InitFinalize::~MPI_InitFinalize()
961 {
962 // First, call the at_mpi_finalize() signal handler.
963 signals.at_mpi_finalize();
964
965 // make memory pool release all PETSc/Trilinos/MPI-based vectors that
966 // are no longer used at this point. This is relevant because the
967 // static object destructors that run for these vectors at the end of
968 // the program would otherwise run after MPI_Finalize, leading to errors
969
970#ifdef DEAL_II_WITH_MPI
971 // Before exiting, wait for nonblocking communication to complete:
972 for (auto request : requests)
973 {
974 const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
975 AssertThrowMPI(ierr);
976 }
977
978 // Start with deal.II MPI vectors and delete vectors from the pools:
979 GrowingVectorMemory<
980 LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
981 GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
982 release_unused_memory();
983 GrowingVectorMemory<
984 LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
985 GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
986 release_unused_memory();
987
988 // Next with Trilinos:
989# ifdef DEAL_II_WITH_TRILINOS
990 GrowingVectorMemory<
991 TrilinosWrappers::MPI::Vector>::release_unused_memory();
992 GrowingVectorMemory<
993 TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
994# endif
995#endif
996
997
998 // Now deal with PETSc (with or without MPI). Only delete the vectors if
999 // finalize hasn't been called yet, otherwise this will lead to errors.
1000#ifdef DEAL_II_WITH_PETSC
1001 if (!PetscFinalizeCalled)
1002 {
1003 GrowingVectorMemory<
1004 PETScWrappers::MPI::Vector>::release_unused_memory();
1005 GrowingVectorMemory<
1006 PETScWrappers::MPI::BlockVector>::release_unused_memory();
1007 }
1008# ifdef DEAL_II_WITH_SLEPC
1009 // and now end SLEPc with PETSc if we did so
1010 if (finalize_petscslepc)
1011 SlepcFinalize();
1012# else
1013 // or just end PETSc if we did so
1014 if (finalize_petscslepc)
1015 PetscFinalize();
1016# endif
1017#endif
1018
1019#ifdef DEAL_II_WITH_P4EST
1020 // now end p4est and libsc
1021 // Note: p4est has no finalize function
1022 sc_finalize();
1023#endif
1024
1025
1026 // Finalize Kokkos
1027 Kokkos::finalize();
1028
1029 // only MPI_Finalize if we are running with MPI. We also need to do this
1030 // when running PETSc, because we initialize MPI ourselves before
1031 // calling PetscInitialize
1032#ifdef DEAL_II_WITH_MPI
1033 if (job_supports_mpi() == true)
1034 {
1035# if __cpp_lib_uncaught_exceptions >= 201411
1036 // std::uncaught_exception() is deprecated in c++17
1037 if (std::uncaught_exceptions() > 0)
1038# else
1039 if (std::uncaught_exception() == true)
1040# endif
1041 {
1042 // do not try to call MPI_Finalize to avoid a deadlock.
1043 }
1044 else
1045 {
1046 const int ierr = MPI_Finalize();
1047 (void)ierr;
1048 AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1049 }
1050 }
1051#endif
1052 }
1053
1054
1055
1056 bool
1057 job_supports_mpi()
1058 {
1059#ifdef DEAL_II_WITH_MPI
1060 int MPI_has_been_started = 0;
1061 const int ierr = MPI_Initialized(&MPI_has_been_started);
1062 AssertThrowMPI(ierr);
1063
1064 return (MPI_has_been_started > 0);
1065#else
1066 return false;
1067#endif
1068 }
1069
1070
1071
1072 std::vector<unsigned int>
1073 compute_index_owner(const IndexSet &owned_indices,
1074 const IndexSet &indices_to_look_up,
1075 const MPI_Comm comm)
1076 {
1077 Assert(owned_indices.size() == indices_to_look_up.size(),
1078 ExcMessage("IndexSets have to have the same sizes."));
1079
1080 Assert(
1081 owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1082 ExcMessage("IndexSets have to have the same size on all processes."));
1083
1084 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1085
1086 // Step 1: setup dictionary
1087 // The input owned_indices can be partitioned arbitrarily. In the
1088 // dictionary, the index set is statically repartitioned among the
1089 // processes again and extended with information about the actual
1090 // owner of each index.
1091 internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1092 owned_indices, indices_to_look_up, comm, owning_ranks);
1093
1094 // Step 2: read dictionary
1095 // Communicate with the process who owns the index in the static
1096 // partition (i.e. in the dictionary). This process returns the actual
1097 // owner of the index.
1098 ConsensusAlgorithms::Selector<
1099 std::vector<
1100 std::pair<types::global_dof_index, types::global_dof_index>>,
1101 std::vector<unsigned int>>
1102 consensus_algorithm;
1103 consensus_algorithm.run(process, comm);
1104
1105 return owning_ranks;
1106 }
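// [Editorial usage sketch -- not part of mpi.cc] For each element of a set
// of ghost indices, determine which rank owns it in a distributed
// partition. `locally_owned` and `ghost_indices` are hypothetical
// placeholders for IndexSet objects of the same global size:
//
//   const std::vector<unsigned int> owners =
//     Utilities::MPI::compute_index_owner(locally_owned, ghost_indices, comm);
//   // owners[i] is the owning rank of ghost_indices.nth_index_in_set(i).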
1107
1108
1109
1110 namespace internal
1111 {
1112 namespace CollectiveMutexImplementation
1113 {
1118 void
1119 check_exception()
1120 {
1121#ifdef DEAL_II_WITH_MPI
1122# if __cpp_lib_uncaught_exceptions >= 201411
1123 // std::uncaught_exception() is deprecated in c++17
1124 if (std::uncaught_exceptions() != 0)
1125# else
1126 if (std::uncaught_exception() == true)
1127# endif
1128 {
1129 std::cerr
1130 << "---------------------------------------------------------\n"
1131 << "An exception was thrown inside a section of the program\n"
1132 << "guarded by a CollectiveMutex.\n"
1133 << "Because a CollectiveMutex guards critical communication\n"
1134 << "handling the exception would likely\n"
1135 << "deadlock because only the current process is aware of the\n"
1136 << "exception. To prevent this deadlock, the program will be\n"
1137 << "aborted.\n"
1138 << "---------------------------------------------------------"
1139 << std::endl;
1140
1141 MPI_Abort(MPI_COMM_WORLD, 1);
1142 }
1143#endif
1144 }
1145 } // namespace CollectiveMutexImplementation
1146 } // namespace internal
1147
1148
1149
1150 CollectiveMutex::CollectiveMutex()
1151 : locked(false)
1152 , request(MPI_REQUEST_NULL)
1153 {
1154 Utilities::MPI::MPI_InitFinalize::register_request(request);
1155 }
1156
1157
1158
1159 CollectiveMutex::~CollectiveMutex()
1160 {
1161 // First check if this destructor is called during exception handling
1162 // if so, abort.
1163 internal::CollectiveMutexImplementation::check_exception();
1164
1165 Assert(
1166 !locked,
1167 ExcMessage(
1168 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1169
1170 Utilities::MPI::MPI_InitFinalize::unregister_request(request);
1171 }
1172
1173
1174
1175 void
1176 CollectiveMutex::lock(const MPI_Comm comm)
1177 {
1178 (void)comm;
1179
1180 Assert(
1181 !locked,
1182 ExcMessage(
1183 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1184
1185#ifdef DEAL_II_WITH_MPI
1186
1187 // TODO: For now, we implement this mutex with a blocking barrier
1188 // in the lock and unlock. It needs to be tested whether we can move
1189 // to a nonblocking barrier (code disabled below).
1190
1191 const int ierr = MPI_Barrier(comm);
1192 AssertThrowMPI(ierr);
1193
1194# if 0
1195 // wait for non-blocking barrier to finish. This is a noop the
1196 // first time we lock().
1197 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1198 AssertThrowMPI(ierr);
1199# else
1200 // nothing to do as blocking barrier already completed
1201# endif
1202#endif
1203
1204 locked = true;
1205 }
1206
1207
1208
1209 void
1210 CollectiveMutex::unlock(const MPI_Comm comm)
1211 {
1212 (void)comm;
1213
1214 // First check if this function is called during exception handling
1215 // if so, abort. This can happen if a ScopedLock is destroyed.
1216 internal::CollectiveMutexImplementation::check_exception();
1217
1218 Assert(
1219 locked,
1220 ExcMessage(
1221 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1222
1223#ifdef DEAL_II_WITH_MPI
1224
1225 // TODO: For now, we implement this mutex with a blocking barrier
1226 // in the lock and unlock. It needs to be tested whether we can move
1227 // to a nonblocking barrier (code disabled below):
1228# if 0
1229 const int ierr = MPI_Ibarrier(comm, &request);
1230 AssertThrowMPI(ierr);
1231# else
1232 const int ierr = MPI_Barrier(comm);
1233 AssertThrowMPI(ierr);
1234# endif
1235#endif
1236
1237 locked = false;
1238 }
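// [Editorial usage sketch -- not part of mpi.cc] Rather than calling lock()
// and unlock() by hand, the mutex is typically used through the RAII-style
// ScopedLock, as done earlier in this file:
//
//   static Utilities::MPI::CollectiveMutex mutex;
//   {
//     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
//     // ... critical communication that all processes enter together ...
//   }   // unlock() is called automatically when `lock` goes out of scope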
1239
1240
1241#ifndef DOXYGEN
1242 // explicit instantiations
1243
1244 // booleans aren't in MPI_SCALARS
1245 template bool
1246 reduce(const bool &,
1247 const MPI_Comm,
1248 const std::function<bool(const bool &, const bool &)> &,
1249 const unsigned int);
1250
1251 template std::vector<bool>
1252 reduce(const std::vector<bool> &,
1253 const MPI_Comm,
1254 const std::function<std::vector<bool>(const std::vector<bool> &,
1255 const std::vector<bool> &)> &,
1256 const unsigned int);
1257
1258 template bool
1259 all_reduce(const bool &,
1260 const MPI_Comm,
1261 const std::function<bool(const bool &, const bool &)> &);
1262
1263 template std::vector<bool>
1264 all_reduce(
1265 const std::vector<bool> &,
1266 const MPI_Comm,
1267 const std::function<std::vector<bool>(const std::vector<bool> &,
1268 const std::vector<bool> &)> &);
1269
1270 // We need an explicit instantiation of this for the same reason as the
1271 // other types described in mpi.inst.in
1272 template void
1273 internal::all_reduce<bool>(const MPI_Op &,
1274 const ArrayView<const bool> &,
1275 const MPI_Comm,
1276 const ArrayView<bool> &);
1277
1278
1279 template bool
1280 logical_or<bool>(const bool &, const MPI_Comm);
1281
1282
1283 template void
1284 logical_or<bool>(const ArrayView<const bool> &,
1285 const MPI_Comm,
1286 const ArrayView<bool> &);
1287
1288
1289 template std::vector<unsigned int>
1290 compute_set_union(const std::vector<unsigned int> &vec,
1291 const MPI_Comm comm);
1292
1293
1294 template std::set<unsigned int>
1295 compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
1296#endif
1297
1298#include "mpi.inst"
1299 } // end of namespace MPI
1300} // end of namespace Utilities
1301