Reference documentation for deal.II version 9.3.3
mpi.cc
1// ---------------------------------------------------------------------
2//
3// Copyright (C) 2005 - 2021 by the deal.II authors
4//
5// This file is part of the deal.II library.
6//
7// The deal.II library is free software; you can use it, redistribute
8// it, and/or modify it under the terms of the GNU Lesser General
9// Public License as published by the Free Software Foundation; either
10// version 2.1 of the License, or (at your option) any later version.
11// The full text of the license can be found in the file LICENSE.md at
12// the top level directory of deal.II.
13//
14// ---------------------------------------------------------------------
15
16
17#include <deal.II/base/exceptions.h>
18#include <deal.II/base/index_set.h>
19#include <deal.II/base/mpi.h>
20#include <deal.II/base/mpi.templates.h>
21#include <deal.II/base/mpi_compute_index_owner_internal.h>
22#include <deal.II/base/mpi_tags.h>
23#include <deal.II/base/multithread_info.h>
24#include <deal.II/base/utilities.h>
25
26#include <deal.II/lac/la_parallel_block_vector.h>
27#include <deal.II/lac/la_parallel_vector.h>
28#include <deal.II/lac/vector_memory.h>
29
30#include <iostream>
31#include <numeric>
32#include <set>
33#include <vector>
34
35#ifdef DEAL_II_WITH_TRILINOS
36# ifdef DEAL_II_WITH_MPI
37# include <deal.II/lac/trilinos_parallel_block_vector.h>
38# include <deal.II/lac/trilinos_vector.h>
39
40# include <Epetra_MpiComm.h>
41# endif
42#endif
43
44#ifdef DEAL_II_WITH_PETSC
45# include <deal.II/lac/petsc_block_vector.h>
46# include <deal.II/lac/petsc_vector.h>
47
48# include <petscsys.h>
49#endif
50
51#ifdef DEAL_II_WITH_SLEPC
52# include <deal.II/lac/slepc_solver.h>
53
54# include <slepcsys.h>
55#endif
56
57#ifdef DEAL_II_WITH_P4EST
58# include <p4est_bits.h>
59#endif
60
61#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
62# include <zoltan_cpp.h>
63#endif
64
65DEAL_II_NAMESPACE_OPEN
66
67
68namespace Utilities
69{
70 IndexSet
71 create_evenly_distributed_partitioning(const unsigned int my_partition_id,
72 const unsigned int n_partitions,
73 const IndexSet::size_type total_size)
74 {
75 const unsigned int remain = total_size % n_partitions;
76
77 const IndexSet::size_type min_size = total_size / n_partitions;
78
79 const IndexSet::size_type begin =
80 min_size * my_partition_id + std::min(my_partition_id, remain);
81 const IndexSet::size_type end =
82 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
83 IndexSet result(total_size);
84 result.add_range(begin, end);
85 return result;
86 }
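As a usage sketch of the arithmetic above (the concrete numbers are illustrative only): splitting total_size = 10 over n_partitions = 3 yields the contiguous ranges [0,4), [4,7) and [7,10).

  // Illustration: 10 indices distributed over 3 partitions.
  const dealii::IndexSet part0 =
    dealii::Utilities::create_evenly_distributed_partitioning(0, 3, 10);
  // part0 contains [0,4); partition 1 receives [4,7), partition 2 receives [7,10).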
87
88 namespace MPI
89 {
90 MinMaxAvg
91 min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
92 {
93 MinMaxAvg result;
94 min_max_avg(ArrayView<const double>(my_value),
95 ArrayView<MinMaxAvg>(result),
96 mpi_communicator);
97
98 return result;
99 }
100
101
102
103 std::vector<MinMaxAvg>
104 min_max_avg(const std::vector<double> &my_values,
105 const MPI_Comm & mpi_communicator)
106 {
107 std::vector<MinMaxAvg> results(my_values.size());
108 min_max_avg(my_values, results, mpi_communicator);
109
110 return results;
111 }
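A hedged usage sketch of these overloads; my_timing is a placeholder for any locally computed number, and any initialized communicator can be used in place of MPI_COMM_WORLD.

  // Reduce a single local number; every rank obtains the same statistics.
  const double my_timing = 1.23; // placeholder for a locally measured quantity
  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg(my_timing, MPI_COMM_WORLD);
  // stats.min, stats.max, stats.avg and the ranks stats.min_index /
  // stats.max_index are then available on every participating process.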
112
113
114
115#ifdef DEAL_II_WITH_MPI
116 unsigned int
117 n_mpi_processes(const MPI_Comm &mpi_communicator)
118 {
119 int n_jobs = 1;
120 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
121 AssertThrowMPI(ierr);
122
123 return n_jobs;
124 }
125
126
127 unsigned int
128 this_mpi_process(const MPI_Comm &mpi_communicator)
129 {
130 int rank = 0;
131 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
132 AssertThrowMPI(ierr);
133
134 return rank;
135 }
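A minimal sketch of the two query functions above, which simply wrap MPI_Comm_size and MPI_Comm_rank (iostream assumed in user code):

  const unsigned int n_ranks =
    dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
  const unsigned int my_rank =
    dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  if (my_rank == 0)
    std::cout << "Running on " << n_ranks << " MPI processes" << std::endl;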
136
137
138
139 const std::vector<unsigned int>
140 mpi_processes_within_communicator(const MPI_Comm &comm_large,
141 const MPI_Comm &comm_small)
142 {
143 if (Utilities::MPI::job_supports_mpi() == false)
144 return std::vector<unsigned int>{0};
145
146 const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
147 const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
148
149 std::vector<unsigned int> ranks(size);
150 const int ierr = MPI_Allgather(
151 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
152 AssertThrowMPI(ierr);
153
154 return ranks;
155 }
156
157
158
159 MPI_Comm
160 duplicate_communicator(const MPI_Comm &mpi_communicator)
161 {
162 MPI_Comm new_communicator;
163 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
164 AssertThrowMPI(ierr);
165 return new_communicator;
166 }
167
168
169
170 void
171 free_communicator(MPI_Comm &mpi_communicator)
172 {
173 // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
174 const int ierr = MPI_Comm_free(&mpi_communicator);
175 AssertThrowMPI(ierr);
176 }
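A sketch of the intended pairing of the two functions above:

  MPI_Comm duplicate =
    dealii::Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
  // ... use `duplicate` for communication that must not interfere with
  //     messages on MPI_COMM_WORLD ...
  dealii::Utilities::MPI::free_communicator(duplicate);
  // `duplicate` is now MPI_COMM_NULL.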
177
178
179
180 int
181 create_group(const MPI_Comm & comm,
182 const MPI_Group &group,
183 const int tag,
184 MPI_Comm * new_comm)
185 {
186# if DEAL_II_MPI_VERSION_GTE(3, 0)
187 return MPI_Comm_create_group(comm, group, tag, new_comm);
188# else
189 int rank;
190 int ierr = MPI_Comm_rank(comm, &rank);
191 AssertThrowMPI(ierr);
192
193 int grp_rank;
194 ierr = MPI_Group_rank(group, &grp_rank);
195 AssertThrowMPI(ierr);
196 if (grp_rank == MPI_UNDEFINED)
197 {
198 *new_comm = MPI_COMM_NULL;
199 return MPI_SUCCESS;
200 }
201
202 int grp_size;
203 ierr = MPI_Group_size(group, &grp_size);
204 AssertThrowMPI(ierr);
205
206 ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
207 AssertThrowMPI(ierr);
208
209 MPI_Group parent_grp;
210 ierr = MPI_Comm_group(comm, &parent_grp);
211 AssertThrowMPI(ierr);
212
213 std::vector<int> pids(grp_size);
214 std::vector<int> grp_pids(grp_size);
215 std::iota(grp_pids.begin(), grp_pids.end(), 0);
216 ierr = MPI_Group_translate_ranks(
217 group, grp_size, grp_pids.data(), parent_grp, pids.data());
218 AssertThrowMPI(ierr);
219 ierr = MPI_Group_free(&parent_grp);
220 AssertThrowMPI(ierr);
221
222 MPI_Comm comm_old = *new_comm;
223 MPI_Comm ic;
224 for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
225 {
226 const int gid = grp_rank / merge_sz;
227 comm_old = *new_comm;
228 if (gid % 2 == 0)
229 {
230 if ((gid + 1) * merge_sz < grp_size)
231 {
232 ierr = (MPI_Intercomm_create(
233 *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
234 AssertThrowMPI(ierr);
235 ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
236 AssertThrowMPI(ierr);
237 }
238 }
239 else
240 {
241 ierr = MPI_Intercomm_create(
242 *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
243 AssertThrowMPI(ierr);
244 ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
245 AssertThrowMPI(ierr);
246 }
247 if (*new_comm != comm_old)
248 {
249 ierr = MPI_Comm_free(&ic);
250 AssertThrowMPI(ierr);
251 ierr = MPI_Comm_free(&comm_old);
252 AssertThrowMPI(ierr);
253 }
254 }
255
256 return MPI_SUCCESS;
257# endif
258 }
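A sketch of calling create_group(); error checks and group cleanup are omitted, the two member ranks and the tag are only an example, and every rank contained in the group has to participate in the call.

  MPI_Group world_group, sub_group;
  MPI_Comm_group(MPI_COMM_WORLD, &world_group);
  const int members[2] = {0, 1}; // example: a group of the first two ranks
  MPI_Group_incl(world_group, 2, members, &sub_group);
  MPI_Comm sub_comm = MPI_COMM_NULL;
  if (dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) < 2)
    dealii::Utilities::MPI::create_group(MPI_COMM_WORLD,
                                         sub_group,
                                         /*tag=*/7,
                                         &sub_comm);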
259
260
261
262 std::vector<IndexSet>
263 create_ascending_partitioning(const MPI_Comm & comm,
264 const IndexSet::size_type locally_owned_size)
265 {
266 const unsigned int n_proc = n_mpi_processes(comm);
267 const std::vector<IndexSet::size_type> sizes =
268 all_gather(comm, locally_owned_size);
269 const auto total_size =
270 std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
271
272 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
273
274 IndexSet::size_type begin = 0;
275 for (unsigned int i = 0; i < n_proc; ++i)
276 {
277 res[i].add_range(begin, begin + sizes[i]);
278 begin = begin + sizes[i];
279 }
280
281 return res;
282 }
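A usage sketch of the function above; n_locally_owned stands for whatever number of elements the calling process owns.

  const dealii::IndexSet::size_type n_locally_owned = 42; // placeholder
  const std::vector<dealii::IndexSet> partition =
    dealii::Utilities::MPI::create_ascending_partitioning(MPI_COMM_WORLD,
                                                          n_locally_owned);
  const dealii::IndexSet &my_range =
    partition[dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)];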
283
284 IndexSet
285 create_evenly_distributed_partitioning(const MPI_Comm & comm,
286 const IndexSet::size_type total_size)
287 {
288 const unsigned int this_proc = this_mpi_process(comm);
289 const unsigned int n_proc = n_mpi_processes(comm);
290
291 return Utilities::create_evenly_distributed_partitioning(this_proc,
292 n_proc,
293 total_size);
294 }
295
296
297
302 class ConsensusAlgorithmsProcessTargets
303 : public ConsensusAlgorithms::Process<unsigned int, unsigned int>
304 {
305 public:
306 ConsensusAlgorithmsProcessTargets(const std::vector<unsigned int> &target)
307 : target(target)
308 {}
309
310 using T1 = unsigned int;
311 using T2 = unsigned int;
312
313 virtual void
314 answer_request(const unsigned int other_rank,
315 const std::vector<T1> &,
316 std::vector<T2> &) override
317 {
318 this->sources.push_back(other_rank);
319 }
320
326 virtual std::vector<unsigned int>
327 compute_targets() override
328 {
329 return target;
330 }
331
337 std::vector<unsigned int>
338 get_result()
339 {
340 std::sort(sources.begin(), sources.end());
341 return sources;
342 }
343
344 private:
348 const std::vector<unsigned int> &target;
349
353 std::vector<unsigned int> sources;
354 };
355
356
357
358 std::vector<unsigned int>
359 compute_point_to_point_communication_pattern(
360 const MPI_Comm & mpi_comm,
361 const std::vector<unsigned int> &destinations)
362 {
363 const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
364 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
365 (void)myid;
366 (void)n_procs;
367
368 for (const unsigned int destination : destinations)
369 {
370 (void)destination;
371 AssertIndexRange(destination, n_procs);
372 }
373
374# if DEAL_II_MPI_VERSION_GTE(3, 0)
375
376 ConsensusAlgorithmsProcessTargets process(destinations);
377 ConsensusAlgorithms::NBX<ConsensusAlgorithmsProcessTargets::T1,
378 ConsensusAlgorithmsProcessTargets::T2>
379 consensus_algorithm(process, mpi_comm);
380 consensus_algorithm.run();
381 return process.get_result();
382
383# elif DEAL_II_MPI_VERSION_GTE(2, 2)
384
385 static CollectiveMutex mutex;
386 CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
387
388 const int mpi_tag =
389 internal::Tags::compute_point_to_point_communication_pattern;
390
391 // Calculate the number of messages to send to each process
392 std::vector<unsigned int> dest_vector(n_procs);
393 for (const auto &el : destinations)
394 ++dest_vector[el];
395
396 // Find how many processes will send to this one
397 // by reducing with sum and then scattering the
398 // results over all processes
399 unsigned int n_recv_from;
400 const int ierr = MPI_Reduce_scatter_block(
401 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
402
403 AssertThrowMPI(ierr);
404
405 // Send myid to every process in `destinations` vector...
406 std::vector<MPI_Request> send_requests(destinations.size());
407 for (const auto &el : destinations)
408 {
409 const int ierr =
410 MPI_Isend(&myid,
411 1,
412 MPI_UNSIGNED,
413 el,
414 mpi_tag,
415 mpi_comm,
416 send_requests.data() + (&el - destinations.data()));
417 AssertThrowMPI(ierr);
418 }
419
420
421 // Receive `n_recv_from` times from the processes
422 // who communicate with this one. Store the obtained id's
423 // in the resulting vector
424 std::vector<unsigned int> origins(n_recv_from);
425 for (auto &el : origins)
426 {
427 const int ierr = MPI_Recv(&el,
428 1,
429 MPI_UNSIGNED,
430 MPI_ANY_SOURCE,
431 mpi_tag,
432 mpi_comm,
433 MPI_STATUS_IGNORE);
434 AssertThrowMPI(ierr);
435 }
436
437 if (destinations.size() > 0)
438 {
439 const int ierr = MPI_Waitall(destinations.size(),
440 send_requests.data(),
441 MPI_STATUSES_IGNORE);
442 AssertThrowMPI(ierr);
443 }
444
445 return origins;
446# else
447 // let all processors communicate the maximal number of destinations
448 // they have
449 const unsigned int max_n_destinations =
450 Utilities::MPI::max(destinations.size(), mpi_comm);
451
452 if (max_n_destinations == 0)
453 // all processes have nothing to send/receive:
454 return std::vector<unsigned int>();
455
456 // now that we know the number of data packets every processor wants to
457 // send, set up a buffer with the maximal size and copy our destinations
458 // in there, padded with -1's
459 std::vector<unsigned int> my_destinations(max_n_destinations,
460 numbers::invalid_unsigned_int);
461 std::copy(destinations.begin(),
462 destinations.end(),
463 my_destinations.begin());
464
465 // now exchange these (we could communicate less data if we used
466 // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
467 // processors in this case, which is more expensive than the reduction
468 // operation above in MPI_Allreduce)
469 std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
470 const int ierr = MPI_Allgather(my_destinations.data(),
471 max_n_destinations,
472 MPI_UNSIGNED,
473 all_destinations.data(),
474 max_n_destinations,
475 MPI_UNSIGNED,
476 mpi_comm);
477 AssertThrowMPI(ierr);
478
479 // now we know who is going to communicate with whom. collect who is
480 // going to communicate with us!
481 std::vector<unsigned int> origins;
482 for (unsigned int i = 0; i < n_procs; ++i)
483 for (unsigned int j = 0; j < max_n_destinations; ++j)
484 if (all_destinations[i * max_n_destinations + j] == myid)
485 origins.push_back(i);
486 else if (all_destinations[i * max_n_destinations + j] ==
487 numbers::invalid_unsigned_int)
488 break;
489
490 return origins;
491# endif
492 }
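A sketch of how this function is typically used: given the ranks a process wants to send to, it answers the symmetric question of who will send to this process. The destinations vector is a placeholder.

  const std::vector<unsigned int> destinations = {/* ranks we will send to */};
  const std::vector<unsigned int> sources =
    dealii::Utilities::MPI::compute_point_to_point_communication_pattern(
      MPI_COMM_WORLD, destinations);
  // One receive can now be posted for every entry of `sources`.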
493
494
495
496 unsigned int
497 compute_n_point_to_point_communications(
498 const MPI_Comm & mpi_comm,
499 const std::vector<unsigned int> &destinations)
500 {
501 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
502
503 for (const unsigned int destination : destinations)
504 {
505 (void)destination;
506 AssertIndexRange(destination, n_procs);
507 Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
508 ExcMessage(
509 "There is no point in communicating with ourselves."));
510 }
511
512 // Calculate the number of messages to send to each process
513 std::vector<unsigned int> dest_vector(n_procs);
514 for (const auto &el : destinations)
515 ++dest_vector[el];
516
517# if DEAL_II_MPI_VERSION_GTE(2, 2)
518 // Find out how many processes will send to this one
519 // MPI_Reduce_scatter(_block) does exactly this
520 unsigned int n_recv_from = 0;
521
522 const int ierr = MPI_Reduce_scatter_block(
523 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
524
525 AssertThrowMPI(ierr);
526
527 return n_recv_from;
528# else
529 // Find out how many processes will send to this one
530 // by reducing with sum and then scattering the
531 // results over all processes
532 std::vector<unsigned int> buffer(dest_vector.size());
533 unsigned int n_recv_from = 0;
534
535 int ierr = MPI_Reduce(dest_vector.data(),
536 buffer.data(),
537 dest_vector.size(),
538 MPI_UNSIGNED,
539 MPI_SUM,
540 0,
541 mpi_comm);
542 AssertThrowMPI(ierr);
543 ierr = MPI_Scatter(buffer.data(),
544 1,
545 MPI_UNSIGNED,
546 &n_recv_from,
547 1,
548 MPI_UNSIGNED,
549 0,
550 mpi_comm);
551 AssertThrowMPI(ierr);
552
553 return n_recv_from;
554# endif
555 }
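And a matching sketch for the counting variant, which only returns how many processes will send to the caller; note the assertion above, so the (placeholder) destinations must not contain the calling rank.

  const std::vector<unsigned int> destinations = {/* ranks we will send to */};
  const unsigned int n_incoming =
    dealii::Utilities::MPI::compute_n_point_to_point_communications(
      MPI_COMM_WORLD, destinations);
  // n_incoming anonymous-source receives are sufficient for the exchange.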
556
557
558
559 namespace
560 {
561 // custom MPI_Op for calculate_collective_mpi_min_max_avg
562 void
563 max_reduce(const void *in_lhs_,
564 void * inout_rhs_,
565 int * len,
566 MPI_Datatype *)
567 {
568 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
569 MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
570
571 for (int i = 0; i < *len; i++)
572 {
573 inout_rhs[i].sum += in_lhs[i].sum;
574 if (inout_rhs[i].min > in_lhs[i].min)
575 {
576 inout_rhs[i].min = in_lhs[i].min;
577 inout_rhs[i].min_index = in_lhs[i].min_index;
578 }
579 else if (inout_rhs[i].min == in_lhs[i].min)
580 {
581 // choose lower cpu index when tied to make operator commutative
582 if (inout_rhs[i].min_index > in_lhs[i].min_index)
583 inout_rhs[i].min_index = in_lhs[i].min_index;
584 }
585
586 if (inout_rhs[i].max < in_lhs[i].max)
587 {
588 inout_rhs[i].max = in_lhs[i].max;
589 inout_rhs[i].max_index = in_lhs[i].max_index;
590 }
591 else if (inout_rhs[i].max == in_lhs[i].max)
592 {
593 // choose lower cpu index when tied to make operator commutative
594 if (inout_rhs[i].max_index > in_lhs[i].max_index)
595 inout_rhs[i].max_index = in_lhs[i].max_index;
596 }
597 }
598 }
599 } // namespace
600
601
602
603 void
604 min_max_avg(const ArrayView<const double> &my_values,
605 const ArrayView<MinMaxAvg> & result,
606 const MPI_Comm & mpi_communicator)
607 {
608 // If MPI was not started, we have a serial computation and cannot run
609 // the other MPI commands
610 if (job_supports_mpi() == false ||
611 Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
612 {
613 for (unsigned int i = 0; i < my_values.size(); i++)
614 {
615 result[i].sum = my_values[i];
616 result[i].avg = my_values[i];
617 result[i].min = my_values[i];
618 result[i].max = my_values[i];
619 result[i].min_index = 0;
620 result[i].max_index = 0;
621 }
622 return;
623 }
624
625 /*
626 * A custom MPI datatype handle describing the memory layout of the
627 * MinMaxAvg struct. Initialized the first time control reaches the
628 * static variable. So hopefully not initialized too early.
629 */
630 static MPI_Datatype type = []() {
631 MPI_Datatype type;
632
633 int lengths[] = {3, 2, 1};
634
635 MPI_Aint displacements[] = {0,
636 offsetof(MinMaxAvg, min_index),
637 offsetof(MinMaxAvg, avg)};
638
639 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
640
641 int ierr =
642 MPI_Type_create_struct(3, lengths, displacements, types, &type);
643 AssertThrowMPI(ierr);
644
645 ierr = MPI_Type_commit(&type);
646 AssertThrowMPI(ierr);
647
648 /* Ensure that we free the allocated datatype again at the end of
649 * the program run just before we call MPI_Finalize():*/
650 MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
651 int ierr = MPI_Type_free(&type);
652 AssertThrowMPI(ierr);
653 });
654
655 return type;
656 }();
657
658 /*
659 * A custom MPI op handle for our max_reduce function.
660 * Initialized the first time control reaches the static variable. So
661 * hopefully not initialized too early.
662 */
663 static MPI_Op op = []() {
664 MPI_Op op;
665
666 int ierr =
667 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
668 true,
669 &op);
670 AssertThrowMPI(ierr);
671
672 /* Ensure that we free the allocated op again at the end of the
673 * program run just before we call MPI_Finalize():*/
674 MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
675 int ierr = MPI_Op_free(&op);
676 AssertThrowMPI(ierr);
677 });
678
679 return op;
680 }();
681
682 AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
683 Utilities::MPI::max(my_values.size(), mpi_communicator));
684
685 AssertDimension(my_values.size(), result.size());
686
687 // To avoid uninitialized values on some MPI implementations, provide
688 // result with a default value already...
689 MinMaxAvg dummy = {0.,
690 std::numeric_limits<double>::max(),
691 std::numeric_limits<double>::lowest(),
692 0,
693 0,
694 0.};
695
696 for (auto &i : result)
697 i = dummy;
698
699 const unsigned int my_id =
700 ::Utilities::MPI::this_mpi_process(mpi_communicator);
701 const unsigned int numproc =
702 ::Utilities::MPI::n_mpi_processes(mpi_communicator);
703
704 std::vector<MinMaxAvg> in(my_values.size());
705
706 for (unsigned int i = 0; i < my_values.size(); i++)
707 {
708 in[i].sum = in[i].min = in[i].max = my_values[i];
709 in[i].min_index = in[i].max_index = my_id;
710 }
711
712 int ierr = MPI_Allreduce(
713 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
714 AssertThrowMPI(ierr);
715
716 for (auto &r : result)
717 r.avg = r.sum / numproc;
718 }
719
720
721#else
722
723 unsigned int
724 n_mpi_processes(const MPI_Comm &)
725 {
726 return 1;
727 }
728
729
730
731 unsigned int
732 this_mpi_process(const MPI_Comm &)
733 {
734 return 0;
735 }
736
737
738
739 const std::vector<unsigned int>
740 mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
741 {
742 return std::vector<unsigned int>{0};
743 }
744
745
746
747 std::vector<IndexSet>
748 create_ascending_partitioning(const MPI_Comm & /*comm*/,
749 const IndexSet::size_type locally_owned_size)
750 {
751 return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
752 }
753
754 IndexSet
755 create_evenly_distributed_partitioning(const MPI_Comm & /*comm*/,
756 const IndexSet::size_type total_size)
757 {
758 return complete_index_set(total_size);
759 }
760
761
762
763 MPI_Comm
764 duplicate_communicator(const MPI_Comm &mpi_communicator)
765 {
766 return mpi_communicator;
767 }
768
769
770
771 void
772 free_communicator(MPI_Comm & /*mpi_communicator*/)
773 {}
774
775
776
777 void
778 min_max_avg(const ArrayView<const double> &my_values,
779 const ArrayView<MinMaxAvg> & result,
780 const MPI_Comm &)
781 {
782 AssertDimension(my_values.size(), result.size());
783
784 for (unsigned int i = 0; i < my_values.size(); i++)
785 {
786 result[i].sum = my_values[i];
787 result[i].avg = my_values[i];
788 result[i].min = my_values[i];
789 result[i].max = my_values[i];
790 result[i].min_index = 0;
791 result[i].max_index = 0;
792 }
793 }
794
795#endif
796
797 /* Force initialization of static struct: */
798 MPI_InitFinalize::Signals MPI_InitFinalize::signals =
799 MPI_InitFinalize::Signals();
800
801
802 MPI_InitFinalize::MPI_InitFinalize(int & argc,
803 char **& argv,
804 const unsigned int max_num_threads)
805 {
806 static bool constructor_has_already_run = false;
807 (void)constructor_has_already_run;
808 Assert(constructor_has_already_run == false,
809 ExcMessage("You can only create a single object of this class "
810 "in a program since it initializes the MPI system."));
811
812
813 int ierr = 0;
814#ifdef DEAL_II_WITH_MPI
815 // if we have PETSc, we will initialize it and let it handle MPI.
816 // Otherwise, we will do it.
817 int MPI_has_been_started = 0;
818 ierr = MPI_Initialized(&MPI_has_been_started);
819 AssertThrowMPI(ierr);
820 AssertThrow(MPI_has_been_started == 0,
821 ExcMessage("MPI error. You can only start MPI once!"));
822
823 int provided;
824 // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
825 // we might use several threads but never call two MPI functions at the
826 // same time. For an explanation of why we do this, see
827 // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
828 int wanted = MPI_THREAD_SERIALIZED;
829 ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
830 AssertThrowMPI(ierr);
831
832 // disable for now because at least some implementations always return
833 // MPI_THREAD_SINGLE.
834 // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
835 // ExcMessage("MPI reports that we are not allowed to use multiple
836 // threads."));
837#else
838 // make sure the compiler doesn't warn about these variables
839 (void)argc;
840 (void)argv;
841 (void)ierr;
842#endif
843
844 // we are allowed to call MPI_Init ourselves and PETScInitialize will
845 // detect this. This allows us to use MPI_Init_thread instead.
846#ifdef DEAL_II_WITH_PETSC
847# ifdef DEAL_II_WITH_SLEPC
848 // Initialize SLEPc (with PETSc):
849 ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
850 AssertThrow(ierr == 0, ExcSLEPcError(ierr));
851# else
852 // or just initialize PETSc alone:
853 ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
854 AssertThrow(ierr == 0, ExcPETScError(ierr));
855# endif
856
857 // Disable PETSc exception handling. This just prints a large wall
858 // of text that is not particularly helpful for what we do:
859 PetscPopSignalHandler();
860#endif
861
862 // Initialize zoltan
863#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
864 float version;
865 Zoltan_Initialize(argc, argv, &version);
866#endif
867
868#ifdef DEAL_II_WITH_P4EST
869 // Initialize p4est and libsc components
870# if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
871# else
872 // This feature is broken in version 2.0.0 for calls to
873 // MPI_Comm_create_group (see cburstedde/p4est#30).
874 // Disabling it leads to more verbose p4est error messages
875 // which should be fine.
876 sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
877# endif
878 p4est_init(nullptr, SC_LP_SILENT);
879#endif
880
881 constructor_has_already_run = true;
882
883
884 // Now also see how many threads we'd like to run
885 if (max_num_threads != numbers::invalid_unsigned_int)
886 {
887 // set maximum number of threads (also respecting the environment
888 // variable that the called function evaluates) based on what the
889 // user asked
890 MultithreadInfo::set_thread_limit(max_num_threads);
891 }
892 else
893 // user wants automatic choice
894 {
895#ifdef DEAL_II_WITH_MPI
896 // we need to figure out how many MPI processes there are on the
897 // current node, as well as how many CPU cores we have. for the
898 // first task, check what get_hostname() returns and then do an
899 // allgather so each processor gets the answer
900 //
901 // in calculating the length of the string, don't forget the
902 // terminating \0 on C-style strings
903 const std::string hostname = Utilities::System::get_hostname();
904 const unsigned int max_hostname_size =
905 Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
906 std::vector<char> hostname_array(max_hostname_size);
907 std::copy(hostname.c_str(),
908 hostname.c_str() + hostname.size() + 1,
909 hostname_array.begin());
910
911 std::vector<char> all_hostnames(max_hostname_size *
912 MPI::n_mpi_processes(MPI_COMM_WORLD));
913 const int ierr = MPI_Allgather(hostname_array.data(),
914 max_hostname_size,
915 MPI_CHAR,
916 all_hostnames.data(),
917 max_hostname_size,
918 MPI_CHAR,
919 MPI_COMM_WORLD);
920 AssertThrowMPI(ierr);
921
922 // count how often our own hostname appears and which instance of it
923 // the current process represents
924 unsigned int n_local_processes = 0;
925 unsigned int nth_process_on_host = 0;
926 for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
927 ++i)
928 if (std::string(all_hostnames.data() + i * max_hostname_size) ==
929 hostname)
930 {
931 ++n_local_processes;
932 if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
933 ++nth_process_on_host;
934 }
935 Assert(nth_process_on_host > 0, ExcInternalError());
936
937
938 // compute how many cores each process gets. if the number does not
939 // divide evenly, then we get one more core if we are among the
940 // first few processes
941 //
942 // if the number would be zero, round up to one since every process
943 // needs to have at least one thread
944 const unsigned int n_threads =
945 std::max(MultithreadInfo::n_cores() / n_local_processes +
946 (nth_process_on_host <=
947 MultithreadInfo::n_cores() % n_local_processes ?
948 1 :
949 0),
950 1U);
951#else
952 const unsigned int n_threads = MultithreadInfo::n_cores();
953#endif
954
955 // finally set this number of threads
956 MultithreadInfo::set_thread_limit(n_threads);
957 }
958
959 // As a final step call the at_mpi_init() signal handler.
960 signals.at_mpi_init();
961 }
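The canonical way this constructor is used, shown as a sketch (assuming the usual #include <deal.II/base/mpi.h> in user code):

  int main(int argc, char **argv)
  {
    // Limit each process to a single thread; omit the last argument to get
    // the automatic per-node thread count computed above.
    dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
    // ... parallel program; MPI_Finalize() runs in the destructor ...
    return 0;
  }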
962
963
964
965 void
966 MPI_InitFinalize::register_request(MPI_Request &request)
967 {
968 // insert if it is not in the set already:
969 requests.insert(&request);
970 }
971
972
973
974 void
975 MPI_InitFinalize::unregister_request(MPI_Request &request)
976 {
977 Assert(
978 requests.find(&request) != requests.end(),
979 ExcMessage(
980 "You tried to call unregister_request() with an invalid request."));
981
982 requests.erase(&request);
983 }
984
985
986
987 std::set<MPI_Request *> MPI_InitFinalize::requests;
988
989
990
991 MPI_InitFinalize::~MPI_InitFinalize()
992 {
993 // First, call the at_mpi_finalize() signal handler.
994 signals.at_mpi_finalize();
995
996 // make memory pool release all PETSc/Trilinos/MPI-based vectors that
997 // are no longer used at this point. this is relevant because the static
998 // object destructors run for these vectors at the end of the program
999 // would run after MPI_Finalize is called, leading to errors
1000
1001#ifdef DEAL_II_WITH_MPI
1002 // Before exiting, wait for nonblocking communication to complete:
1003 for (auto request : requests)
1004 {
1005 const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
1006 AssertThrowMPI(ierr);
1007 }
1008
1009 // Start with deal.II MPI vectors and delete vectors from the pools:
1010 GrowingVectorMemory<
1011 LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
1012 GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
1013 release_unused_memory();
1014 GrowingVectorMemory<
1015 LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
1016 GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
1017 release_unused_memory();
1018
1019 // Next with Trilinos:
1020# if defined(DEAL_II_WITH_TRILINOS)
1021 GrowingVectorMemory<
1022 TrilinosWrappers::MPI::Vector>::release_unused_memory();
1023 GrowingVectorMemory<
1024 TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
1025# endif
1026#endif
1027
1028
1029 // Now deal with PETSc (with or without MPI). Only delete the vectors if
1030 // finalize hasn't been called yet, otherwise this will lead to errors.
1031#ifdef DEAL_II_WITH_PETSC
1032 if ((PetscInitializeCalled == PETSC_TRUE) &&
1033 (PetscFinalizeCalled == PETSC_FALSE))
1034 {
1035 GrowingVectorMemory<
1036 PETScWrappers::MPI::Vector>::release_unused_memory();
1037 GrowingVectorMemory<
1038 PETScWrappers::MPI::BlockVector>::release_unused_memory();
1039
1040# ifdef DEAL_II_WITH_SLEPC
1041 // and now end SLEPc (with PETSc)
1042 SlepcFinalize();
1043# else
1044 // or just end PETSc.
1045 PetscFinalize();
1046# endif
1047 }
1048#endif
1049
1050// There is a similar issue with CUDA: The destructor of static objects might
1051// run after the CUDA driver is unloaded. Hence, also release all memory
1052// related to CUDA vectors.
1053#ifdef DEAL_II_WITH_CUDA
1054 GrowingVectorMemory<
1055 LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
1056 release_unused_memory();
1057 GrowingVectorMemory<
1058 LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
1059 release_unused_memory();
1060#endif
1061
1062#ifdef DEAL_II_WITH_P4EST
1063 // now end p4est and libsc
1064 // Note: p4est has no finalize function
1065 sc_finalize();
1066#endif
1067
1068
1069 // only MPI_Finalize if we are running with MPI. We also need to do this
1070 // when running PETSc, because we initialize MPI ourselves before
1071 // calling PetscInitialize
1072#ifdef DEAL_II_WITH_MPI
1073 if (job_supports_mpi() == true)
1074 {
1075# if __cpp_lib_uncaught_exceptions >= 201411
1076 // std::uncaught_exception() is deprecated in c++17
1077 if (std::uncaught_exceptions() > 0)
1078# else
1079 if (std::uncaught_exception() == true)
1080# endif
1081 {
1082 // do not try to call MPI_Finalize to avoid a deadlock.
1083 }
1084 else
1085 {
1086 const int ierr = MPI_Finalize();
1087 (void)ierr;
1088 AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1089 }
1090 }
1091#endif
1092 }
1093
1094
1095
1096 bool
1097 job_supports_mpi()
1098 {
1099#ifdef DEAL_II_WITH_MPI
1100 int MPI_has_been_started = 0;
1101 const int ierr = MPI_Initialized(&MPI_has_been_started);
1102 AssertThrowMPI(ierr);
1103
1104 return (MPI_has_been_started > 0);
1105#else
1106 return false;
1107#endif
1108 }
1109
1110
1111
1112 std::vector<unsigned int>
1113 compute_index_owner(const IndexSet &owned_indices,
1114 const IndexSet &indices_to_look_up,
1115 const MPI_Comm &comm)
1116 {
1117 Assert(owned_indices.size() == indices_to_look_up.size(),
1118 ExcMessage("IndexSets have to have the same sizes."));
1119
1120 Assert(
1121 owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1122 ExcMessage("IndexSets have to have the same size on all processes."));
1123
1124 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1125
1126 // Step 1: setup dictionary
1127 // The input owned_indices can be partitioned arbitrarily. In the
1128 // dictionary, the index set is statically repartitioned among the
1129 // processes again and extended with information about the actual owner
1130 // of the index.
1131 internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1132 owned_indices, indices_to_look_up, comm, owning_ranks);
1133
1134 // Step 2: read dictionary
1135 // Communicate with the process who owns the index in the static
1136 // partition (i.e. in the dictionary). This process returns the actual
1137 // owner of the index.
1138 ConsensusAlgorithms::Selector<
1139 std::pair<types::global_dof_index, types::global_dof_index>,
1140 unsigned int>
1141 consensus_algorithm(process, comm);
1142 consensus_algorithm.run();
1143
1144 return owning_ranks;
1145 }
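A usage sketch for compute_index_owner(); the sizes and contents of the two IndexSets are placeholders, but both sets must have the same global size on all processes.

  dealii::IndexSet owned(100);   // indices this process owns (placeholder)
  dealii::IndexSet queries(100); // indices whose owner we want to know
  // ... fill `owned` and `queries` ...
  const std::vector<unsigned int> owners =
    dealii::Utilities::MPI::compute_index_owner(owned, queries, MPI_COMM_WORLD);
  // owners[k] is the rank owning the k-th element of `queries`.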
1146
1147
1148
1149 CollectiveMutex::CollectiveMutex()
1150 : locked(false)
1151 , request(MPI_REQUEST_NULL)
1152 {
1153 MPI_InitFinalize::register_request(request);
1154 }
1155
1156
1157
1158 CollectiveMutex::~CollectiveMutex()
1159 {
1160 Assert(
1161 !locked,
1162 ExcMessage(
1163 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1164
1165 MPI_InitFinalize::unregister_request(request);
1166 }
1167
1168
1169
1170 void
1171 CollectiveMutex::lock(const MPI_Comm &comm)
1172 {
1173 (void)comm;
1174
1175 Assert(
1176 !locked,
1177 ExcMessage(
1178 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1179
1180#ifdef DEAL_II_WITH_MPI
1181
1182 // TODO: For now, we implement this mutex with a blocking barrier
1183 // in the lock and unlock. It remains to be tested whether we can move
1184 // to a nonblocking barrier (code disabled below).
1185
1186 const int ierr = MPI_Barrier(comm);
1187 AssertThrowMPI(ierr);
1188
1189# if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1190 // wait for non-blocking barrier to finish. This is a noop the
1191 // first time we lock().
1192 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1193 AssertThrowMPI(ierr);
1194# else
1195 // nothing to do as blocking barrier already completed
1196# endif
1197#endif
1198
1199 locked = true;
1200 }
1201
1202
1203
1204 void
1205 CollectiveMutex::unlock(const MPI_Comm &comm)
1206 {
1207 (void)comm;
1208
1209 Assert(
1210 locked,
1211 ExcMessage(
1212 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1213
1214#ifdef DEAL_II_WITH_MPI
1215
1216 // TODO: For now, we implement this mutex with a blocking barrier
1217 // in the lock and unlock. It remains to be tested whether we can move
1218 // to a nonblocking barrier (code disabled below):
1219
1220# if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1221 const int ierr = MPI_Ibarrier(comm, &request);
1222 AssertThrowMPI(ierr);
1223# else
1224 const int ierr = MPI_Barrier(comm);
1225 AssertThrowMPI(ierr);
1226# endif
1227#endif
1228
1229 locked = false;
1230 }
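In practice the mutex is used through the RAII helper seen earlier in this file, which guarantees that unlock() runs even if the protected section throws; a sketch:

  static dealii::Utilities::MPI::CollectiveMutex mutex;
  {
    dealii::Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex,
                                                             MPI_COMM_WORLD);
    // ... point-to-point communication that must not interleave with other
    //     critical sections protected by the same mutex ...
  } // unlock() is called here by ~ScopedLock()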
1231
1232
1233#ifndef DOXYGEN
1234 // explicit instantiations
1235 template bool
1236 logical_or<bool>(const bool &, const MPI_Comm &);
1237
1238
1239 template void
1240 logical_or<bool>(const ArrayView<const bool> &,
1241 const MPI_Comm &,
1242 const ArrayView<bool> &);
1243
1244
1245 template std::vector<unsigned int>
1246 compute_set_union(const std::vector<unsigned int> &vec,
1247 const MPI_Comm & comm);
1248
1249
1250 template std::set<unsigned int>
1251 compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1252#endif
1253
1254#include "mpi.inst"
1255 } // end of namespace MPI
1256} // end of namespace Utilities
1257
1258DEAL_II_NAMESPACE_CLOSE