Reference documentation for deal.II version GIT a189bc2bdf 2022-12-07 02:45:02+00:00
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_large_count.h>
23 #include <deal.II/base/mpi_tags.h>
25 #include <deal.II/base/utilities.h>
26 
27 #include <deal.II/lac/la_parallel_block_vector.h>
28 #include <deal.II/lac/la_parallel_vector.h>
29 #include <deal.II/lac/vector_memory.h>
30 
31 #include <boost/serialization/utility.hpp>
32 
33 #include <iostream>
34 #include <limits>
35 #include <numeric>
36 #include <set>
37 #include <vector>
38 
39 #ifdef DEAL_II_WITH_TRILINOS
40 # ifdef DEAL_II_WITH_MPI
41 # include <deal.II/lac/trilinos_parallel_block_vector.h>
42 # include <deal.II/lac/trilinos_vector.h>
43 
44 # include <Epetra_MpiComm.h>
45 # endif
46 #endif
47 
48 #ifdef DEAL_II_WITH_PETSC
49 # include <deal.II/lac/petsc_block_vector.h>
50 # include <deal.II/lac/petsc_vector.h>
51 
52 # include <petscsys.h>
53 #endif
54 
55 #ifdef DEAL_II_WITH_SLEPC
56 # include <deal.II/lac/slepc_solver.h>
57 
58 # include <slepcsys.h>
59 #endif
60 
61 #ifdef DEAL_II_WITH_P4EST
62 # include <p4est_bits.h>
63 #endif
64 
65 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
66 # include <zoltan_cpp.h>
67 #endif
68 
69 DEAL_II_NAMESPACE_OPEN
70 
71 
72 namespace Utilities
73 {
74  IndexSet
 75  create_evenly_distributed_partitioning(
76  const unsigned int my_partition_id,
77  const unsigned int n_partitions,
78  const types::global_dof_index total_size)
79  {
80  static_assert(
81  std::is_same<types::global_dof_index, IndexSet::size_type>::value,
82  "IndexSet::size_type must match types::global_dof_index for "
83  "using this function");
84  const unsigned int remain = total_size % n_partitions;
85 
86  const IndexSet::size_type min_size = total_size / n_partitions;
87 
 88  const IndexSet::size_type begin =
89  min_size * my_partition_id + std::min(my_partition_id, remain);
90  const IndexSet::size_type end =
91  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
92  IndexSet result(total_size);
93  result.add_range(begin, end);
94  return result;
95  }
96 
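  // A minimal usage sketch (the values below are hypothetical, not part of
  // the library sources): split 10 indices evenly across 3 partitions. The
  // remainder 10 % 3 == 1 goes to the first partition, so the resulting
  // ranges are [0,4), [4,7), [7,10).
  //
  //   const IndexSet owned =
  //     Utilities::create_evenly_distributed_partitioning(
  //       /*my_partition_id=*/1, /*n_partitions=*/3, /*total_size=*/10);
  //   // owned now holds the half-open range [4,7), i.e. 3 elements.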
97  namespace MPI
98  {
99 #ifdef DEAL_II_WITH_MPI
100  // Provide definitions of template variables for all valid instantiations.
101  template const MPI_Datatype mpi_type_id_for_type<bool>;
102  template const MPI_Datatype mpi_type_id_for_type<char>;
103  template const MPI_Datatype mpi_type_id_for_type<signed char>;
104  template const MPI_Datatype mpi_type_id_for_type<short>;
105  template const MPI_Datatype mpi_type_id_for_type<int>;
106  template const MPI_Datatype mpi_type_id_for_type<long int>;
107  template const MPI_Datatype mpi_type_id_for_type<unsigned char>;
108  template const MPI_Datatype mpi_type_id_for_type<unsigned short>;
109  template const MPI_Datatype mpi_type_id_for_type<unsigned long int>;
110  template const MPI_Datatype mpi_type_id_for_type<unsigned long long int>;
111  template const MPI_Datatype mpi_type_id_for_type<float>;
112  template const MPI_Datatype mpi_type_id_for_type<double>;
113  template const MPI_Datatype mpi_type_id_for_type<long double>;
114  template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
115  template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
116 #endif
117 
118 
119  MinMaxAvg
120  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
121  {
122  MinMaxAvg result;
 123  min_max_avg(ArrayView<const double>(my_value),
124  ArrayView<MinMaxAvg>(result),
125  mpi_communicator);
126 
127  return result;
128  }
129 
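  // A usage sketch (the local quantity is an assumed example): reduce one
  // double per rank into global statistics with a single call.
  //
  //   const double local_value =
  //     1.0 * Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  //   const Utilities::MPI::MinMaxAvg stats =
  //     Utilities::MPI::min_max_avg(local_value, MPI_COMM_WORLD);
  //   // stats.min, stats.max, stats.avg and stats.sum summarize the value
  //   // over all ranks; stats.min_index/max_index are the ranks that
  //   // attain the extremes.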
130 
131 
132  std::vector<MinMaxAvg>
133  min_max_avg(const std::vector<double> &my_values,
134  const MPI_Comm & mpi_communicator)
135  {
136  std::vector<MinMaxAvg> results(my_values.size());
137  min_max_avg(my_values, results, mpi_communicator);
138 
139  return results;
140  }
141 
142 
143 
144 #ifdef DEAL_II_WITH_MPI
145  unsigned int
146  n_mpi_processes(const MPI_Comm &mpi_communicator)
147  {
148  int n_jobs = 1;
149  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
150  AssertThrowMPI(ierr);
151 
152  return n_jobs;
153  }
154 
155 
156  unsigned int
157  this_mpi_process(const MPI_Comm &mpi_communicator)
158  {
159  int rank = 0;
160  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
161  AssertThrowMPI(ierr);
162 
163  return rank;
164  }
165 
166 
167 
168  const std::vector<unsigned int>
169  mpi_processes_within_communicator(const MPI_Comm &comm_large,
170  const MPI_Comm &comm_small)
171  {
172  if (Utilities::MPI::job_supports_mpi() == false)
173  return std::vector<unsigned int>{0};
174 
175  const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
176  const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
177 
178  std::vector<unsigned int> ranks(size);
179  const int ierr = MPI_Allgather(
180  &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
181  AssertThrowMPI(ierr);
182 
183  return ranks;
184  }
185 
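  // A usage sketch (the communicator split is hypothetical): list which
  // world ranks ended up in a sub-communicator built with MPI_Comm_split.
  //
  //   const unsigned int rank =
  //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  //   MPI_Comm comm_small;
  //   MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &comm_small);
  //   const std::vector<unsigned int> members =
  //     Utilities::MPI::mpi_processes_within_communicator(MPI_COMM_WORLD,
  //                                                       comm_small);
  //   // On even world ranks: {0, 2, 4, ...}; on odd ranks: {1, 3, 5, ...}
  //   MPI_Comm_free(&comm_small);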
186 
187 
188  MPI_Comm
189  duplicate_communicator(const MPI_Comm &mpi_communicator)
190  {
191  MPI_Comm new_communicator;
192  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
193  AssertThrowMPI(ierr);
194  return new_communicator;
195  }
196 
197 
198 
199  void
200  free_communicator(MPI_Comm &mpi_communicator)
201  {
202  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
203  const int ierr = MPI_Comm_free(&mpi_communicator);
204  AssertThrowMPI(ierr);
205  }
206 
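  // A usage sketch (mpi_comm stands for whatever communicator the caller
  // uses): work on a private copy of the communicator so that messages sent
  // inside a library routine cannot collide with the caller's traffic, then
  // release the copy again.
  //
  //   MPI_Comm dup = Utilities::MPI::duplicate_communicator(mpi_comm);
  //   // ... point-to-point communication on 'dup' ...
  //   Utilities::MPI::free_communicator(dup);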
207 
208 
209  int
210  create_group(const MPI_Comm & comm,
211  const MPI_Group &group,
212  const int tag,
213  MPI_Comm * new_comm)
214  {
215  const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
216  AssertThrowMPI(ierr);
217  return ierr;
218  }
219 
220 
221 
222  std::vector<IndexSet>
 223  create_ascending_partitioning(
224  const MPI_Comm & comm,
225  const types::global_dof_index locally_owned_size)
226  {
227  static_assert(
228  std::is_same<types::global_dof_index, IndexSet::size_type>::value,
229  "IndexSet::size_type must match types::global_dof_index for "
230  "using this function");
231  const unsigned int n_proc = n_mpi_processes(comm);
232  const std::vector<IndexSet::size_type> sizes =
233  all_gather(comm, locally_owned_size);
234  const auto total_size =
235  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
236 
237  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
238 
 239  IndexSet::size_type begin = 0;
240  for (unsigned int i = 0; i < n_proc; ++i)
241  {
242  res[i].add_range(begin, begin + sizes[i]);
243  begin = begin + sizes[i];
244  }
245 
246  return res;
247  }
248 
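  // A sketch of the result (the per-rank sizes are hypothetical): if ranks
  // 0, 1 and 2 pass locally_owned_size = 2, 3 and 1, then every rank
  // receives the same vector of IndexSets,
  //
  //   const std::vector<IndexSet> partition =
  //     Utilities::MPI::create_ascending_partitioning(comm, my_n_owned);
  //   // partition[0] == [0,2), partition[1] == [2,5), partition[2] == [5,6)
  //
  // where my_n_owned is the rank-local count listed above.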
249 
250 
251  IndexSet
 252  create_evenly_distributed_partitioning(
253  const MPI_Comm & comm,
254  const types::global_dof_index total_size)
255  {
256  const unsigned int this_proc = this_mpi_process(comm);
257  const unsigned int n_proc = n_mpi_processes(comm);
258 
 259  return Utilities::create_evenly_distributed_partitioning(this_proc,
260  n_proc,
261  total_size);
262  }
263 
264 
265 
266  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
267  create_mpi_data_type_n_bytes(const std::size_t n_bytes)
268  {
269  MPI_Datatype result;
270  int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
271  AssertThrowMPI(ierr);
272  ierr = MPI_Type_commit(&result);
273  AssertThrowMPI(ierr);
274 
275 # ifdef DEBUG
276  MPI_Count size64;
277  ierr = MPI_Type_size_x(result, &size64);
278  AssertThrowMPI(ierr);
279 
280  Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
281 # endif
282 
283  // Now put the new data type into a std::unique_ptr with a custom
284  // deleter. We call the std::unique_ptr constructor that as first
285  // argument takes a pointer (here, a pointer to a copy of the `result`
 286  // object), and as second argument a pointer-to-function, for which
287  // we here use a lambda function without captures that acts as the
288  // 'deleter' object: it calls `MPI_Type_free` and then deletes the
289  // pointer. To avoid a compiler warning about a null this pointer
 290  // in the lambda (which doesn't make sense: the lambda doesn't store
291  // anything), we create the deleter first.
292  auto deleter = [](MPI_Datatype *p) {
293  if (p != nullptr)
294  {
295  const int ierr = MPI_Type_free(p);
296  (void)ierr;
297  AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
298 
299  delete p;
300  }
301  };
302 
303  return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
304  new MPI_Datatype(result), deleter);
305  }
306 
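  // A usage sketch (buffer size and peer rank are assumed values): send a
  // large byte buffer as a single element of the contiguous type, which
  // sidesteps the 'int' count limit of the plain MPI_Send signature.
  //
  //   std::vector<char> buffer(n_bytes);
  //   const auto type =
  //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
  //   const int ierr =
  //     MPI_Send(buffer.data(), 1, *type, /*dest=*/1, /*tag=*/0, comm);
  //   AssertThrowMPI(ierr);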
307 
308 
309  std::vector<unsigned int>
 310  compute_point_to_point_communication_pattern(
311  const MPI_Comm & mpi_comm,
312  const std::vector<unsigned int> &destinations)
313  {
314  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
315  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
316  (void)myid;
317  (void)n_procs;
318 
319  for (const unsigned int destination : destinations)
320  {
321  (void)destination;
322  AssertIndexRange(destination, n_procs);
323  }
324 
325 
326  // Have a little function that checks if destinations provided
327  // to the current process are unique. The way it does this is
328  // to create a sorted list of destinations and then walk through
329  // the list and look at successive elements -- if we find the
330  // same number twice, we know that the destinations were not
331  // unique
332  const bool my_destinations_are_unique = [destinations]() {
333  if (destinations.size() == 0)
334  return true;
335  else
336  {
337  std::vector<unsigned int> my_destinations = destinations;
338  std::sort(my_destinations.begin(), my_destinations.end());
339  return (std::adjacent_find(my_destinations.begin(),
340  my_destinations.end()) ==
341  my_destinations.end());
342  }
343  }();
344 
345  // If all processes report that they have unique destinations,
346  // then we can short-cut the process using a consensus algorithm (which
347  // is implemented only for the case of unique destinations):
348  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
349  1)
350  {
351  return ConsensusAlgorithms::nbx<char, char>(
352  destinations, {}, {}, {}, mpi_comm);
353  }
354 
355  // So we need to run a different algorithm, specifically one that
356  // requires more memory -- MPI_Reduce_scatter_block will require memory
357  // proportional to the number of processes involved; that function is
358  // available for MPI 2.2 or later:
359  static CollectiveMutex mutex;
360  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
361 
362  const int mpi_tag =
 363  internal::Tags::compute_point_to_point_communication_pattern;
364 
365  // Calculate the number of messages to send to each process
366  std::vector<unsigned int> dest_vector(n_procs);
367  for (const auto &el : destinations)
368  ++dest_vector[el];
369 
370  // Find how many processes will send to this one
371  // by reducing with sum and then scattering the
372  // results over all processes
373  unsigned int n_recv_from;
374  const int ierr = MPI_Reduce_scatter_block(
375  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
376 
377  AssertThrowMPI(ierr);
378 
379  // Send myid to every process in `destinations` vector...
380  std::vector<MPI_Request> send_requests(destinations.size());
381  for (const auto &el : destinations)
382  {
383  const int ierr =
384  MPI_Isend(&myid,
385  1,
386  MPI_UNSIGNED,
387  el,
388  mpi_tag,
389  mpi_comm,
390  send_requests.data() + (&el - destinations.data()));
391  AssertThrowMPI(ierr);
392  }
393 
394 
395  // Receive `n_recv_from` times from the processes
396  // who communicate with this one. Store the obtained id's
397  // in the resulting vector
398  std::vector<unsigned int> origins(n_recv_from);
399  for (auto &el : origins)
400  {
401  const int ierr = MPI_Recv(&el,
402  1,
403  MPI_UNSIGNED,
404  MPI_ANY_SOURCE,
405  mpi_tag,
406  mpi_comm,
407  MPI_STATUS_IGNORE);
408  AssertThrowMPI(ierr);
409  }
410 
411  if (destinations.size() > 0)
412  {
413  const int ierr = MPI_Waitall(destinations.size(),
414  send_requests.data(),
415  MPI_STATUSES_IGNORE);
416  AssertThrowMPI(ierr);
417  }
418 
419  return origins;
420  }
421 
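  // A usage sketch (the ring pattern is hypothetical): every rank announces
  // that it will send to its right neighbor; the function then tells each
  // rank who will be sending to it.
  //
  //   const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
  //   const unsigned int n    = Utilities::MPI::n_mpi_processes(mpi_comm);
  //   const std::vector<unsigned int> destinations = {(myid + 1) % n};
  //   const std::vector<unsigned int> origins =
  //     Utilities::MPI::compute_point_to_point_communication_pattern(
  //       mpi_comm, destinations);
  //   // origins holds one entry, the left neighbor (myid + n - 1) % n.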
422 
423 
424  unsigned int
 425  compute_n_point_to_point_communications(
426  const MPI_Comm & mpi_comm,
427  const std::vector<unsigned int> &destinations)
428  {
429  // Have a little function that checks if destinations provided
430  // to the current process are unique:
431  const bool my_destinations_are_unique = [destinations]() {
432  std::vector<unsigned int> my_destinations = destinations;
433  const unsigned int n_destinations = my_destinations.size();
434  std::sort(my_destinations.begin(), my_destinations.end());
435  my_destinations.erase(std::unique(my_destinations.begin(),
436  my_destinations.end()),
437  my_destinations.end());
438  return (my_destinations.size() == n_destinations);
439  }();
440 
441  // If all processes report that they have unique destinations,
442  // then we can short-cut the process using a consensus algorithm:
443 
444  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
445  1)
446  {
447  return ConsensusAlgorithms::nbx<char, char>(
448  destinations, {}, {}, {}, mpi_comm)
449  .size();
450  }
451  else
452  {
453  const unsigned int n_procs =
 454  Utilities::MPI::n_mpi_processes(mpi_comm);
455 
456  for (const unsigned int destination : destinations)
457  {
458  (void)destination;
459  AssertIndexRange(destination, n_procs);
460  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
461  ExcMessage(
462  "There is no point in communicating with ourselves."));
463  }
464 
465  // Calculate the number of messages to send to each process
466  std::vector<unsigned int> dest_vector(n_procs);
467  for (const auto &el : destinations)
468  ++dest_vector[el];
469 
470  // Find out how many processes will send to this one
471  // MPI_Reduce_scatter(_block) does exactly this
472  unsigned int n_recv_from = 0;
473 
474  const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
475  &n_recv_from,
476  1,
477  MPI_UNSIGNED,
478  MPI_SUM,
479  mpi_comm);
480 
481  AssertThrowMPI(ierr);
482 
483  return n_recv_from;
484  }
485  }
486 
487 
488 
489  namespace
490  {
 491  // custom MPI_Op for calculate_collective_mpi_min_max_avg
492  void
493  max_reduce(const void *in_lhs_,
494  void * inout_rhs_,
495  int * len,
496  MPI_Datatype *)
497  {
498  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
499  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
500 
501  for (int i = 0; i < *len; ++i)
502  {
503  inout_rhs[i].sum += in_lhs[i].sum;
504  if (inout_rhs[i].min > in_lhs[i].min)
505  {
506  inout_rhs[i].min = in_lhs[i].min;
507  inout_rhs[i].min_index = in_lhs[i].min_index;
508  }
509  else if (inout_rhs[i].min == in_lhs[i].min)
510  {
511  // choose lower cpu index when tied to make operator commutative
512  if (inout_rhs[i].min_index > in_lhs[i].min_index)
513  inout_rhs[i].min_index = in_lhs[i].min_index;
514  }
515 
516  if (inout_rhs[i].max < in_lhs[i].max)
517  {
518  inout_rhs[i].max = in_lhs[i].max;
519  inout_rhs[i].max_index = in_lhs[i].max_index;
520  }
521  else if (inout_rhs[i].max == in_lhs[i].max)
522  {
523  // choose lower cpu index when tied to make operator commutative
524  if (inout_rhs[i].max_index > in_lhs[i].max_index)
525  inout_rhs[i].max_index = in_lhs[i].max_index;
526  }
527  }
528  }
529  } // namespace
530 
531 
532 
533  void
 534  min_max_avg(const ArrayView<const double> &my_values,
535  const ArrayView<MinMaxAvg> & result,
536  const MPI_Comm & mpi_communicator)
537  {
538  // If MPI was not started, we have a serial computation and cannot run
539  // the other MPI commands
540  if (job_supports_mpi() == false ||
541  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
542  {
543  for (unsigned int i = 0; i < my_values.size(); ++i)
544  {
545  result[i].sum = my_values[i];
546  result[i].avg = my_values[i];
547  result[i].min = my_values[i];
548  result[i].max = my_values[i];
549  result[i].min_index = 0;
550  result[i].max_index = 0;
551  }
552  return;
553  }
554 
555  /*
556  * A custom MPI datatype handle describing the memory layout of the
 557  * MinMaxAvg struct. Initialized on the first pass in which control
 558  * reaches the static variable, so it is not initialized too early.
559  */
560  static MPI_Datatype type = []() {
561  MPI_Datatype type;
562 
563  int lengths[] = {3, 2, 1};
564 
565  MPI_Aint displacements[] = {0,
566  offsetof(MinMaxAvg, min_index),
567  offsetof(MinMaxAvg, avg)};
568 
569  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
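  // (The three blocks correspond to the member order of MinMaxAvg: three
  //  leading doubles sum/min/max, two integer indices min_index/max_index,
  //  and one trailing double avg.)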
570 
571  int ierr =
572  MPI_Type_create_struct(3, lengths, displacements, types, &type);
573  AssertThrowMPI(ierr);
574 
575  ierr = MPI_Type_commit(&type);
576  AssertThrowMPI(ierr);
577 
578  /* Ensure that we free the allocated datatype again at the end of
579  * the program run just before we call MPI_Finalize():*/
580  MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
581  int ierr = MPI_Type_free(&type);
582  AssertThrowMPI(ierr);
583  });
584 
585  return type;
586  }();
587 
588  /*
589  * A custom MPI op handle for our max_reduce function.
 590  * Initialized on the first pass in which control reaches the static
 591  * variable, so it is not initialized too early.
592  */
593  static MPI_Op op = []() {
594  MPI_Op op;
595 
596  int ierr =
597  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
598  static_cast<int>(true),
599  &op);
600  AssertThrowMPI(ierr);
601 
602  /* Ensure that we free the allocated op again at the end of the
603  * program run just before we call MPI_Finalize():*/
604  MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
605  int ierr = MPI_Op_free(&op);
606  AssertThrowMPI(ierr);
607  });
608 
609  return op;
610  }();
611 
612  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
613  Utilities::MPI::max(my_values.size(), mpi_communicator));
614 
615  AssertDimension(my_values.size(), result.size());
616 
617  // To avoid uninitialized values on some MPI implementations, provide
618  // result with a default value already...
619  MinMaxAvg dummy = {0.,
 620  std::numeric_limits<double>::max(),
621  std::numeric_limits<double>::lowest(),
622  0,
623  0,
624  0.};
625 
626  for (auto &i : result)
627  i = dummy;
628 
629  const unsigned int my_id =
630  ::Utilities::MPI::this_mpi_process(mpi_communicator);
631  const unsigned int numproc =
632  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
633 
634  std::vector<MinMaxAvg> in(my_values.size());
635 
636  for (unsigned int i = 0; i < my_values.size(); ++i)
637  {
638  in[i].sum = in[i].min = in[i].max = my_values[i];
639  in[i].min_index = in[i].max_index = my_id;
640  }
641 
642  int ierr = MPI_Allreduce(
643  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
644  AssertThrowMPI(ierr);
645 
646  for (auto &r : result)
647  r.avg = r.sum / numproc;
648  }
649 
650 
651 #else
652 
653  unsigned int
654  n_mpi_processes(const MPI_Comm &)
655  {
656  return 1;
657  }
658 
659 
660 
661  unsigned int
662  this_mpi_process(const MPI_Comm &)
663  {
664  return 0;
665  }
666 
667 
668 
669  const std::vector<unsigned int>
670  mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
671  {
672  return std::vector<unsigned int>{0};
673  }
674 
675 
676 
677  std::vector<IndexSet>
 678  create_ascending_partitioning(
679  const MPI_Comm & /*comm*/,
680  const types::global_dof_index locally_owned_size)
681  {
682  return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
683  }
684 
685  IndexSet
 686  create_evenly_distributed_partitioning(
687  const MPI_Comm & /*comm*/,
688  const types::global_dof_index total_size)
689  {
690  return complete_index_set(total_size);
691  }
692 
693 
694 
695  MPI_Comm
696  duplicate_communicator(const MPI_Comm &mpi_communicator)
697  {
698  return mpi_communicator;
699  }
700 
701 
702 
703  void
704  free_communicator(MPI_Comm & /*mpi_communicator*/)
705  {}
706 
707 
708 
709  void
710  min_max_avg(const ArrayView<const double> &my_values,
711  const ArrayView<MinMaxAvg> & result,
712  const MPI_Comm &)
713  {
714  AssertDimension(my_values.size(), result.size());
715 
716  for (unsigned int i = 0; i < my_values.size(); ++i)
717  {
718  result[i].sum = my_values[i];
719  result[i].avg = my_values[i];
720  result[i].min = my_values[i];
721  result[i].max = my_values[i];
722  result[i].min_index = 0;
723  result[i].max_index = 0;
724  }
725  }
726 
727 #endif
728 
729  /* Force initialization of static struct: */
730  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
731  MPI_InitFinalize::Signals();
732 
733 
 734  MPI_InitFinalize::MPI_InitFinalize(int &argc,
735  char **& argv,
736  const unsigned int max_num_threads)
737  {
738  static bool constructor_has_already_run = false;
739  (void)constructor_has_already_run;
740  Assert(constructor_has_already_run == false,
741  ExcMessage("You can only create a single object of this class "
742  "in a program since it initializes the MPI system."));
743 
744 
745  int ierr = 0;
746 #ifdef DEAL_II_WITH_MPI
747  // if we have PETSc, we will initialize it and let it handle MPI.
748  // Otherwise, we will do it.
749  int MPI_has_been_started = 0;
750  ierr = MPI_Initialized(&MPI_has_been_started);
751  AssertThrowMPI(ierr);
752  AssertThrow(MPI_has_been_started == 0,
753  ExcMessage("MPI error. You can only start MPI once!"));
754 
755  int provided;
756  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
757  // we might use several threads but never call two MPI functions at the
 758  // same time. For an explanation of why we do this, see
759  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
760  int wanted = MPI_THREAD_SERIALIZED;
761  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
762  AssertThrowMPI(ierr);
763 
764  // disable for now because at least some implementations always return
765  // MPI_THREAD_SINGLE.
766  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
767  // ExcMessage("MPI reports that we are not allowed to use multiple
768  // threads."));
769 #else
770  // make sure the compiler doesn't warn about these variables
771  (void)argc;
772  (void)argv;
773  (void)ierr;
774 #endif
775 
776  // we are allowed to call MPI_Init ourselves and PETScInitialize will
777  // detect this. This allows us to use MPI_Init_thread instead.
778 #ifdef DEAL_II_WITH_PETSC
779 # ifdef DEAL_II_WITH_SLEPC
780  // Initialize SLEPc (with PETSc):
781  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
 782  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
783 # else
784  // or just initialize PETSc alone:
785  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
786  AssertThrow(ierr == 0, ExcPETScError(ierr));
787 # endif
788 
789  // Disable PETSc exception handling. This just prints a large wall
790  // of text that is not particularly helpful for what we do:
791  PetscPopSignalHandler();
792 #endif
793 
794  // Initialize zoltan
795 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
796  float version;
797  Zoltan_Initialize(argc, argv, &version);
798 #endif
799 
800 #ifdef DEAL_II_WITH_P4EST
801  // Initialize p4est and libsc components
802 # if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
803  // This feature is broken in version 2.0.0 for calls to
804  // MPI_Comm_create_group (see cburstedde/p4est#30).
805  // Disabling it leads to more verbose p4est error messages
806  // which should be fine.
807  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
808 # endif
809  p4est_init(nullptr, SC_LP_SILENT);
810 #endif
811 
812  constructor_has_already_run = true;
813 
814 
815  // Now also see how many threads we'd like to run
816  if (max_num_threads != numbers::invalid_unsigned_int)
817  {
818  // set maximum number of threads (also respecting the environment
819  // variable that the called function evaluates) based on what the
820  // user asked
821  MultithreadInfo::set_thread_limit(max_num_threads);
822  }
823  else
824  // user wants automatic choice
825  {
826 #ifdef DEAL_II_WITH_MPI
827  // we need to figure out how many MPI processes there are on the
828  // current node, as well as how many CPU cores we have. for the
829  // first task, check what get_hostname() returns and then do an
830  // allgather so each processor gets the answer
831  //
832  // in calculating the length of the string, don't forget the
833  // terminating \0 on C-style strings
834  const std::string hostname = Utilities::System::get_hostname();
835  const unsigned int max_hostname_size =
836  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
837  std::vector<char> hostname_array(max_hostname_size);
838  std::copy(hostname.c_str(),
839  hostname.c_str() + hostname.size() + 1,
840  hostname_array.begin());
841 
842  std::vector<char> all_hostnames(max_hostname_size *
843  MPI::n_mpi_processes(MPI_COMM_WORLD));
844  const int ierr = MPI_Allgather(hostname_array.data(),
845  max_hostname_size,
846  MPI_CHAR,
847  all_hostnames.data(),
848  max_hostname_size,
849  MPI_CHAR,
850  MPI_COMM_WORLD);
851  AssertThrowMPI(ierr);
852 
 853  // count how often our own hostname appears and determine which
 854  // instance of it the current process represents
855  unsigned int n_local_processes = 0;
856  unsigned int nth_process_on_host = 0;
857  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
858  ++i)
859  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
860  hostname)
861  {
862  ++n_local_processes;
863  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
864  ++nth_process_on_host;
865  }
866  Assert(nth_process_on_host > 0, ExcInternalError());
867 
868 
869  // compute how many cores each process gets. if the number does not
870  // divide evenly, then we get one more core if we are among the
871  // first few processes
872  //
873  // if the number would be zero, round up to one since every process
874  // needs to have at least one thread
875  const unsigned int n_threads =
876  std::max(MultithreadInfo::n_cores() / n_local_processes +
877  (nth_process_on_host <=
878  MultithreadInfo::n_cores() % n_local_processes ?
879  1 :
880  0),
881  1U);
882 #else
883  const unsigned int n_threads = MultithreadInfo::n_cores();
884 #endif
885 
886  // finally set this number of threads
 887  MultithreadInfo::set_thread_limit(n_threads);
888  }
889 
890  // As a final step call the at_mpi_init() signal handler.
 891  signals.at_mpi_init();
892  }
893 
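  // A sketch of a typical deal.II main() (not code from this file): create
  // exactly one object of this class at the top of main() and let its
  // destructor finalize MPI and the optional packages.
  //
  //   int main(int argc, char **argv)
  //   {
  //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  //     const unsigned int rank =
  //       Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  //     // ... parallel program ...
  //   }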
894 
895 
896  void
 897  MPI_InitFinalize::register_request(MPI_Request &request)
898  {
899  // insert if it is not in the set already:
900  requests.insert(&request);
901  }
902 
903 
904 
905  void
 906  MPI_InitFinalize::unregister_request(MPI_Request &request)
907  {
908  Assert(
909  requests.find(&request) != requests.end(),
910  ExcMessage(
911  "You tried to call unregister_request() with an invalid request."));
912 
913  requests.erase(&request);
914  }
915 
916 
917 
918  std::set<MPI_Request *> MPI_InitFinalize::requests;
919 
920 
921 
 922  MPI_InitFinalize::~MPI_InitFinalize()
923  {
924  // First, call the at_mpi_finalize() signal handler.
 925  signals.at_mpi_finalize();
926 
927  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
 928  // are no longer used at this point. This is relevant because the
 929  // destructors of the static objects holding such vectors would otherwise
 930  // run at the end of the program, after MPI_Finalize is called, leading to errors
931 
932 #ifdef DEAL_II_WITH_MPI
933  // Before exiting, wait for nonblocking communication to complete:
934  for (auto request : requests)
935  {
936  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
937  AssertThrowMPI(ierr);
938  }
939 
940  // Start with deal.II MPI vectors and delete vectors from the pools:
 941  GrowingVectorMemory<
 942  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
 943  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
 944  release_unused_memory();
 945  GrowingVectorMemory<
 946  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
 947  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
 948  release_unused_memory();
949 
950  // Next with Trilinos:
951 # ifdef DEAL_II_WITH_TRILINOS
 952  GrowingVectorMemory<
 953  TrilinosWrappers::MPI::Vector>::release_unused_memory();
 954  GrowingVectorMemory<
 955  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
956 # endif
957 #endif
958 
959 
960  // Now deal with PETSc (with or without MPI). Only delete the vectors if
961  // finalize hasn't been called yet, otherwise this will lead to errors.
962 #ifdef DEAL_II_WITH_PETSC
963  if ((PetscInitializeCalled == PETSC_TRUE) &&
964  (PetscFinalizeCalled == PETSC_FALSE))
965  {
 966  GrowingVectorMemory<
 967  PETScWrappers::MPI::Vector>::release_unused_memory();
 968  GrowingVectorMemory<
 969  PETScWrappers::MPI::BlockVector>::release_unused_memory();
970 
971 # ifdef DEAL_II_WITH_SLEPC
972  // and now end SLEPc (with PETSc)
973  SlepcFinalize();
974 # else
975  // or just end PETSc.
976  PetscFinalize();
977 # endif
978  }
979 #endif
980 
981 // There is a similar issue with CUDA: The destructor of static objects might
982 // run after the CUDA driver is unloaded. Hence, also release all memory
983 // related to CUDA vectors.
984 #ifdef DEAL_II_WITH_CUDA
 985  GrowingVectorMemory<
 986  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
 987  release_unused_memory();
 988  GrowingVectorMemory<
 989  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
 990  release_unused_memory();
991 #endif
992 
993 #ifdef DEAL_II_WITH_P4EST
994  // now end p4est and libsc
995  // Note: p4est has no finalize function
996  sc_finalize();
997 #endif
998 
999 
1000  // only MPI_Finalize if we are running with MPI. We also need to do this
1001  // when running PETSc, because we initialize MPI ourselves before
1002  // calling PetscInitialize
1003 #ifdef DEAL_II_WITH_MPI
1004  if (job_supports_mpi() == true)
1005  {
1006 # if __cpp_lib_uncaught_exceptions >= 201411
1007  // std::uncaught_exception() is deprecated in c++17
1008  if (std::uncaught_exceptions() > 0)
1009 # else
1010  if (std::uncaught_exception() == true)
1011 # endif
1012  {
1013  // do not try to call MPI_Finalize to avoid a deadlock.
1014  }
1015  else
1016  {
1017  const int ierr = MPI_Finalize();
1018  (void)ierr;
1019  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1020  }
1021  }
1022 #endif
1023  }
1024 
1025 
1026 
1027  bool
 1028  job_supports_mpi()
1029  {
1030 #ifdef DEAL_II_WITH_MPI
1031  int MPI_has_been_started = 0;
1032  const int ierr = MPI_Initialized(&MPI_has_been_started);
1033  AssertThrowMPI(ierr);
1034 
1035  return (MPI_has_been_started > 0);
1036 #else
1037  return false;
1038 #endif
1039  }
1040 
1041 
1042 
1043  std::vector<unsigned int>
1044  compute_index_owner(const IndexSet &owned_indices,
1045  const IndexSet &indices_to_look_up,
1046  const MPI_Comm &comm)
1047  {
1048  Assert(owned_indices.size() == indices_to_look_up.size(),
1049  ExcMessage("IndexSets have to have the same sizes."));
1050 
1051  Assert(
1052  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1053  ExcMessage("IndexSets have to have the same size on all processes."));
1054 
1055  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1056 
1057  // Step 1: setup dictionary
1058  // The input owned_indices can be partitioned arbitrarily. In the
1059  // dictionary, the index set is statically repartitioned among the
 1060  // processes again and extended with information about the actual
 1061  // owner of that index.
 1062  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1063  owned_indices, indices_to_look_up, comm, owning_ranks);
1064 
1065  // Step 2: read dictionary
1066  // Communicate with the process who owns the index in the static
1067  // partition (i.e. in the dictionary). This process returns the actual
1068  // owner of the index.
 1069  ConsensusAlgorithms::Selector<
1070  std::vector<
1071  std::pair<types::global_dof_index, types::global_dof_index>>,
1072  std::vector<unsigned int>>
1073  consensus_algorithm(process, comm);
1074  consensus_algorithm.run();
1075 
1076  return owning_ranks;
1077  }
1078 
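  // A usage sketch (sizes and index choices are hypothetical): every rank
  // owns a slice of [0,100) and asks who owns a few foreign indices.
  //
  //   IndexSet owned(100);   // filled differently on every rank
  //   IndexSet ghosts(100);  // the indices whose owners we want to know
  //   const std::vector<unsigned int> owners =
  //     Utilities::MPI::compute_index_owner(owned, ghosts, comm);
  //   // owners[k] is the rank owning the k-th element of 'ghosts'.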
1079 
1080 
1081  namespace internal
1082  {
1083  namespace CollectiveMutexImplementation
1084  {
1089  void
 1090  check_exception()
1091  {
1092 #ifdef DEAL_II_WITH_MPI
1093 # if __cpp_lib_uncaught_exceptions >= 201411
1094  // std::uncaught_exception() is deprecated in c++17
1095  if (std::uncaught_exceptions() != 0)
1096 # else
1097  if (std::uncaught_exception() == true)
1098 # endif
1099  {
1100  std::cerr
1101  << "---------------------------------------------------------\n"
1102  << "An exception was thrown inside a section of the program\n"
1103  << "guarded by a CollectiveMutex.\n"
1104  << "Because a CollectiveMutex guards critical communication\n"
1105  << "handling the exception would likely\n"
1106  << "deadlock because only the current process is aware of the\n"
1107  << "exception. To prevent this deadlock, the program will be\n"
1108  << "aborted.\n"
1109  << "---------------------------------------------------------"
1110  << std::endl;
1111 
1112  MPI_Abort(MPI_COMM_WORLD, 1);
1113  }
1114 #endif
1115  }
1116  } // namespace CollectiveMutexImplementation
1117  } // namespace internal
1118 
1119 
1120 
 1121  CollectiveMutex::CollectiveMutex()
1122  : locked(false)
1123  , request(MPI_REQUEST_NULL)
1124  {
 1125  Utilities::MPI::MPI_InitFinalize::register_request(request);
1126  }
1127 
1128 
1129 
 1130  CollectiveMutex::~CollectiveMutex()
1131  {
1132  // First check if this destructor is called during exception handling
1133  // if so, abort.
 1134  internal::CollectiveMutexImplementation::check_exception();
1135 
1136  Assert(
1137  !locked,
1138  ExcMessage(
1139  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1140 
 1141  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
1142  }
1143 
1144 
1145 
1146  void
1147  CollectiveMutex::lock(const MPI_Comm &comm)
1148  {
1149  (void)comm;
1150 
1151  Assert(
1152  !locked,
1153  ExcMessage(
1154  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1155 
1156 #ifdef DEAL_II_WITH_MPI
1157 
1158  // TODO: For now, we implement this mutex with a blocking barrier
 1159  // in the lock and unlock. It needs to be tested whether we can move
1160  // to a nonblocking barrier (code disabled below).
1161 
1162  const int ierr = MPI_Barrier(comm);
1163  AssertThrowMPI(ierr);
1164 
1165 # if 0
1166  // wait for non-blocking barrier to finish. This is a noop the
1167  // first time we lock().
1168  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1169  AssertThrowMPI(ierr);
1170 # else
1171  // nothing to do as blocking barrier already completed
1172 # endif
1173 #endif
1174 
1175  locked = true;
1176  }
1177 
1178 
1179 
1180  void
 1181  CollectiveMutex::unlock(const MPI_Comm &comm)
1182  {
1183  (void)comm;
1184 
1185  // First check if this function is called during exception handling
1186  // if so, abort. This can happen if a ScopedLock is destroyed.
 1187  internal::CollectiveMutexImplementation::check_exception();
1188 
1189  Assert(
1190  locked,
1191  ExcMessage(
1192  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1193 
1194 #ifdef DEAL_II_WITH_MPI
1195 
1196  // TODO: For now, we implement this mutex with a blocking barrier
 1197  // in the lock and unlock. It needs to be tested whether we can move
1198  // to a nonblocking barrier (code disabled below):
1199 # if 0
1200  const int ierr = MPI_Ibarrier(comm, &request);
1201  AssertThrowMPI(ierr);
1202 # else
1203  const int ierr = MPI_Barrier(comm);
1204  AssertThrowMPI(ierr);
1205 # endif
1206 #endif
1207 
1208  locked = false;
1209  }
1210 
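  // A usage sketch mirroring the call site in
  // compute_point_to_point_communication_pattern() above: a static mutex
  // plus a ScopedLock serialize a communication phase that uses wildcard
  // receives, so that overlapping phases cannot steal each other's messages.
  //
  //   static Utilities::MPI::CollectiveMutex      mutex;
  //   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
  //   // ... guarded point-to-point communication ...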
1211 
1212 #ifndef DOXYGEN
1213  // explicit instantiations
1214 
1215  // booleans aren't in MPI_SCALARS
1216  template bool
1217  reduce(const bool &,
1218  const MPI_Comm &,
1219  const std::function<bool(const bool &, const bool &)> &,
1220  const unsigned int);
1221 
1222  template std::vector<bool>
1223  reduce(const std::vector<bool> &,
1224  const MPI_Comm &,
1225  const std::function<std::vector<bool>(const std::vector<bool> &,
1226  const std::vector<bool> &)> &,
1227  const unsigned int);
1228 
1229  template bool
1230  all_reduce(const bool &,
1231  const MPI_Comm &,
1232  const std::function<bool(const bool &, const bool &)> &);
1233 
1234  template std::vector<bool>
1235  all_reduce(
1236  const std::vector<bool> &,
1237  const MPI_Comm &,
1238  const std::function<std::vector<bool>(const std::vector<bool> &,
1239  const std::vector<bool> &)> &);
1240 
1241  // We need an explicit instantiation of this for the same reason as the
1242  // other types described in mpi.inst.in
1243  template void
1244  internal::all_reduce<bool>(const MPI_Op &,
1245  const ArrayView<const bool> &,
1246  const MPI_Comm &,
1247  const ArrayView<bool> &);
1248 
1249 
1250  template bool
1251  logical_or<bool>(const bool &, const MPI_Comm &);
1252 
1253 
1254  template void
1255  logical_or<bool>(const ArrayView<const bool> &,
1256  const MPI_Comm &,
1257  const ArrayView<bool> &);
1258 
1259 
1260  template std::vector<unsigned int>
1261  compute_set_union(const std::vector<unsigned int> &vec,
1262  const MPI_Comm & comm);
1263 
1264 
1265  template std::set<unsigned int>
1266  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1267 #endif
1268 
1269 #include "mpi.inst"
1270  } // end of namespace MPI
1271 } // end of namespace Utilities
1272 
 1273 DEAL_II_NAMESPACE_CLOSE