mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2021 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
 17 #include <deal.II/base/exceptions.h>
 18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
 21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
 22 #include <deal.II/base/mpi_consensus_algorithms.templates.h>
23 #include <deal.II/base/mpi_tags.h>
 24 #include <deal.II/base/multithread_info.h>
 25 #include <deal.II/base/utilities.h>
26 
 27 #include <deal.II/lac/la_parallel_block_vector.h>
 28 #include <deal.II/lac/la_parallel_vector.h>
 29 #include <deal.II/lac/vector_memory.h>
 30 
31 #include <boost/serialization/utility.hpp>
32 
33 #include <iostream>
34 #include <numeric>
35 #include <set>
36 #include <vector>
37 
38 #ifdef DEAL_II_WITH_TRILINOS
39 # ifdef DEAL_II_WITH_MPI
 40 # include <deal.II/lac/trilinos_parallel_block_vector.h>
 41 # include <deal.II/lac/trilinos_vector.h>
 42 
43 # include <Epetra_MpiComm.h>
44 # endif
45 #endif
46 
47 #ifdef DEAL_II_WITH_PETSC
 48 # include <deal.II/lac/petsc_block_vector.h>
 49 # include <deal.II/lac/petsc_vector.h>
 50 
51 # include <petscsys.h>
52 #endif
53 
54 #ifdef DEAL_II_WITH_SLEPC
 55 # include <deal.II/lac/slepc_solver.h>
 56 
57 # include <slepcsys.h>
58 #endif
59 
60 #ifdef DEAL_II_WITH_P4EST
61 # include <p4est_bits.h>
62 #endif
63 
64 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
65 # include <zoltan_cpp.h>
66 #endif
67 
 68 DEAL_II_NAMESPACE_OPEN
 69 
70 
71 namespace Utilities
72 {
73  IndexSet
74  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
75  const unsigned int n_partitions,
76  const IndexSet::size_type total_size)
77  {
78  const unsigned int remain = total_size % n_partitions;
79 
80  const IndexSet::size_type min_size = total_size / n_partitions;
81 
 82  const IndexSet::size_type begin =
 83  min_size * my_partition_id + std::min(my_partition_id, remain);
84  const IndexSet::size_type end =
85  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
86  IndexSet result(total_size);
87  result.add_range(begin, end);
88  return result;
89  }
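// A minimal usage sketch of the three-argument overload defined above; the
// concrete numbers are chosen only for illustration. Distributing 10 indices
// over 3 partitions yields blocks of sizes 4, 3, 3 starting at 0, 4, and 7,
// because the remainder 10 % 3 == 1 goes to the first partition.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

void partition_example()
{
  const dealii::IndexSet part =
    dealii::Utilities::create_evenly_distributed_partitioning(1, 3, 10);
  // 'part' now contains exactly the half-open range [4, 7)
  (void)part;
}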
90 
91  namespace MPI
92  {
93 #ifdef DEAL_II_WITH_MPI
94  // Provide definitions of template variables for all valid instantiations.
95  template const MPI_Datatype mpi_type_id_for_type<bool>;
96  template const MPI_Datatype mpi_type_id_for_type<char>;
97  template const MPI_Datatype mpi_type_id_for_type<signed char>;
98  template const MPI_Datatype mpi_type_id_for_type<short>;
99  template const MPI_Datatype mpi_type_id_for_type<int>;
100  template const MPI_Datatype mpi_type_id_for_type<long int>;
101  template const MPI_Datatype mpi_type_id_for_type<unsigned char>;
102  template const MPI_Datatype mpi_type_id_for_type<unsigned short>;
103  template const MPI_Datatype mpi_type_id_for_type<unsigned long int>;
104  template const MPI_Datatype mpi_type_id_for_type<unsigned long long int>;
105  template const MPI_Datatype mpi_type_id_for_type<float>;
106  template const MPI_Datatype mpi_type_id_for_type<double>;
107  template const MPI_Datatype mpi_type_id_for_type<long double>;
108  template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
109  template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
110 #endif
111 
112 
113  MinMaxAvg
114  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
115  {
116  MinMaxAvg result;
 117  min_max_avg(ArrayView<const double>(my_value),
 118  ArrayView<MinMaxAvg>(result),
119  mpi_communicator);
120 
121  return result;
122  }
123 
124 
125 
126  std::vector<MinMaxAvg>
127  min_max_avg(const std::vector<double> &my_values,
128  const MPI_Comm & mpi_communicator)
129  {
130  std::vector<MinMaxAvg> results(my_values.size());
131  min_max_avg(my_values, results, mpi_communicator);
132 
133  return results;
134  }
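// A usage sketch for min_max_avg(), assuming MPI has already been initialized
// (for instance through MPI_InitFinalize further down in this file): every
// rank contributes one number and receives global statistics about it.
#include <deal.II/base/mpi.h>

void report_statistics(const MPI_Comm &comm)
{
  const double my_value =
    1.0 + dealii::Utilities::MPI::this_mpi_process(comm);

  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg(my_value, comm);
  // stats.min == 1.0 (attained on rank stats.min_index == 0), stats.max
  // equals the number of ranks, stats.sum their total, stats.avg their mean.
  (void)stats;
}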
135 
136 
137 
138 #ifdef DEAL_II_WITH_MPI
139  unsigned int
140  n_mpi_processes(const MPI_Comm &mpi_communicator)
141  {
142  int n_jobs = 1;
143  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
144  AssertThrowMPI(ierr);
145 
146  return n_jobs;
147  }
148 
149 
150  unsigned int
151  this_mpi_process(const MPI_Comm &mpi_communicator)
152  {
153  int rank = 0;
154  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
155  AssertThrowMPI(ierr);
156 
157  return rank;
158  }
159 
160 
161 
162  const std::vector<unsigned int>
163  mpi_processes_within_communicator(const MPI_Comm &comm_large,
164  const MPI_Comm &comm_small)
165  {
166  if (Utilities::MPI::job_supports_mpi() == false)
167  return std::vector<unsigned int>{0};
168 
169  const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
170  const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
171 
172  std::vector<unsigned int> ranks(size);
173  const int ierr = MPI_Allgather(
174  &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
175  AssertThrowMPI(ierr);
176 
177  return ranks;
178  }
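// A short sketch of the function above: every rank of 'comm_small' reports
// the rank it has in 'comm_large', so the returned vector lists which
// "large" ranks make up the smaller communicator, ordered by their rank in
// 'comm_small'. Both communicators are assumed to be valid here.
#include <deal.II/base/mpi.h>

#include <vector>

void show_membership(const MPI_Comm &comm_large, const MPI_Comm &comm_small)
{
  const std::vector<unsigned int> members =
    dealii::Utilities::MPI::mpi_processes_within_communicator(comm_large,
                                                              comm_small);
  // members[i] is the rank within comm_large of the process that has rank i
  // in comm_small
  (void)members;
}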
179 
180 
181 
182  MPI_Comm
183  duplicate_communicator(const MPI_Comm &mpi_communicator)
184  {
185  MPI_Comm new_communicator;
186  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
187  AssertThrowMPI(ierr);
188  return new_communicator;
189  }
190 
191 
192 
193  void
194  free_communicator(MPI_Comm &mpi_communicator)
195  {
196  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
197  const int ierr = MPI_Comm_free(&mpi_communicator);
198  AssertThrowMPI(ierr);
199  }
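// A sketch of how the two functions above are meant to be used together: a
// duplicated communicator allows communication that cannot interfere with
// messages on the original one, and must eventually be released again.
#include <deal.II/base/mpi.h>

void communicate_privately(const MPI_Comm &comm)
{
  MPI_Comm dup = dealii::Utilities::MPI::duplicate_communicator(comm);
  // ... send and receive on 'dup' without clashing with traffic on 'comm' ...
  dealii::Utilities::MPI::free_communicator(dup); // dup becomes MPI_COMM_NULL
}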
200 
201 
202 
203  int
204  create_group(const MPI_Comm & comm,
205  const MPI_Group &group,
206  const int tag,
207  MPI_Comm * new_comm)
208  {
209  const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
210  AssertThrowMPI(ierr);
211  return ierr;
212  }
213 
214 
215 
216  std::vector<IndexSet>
 217  create_ascending_partitioning(const MPI_Comm &comm,
 218  const IndexSet::size_type locally_owned_size)
219  {
220  const unsigned int n_proc = n_mpi_processes(comm);
221  const std::vector<IndexSet::size_type> sizes =
222  all_gather(comm, locally_owned_size);
223  const auto total_size =
224  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
225 
226  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
227 
 228  IndexSet::size_type begin = 0;
 229  for (unsigned int i = 0; i < n_proc; ++i)
230  {
231  res[i].add_range(begin, begin + sizes[i]);
232  begin = begin + sizes[i];
233  }
234 
235  return res;
236  }
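// A sketch of the contract of create_ascending_partitioning(): if, for
// example, two ranks own 3 and 5 elements respectively, then every rank
// receives the same vector of IndexSets describing the contiguous ranges
// [0,3) and [3,8). The local sizes below are made up for illustration.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

void partitioning_example(const MPI_Comm &comm)
{
  const dealii::IndexSet::size_type my_size =
    (dealii::Utilities::MPI::this_mpi_process(comm) == 0) ? 3 : 5;

  const std::vector<dealii::IndexSet> partition =
    dealii::Utilities::MPI::create_ascending_partitioning(comm, my_size);
  // partition[r] is the locally owned range of rank r; the ranges are
  // contiguous, ordered by rank, and together cover all indices.
  (void)partition;
}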
237 
238 
239 
240  IndexSet
 241  create_evenly_distributed_partitioning(const MPI_Comm &comm,
 242  const IndexSet::size_type total_size)
243  {
244  const unsigned int this_proc = this_mpi_process(comm);
245  const unsigned int n_proc = n_mpi_processes(comm);
246 
 247  return Utilities::create_evenly_distributed_partitioning(this_proc,
 248  n_proc,
249  total_size);
250  }
251 
252 
253 
254  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
255  create_mpi_data_type_n_bytes(const std::size_t n_bytes)
256  {
257  // Simplified version from BigMPI repository, see
258  // https://github.com/jeffhammond/BigMPI/blob/5300b18cc8ec1b2431bf269ee494054ee7bd9f72/src/type_contiguous_x.c#L74
259  // (code is MIT licensed)
260 
261  // We create an MPI datatype that has the layout A*n+B where A is
262  // max_signed_int bytes repeated n times and B is the remainder.
263 
264  const MPI_Count max_signed_int = std::numeric_limits<int>::max();
265 
266  const MPI_Count n_chunks = n_bytes / max_signed_int;
267  const MPI_Count n_bytes_remainder = n_bytes % max_signed_int;
268 
269  Assert(static_cast<std::size_t>(max_signed_int * n_chunks +
270  n_bytes_remainder) == n_bytes,
271  ExcInternalError());
272 
273  MPI_Datatype chunks;
274 
275  int ierr = MPI_Type_vector(
276  n_chunks, max_signed_int, max_signed_int, MPI_BYTE, &chunks);
277  AssertThrowMPI(ierr);
278 
279  MPI_Datatype remainder;
280  ierr = MPI_Type_contiguous(n_bytes_remainder, MPI_BYTE, &remainder);
281  AssertThrowMPI(ierr);
282 
283  const int blocklengths[2] = {1, 1};
284  const MPI_Aint displacements[2] = {0,
285  static_cast<MPI_Aint>(n_chunks) *
286  max_signed_int};
287 
288  // This fails if Aint happens to be 32 bits (maybe on some 32bit
289  // systems as it has type "long" which is usually 64bits) or the
290  // message is very, very big.
291  AssertThrow(
292  displacements[1] == n_chunks * max_signed_int,
293  ExcMessage(
294  "Error in create_mpi_data_type_n_bytes(): the size is too big to support."));
295 
296  MPI_Datatype result;
297 
298  const MPI_Datatype types[2] = {chunks, remainder};
299  ierr =
300  MPI_Type_create_struct(2, blocklengths, displacements, types, &result);
301  AssertThrowMPI(ierr);
302 
303  ierr = MPI_Type_commit(&result);
304  AssertThrowMPI(ierr);
305 
306  ierr = MPI_Type_free(&chunks);
307  AssertThrowMPI(ierr);
308  ierr = MPI_Type_free(&remainder);
309  AssertThrowMPI(ierr);
310 
311 # ifdef DEBUG
312  MPI_Count size64;
313  ierr = MPI_Type_size_x(result, &size64);
314  AssertThrowMPI(ierr);
315 
316  Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
317 # endif
318 
319  // Now put the new data type into a std::unique_ptr with a custom
 320  // deleter. We call the std::unique_ptr constructor that takes as its
 321  // first argument a pointer (here, a pointer to a copy of the `result`
 322  // object) and as its second argument a pointer-to-function, for which
 323  // we here use a captureless lambda function that acts as the
 324  // 'deleter' object: it calls `MPI_Type_free` and then deletes the
 325  // pointer. To avoid a compiler warning about a null this pointer
 326  // in the lambda (which doesn't make sense: the lambda doesn't store
 327  // anything), we create the deleter first.
328  auto deleter = [](MPI_Datatype *p) {
329  if (p != nullptr)
330  {
331  const int ierr = MPI_Type_free(p);
332  (void)ierr;
333  AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
334 
335  delete p;
336  }
337  };
338 
339  return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
340  new MPI_Datatype(result), deleter);
341  }
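// A usage sketch for create_mpi_data_type_n_bytes(): the returned unique_ptr
// owns a committed MPI datatype describing exactly n_bytes bytes, so buffers
// whose size exceeds what a signed int can count can be transferred with a
// count of 1. 'destination' and 'tag' are placeholders for illustration.
#include <deal.II/base/exceptions.h>
#include <deal.II/base/mpi.h>

#include <vector>

void send_large_buffer(const std::vector<char> &buffer,
                       const int                destination,
                       const int                tag,
                       const MPI_Comm &         comm)
{
  const auto data_type =
    dealii::Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());

  const int ierr =
    MPI_Send(buffer.data(), 1, *data_type, destination, tag, comm);
  AssertThrowMPI(ierr);
  // the datatype is freed automatically when 'data_type' goes out of scope
}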
342 
343 
344 
345  std::vector<unsigned int>
 346  compute_point_to_point_communication_pattern(
 347  const MPI_Comm & mpi_comm,
348  const std::vector<unsigned int> &destinations)
349  {
350  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
351  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
352  (void)myid;
353  (void)n_procs;
354 
355  for (const unsigned int destination : destinations)
356  {
357  (void)destination;
358  AssertIndexRange(destination, n_procs);
359  }
360 
361 
362  // Have a little function that checks if destinations provided
363  // to the current process are unique. The way it does this is
364  // to create a sorted list of destinations and then walk through
365  // the list and look at successive elements -- if we find the
366  // same number twice, we know that the destinations were not
367  // unique
368  const bool my_destinations_are_unique = [destinations]() {
369  if (destinations.size() == 0)
370  return true;
371  else
372  {
373  std::vector<unsigned int> my_destinations = destinations;
374  std::sort(my_destinations.begin(), my_destinations.end());
375  return (std::adjacent_find(my_destinations.begin(),
376  my_destinations.end()) ==
377  my_destinations.end());
378  }
379  }();
380 
381  // If all processes report that they have unique destinations,
382  // then we can short-cut the process using a consensus algorithm (which
383  // is implemented only for the case of unique destinations):
384  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
385  1)
386  {
387  return ConsensusAlgorithms::nbx<char, char>(
388  destinations, {}, {}, {}, mpi_comm);
389  }
390 
391  // So we need to run a different algorithm, specifically one that
392  // requires more memory -- MPI_Reduce_scatter_block will require memory
393  // proportional to the number of processes involved; that function is
394  // available for MPI 2.2 or later:
395  static CollectiveMutex mutex;
396  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
397 
398  const int mpi_tag =
 399  internal::Tags::compute_point_to_point_communication_pattern;
 400 
401  // Calculate the number of messages to send to each process
402  std::vector<unsigned int> dest_vector(n_procs);
403  for (const auto &el : destinations)
404  ++dest_vector[el];
405 
406  // Find how many processes will send to this one
407  // by reducing with sum and then scattering the
408  // results over all processes
409  unsigned int n_recv_from;
410  const int ierr = MPI_Reduce_scatter_block(
411  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
412 
413  AssertThrowMPI(ierr);
414 
415  // Send myid to every process in `destinations` vector...
416  std::vector<MPI_Request> send_requests(destinations.size());
417  for (const auto &el : destinations)
418  {
419  const int ierr =
420  MPI_Isend(&myid,
421  1,
422  MPI_UNSIGNED,
423  el,
424  mpi_tag,
425  mpi_comm,
426  send_requests.data() + (&el - destinations.data()));
427  AssertThrowMPI(ierr);
428  }
429 
430 
431  // Receive `n_recv_from` times from the processes
432  // who communicate with this one. Store the obtained id's
433  // in the resulting vector
434  std::vector<unsigned int> origins(n_recv_from);
435  for (auto &el : origins)
436  {
437  const int ierr = MPI_Recv(&el,
438  1,
439  MPI_UNSIGNED,
440  MPI_ANY_SOURCE,
441  mpi_tag,
442  mpi_comm,
443  MPI_STATUS_IGNORE);
444  AssertThrowMPI(ierr);
445  }
446 
447  if (destinations.size() > 0)
448  {
449  const int ierr = MPI_Waitall(destinations.size(),
450  send_requests.data(),
451  MPI_STATUSES_IGNORE);
452  AssertThrowMPI(ierr);
453  }
454 
455  return origins;
456  }
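// A sketch of how the function above is typically called: each rank lists the
// ranks it will send to and receives the list of ranks that will send to it.
// The cyclic pattern below is made up for illustration.
#include <deal.II/base/mpi.h>

#include <vector>

void discover_senders(const MPI_Comm &comm)
{
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  std::vector<unsigned int> destinations;
  if (n_ranks > 1)
    destinations.push_back((my_rank + 1) % n_ranks); // send to the next rank

  const std::vector<unsigned int> origins =
    dealii::Utilities::MPI::compute_point_to_point_communication_pattern(
      comm, destinations);
  // with more than one rank, 'origins' contains exactly one entry:
  // the previous rank, (my_rank + n_ranks - 1) % n_ranks
  (void)origins;
}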
457 
458 
459 
460  unsigned int
 461  compute_n_point_to_point_communications(
 462  const MPI_Comm & mpi_comm,
463  const std::vector<unsigned int> &destinations)
464  {
465  // Have a little function that checks if destinations provided
466  // to the current process are unique:
467  const bool my_destinations_are_unique = [destinations]() {
468  std::vector<unsigned int> my_destinations = destinations;
469  const unsigned int n_destinations = my_destinations.size();
470  std::sort(my_destinations.begin(), my_destinations.end());
471  my_destinations.erase(std::unique(my_destinations.begin(),
472  my_destinations.end()),
473  my_destinations.end());
474  return (my_destinations.size() == n_destinations);
475  }();
476 
477  // If all processes report that they have unique destinations,
478  // then we can short-cut the process using a consensus algorithm:
479 
480  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
481  1)
482  {
483  return ConsensusAlgorithms::nbx<char, char>(
484  destinations, {}, {}, {}, mpi_comm)
485  .size();
486  }
487  else
488  {
489  const unsigned int n_procs =
 490  Utilities::MPI::n_mpi_processes(mpi_comm);
 491 
492  for (const unsigned int destination : destinations)
493  {
494  (void)destination;
495  AssertIndexRange(destination, n_procs);
496  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
497  ExcMessage(
498  "There is no point in communicating with ourselves."));
499  }
500 
501  // Calculate the number of messages to send to each process
502  std::vector<unsigned int> dest_vector(n_procs);
503  for (const auto &el : destinations)
504  ++dest_vector[el];
505 
506  // Find out how many processes will send to this one
507  // MPI_Reduce_scatter(_block) does exactly this
508  unsigned int n_recv_from = 0;
509 
510  const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
511  &n_recv_from,
512  1,
513  MPI_UNSIGNED,
514  MPI_SUM,
515  mpi_comm);
516 
517  AssertThrowMPI(ierr);
518 
519  return n_recv_from;
520  }
521  }
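// A short sketch: for the same 'destinations' input as above, this function
// answers only "how many ranks will send to me?", which is cheaper when the
// identity of the senders is not needed.
#include <deal.II/base/mpi.h>

#include <vector>

unsigned int count_incoming(const MPI_Comm &                 comm,
                            const std::vector<unsigned int> &destinations)
{
  return dealii::Utilities::MPI::compute_n_point_to_point_communications(
    comm, destinations);
}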
522 
523 
524 
525  namespace
526  {
 527  // custom MPI_Op reduction function used by min_max_avg() below
528  void
529  max_reduce(const void *in_lhs_,
530  void * inout_rhs_,
531  int * len,
532  MPI_Datatype *)
533  {
534  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
535  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
536 
537  for (int i = 0; i < *len; ++i)
538  {
539  inout_rhs[i].sum += in_lhs[i].sum;
540  if (inout_rhs[i].min > in_lhs[i].min)
541  {
542  inout_rhs[i].min = in_lhs[i].min;
543  inout_rhs[i].min_index = in_lhs[i].min_index;
544  }
545  else if (inout_rhs[i].min == in_lhs[i].min)
546  {
547  // choose lower cpu index when tied to make operator commutative
548  if (inout_rhs[i].min_index > in_lhs[i].min_index)
549  inout_rhs[i].min_index = in_lhs[i].min_index;
550  }
551 
552  if (inout_rhs[i].max < in_lhs[i].max)
553  {
554  inout_rhs[i].max = in_lhs[i].max;
555  inout_rhs[i].max_index = in_lhs[i].max_index;
556  }
557  else if (inout_rhs[i].max == in_lhs[i].max)
558  {
559  // choose lower cpu index when tied to make operator commutative
560  if (inout_rhs[i].max_index > in_lhs[i].max_index)
561  inout_rhs[i].max_index = in_lhs[i].max_index;
562  }
563  }
564  }
565  } // namespace
566 
567 
568 
569  void
 570  min_max_avg(const ArrayView<const double> &my_values,
 571  const ArrayView<MinMaxAvg> & result,
572  const MPI_Comm & mpi_communicator)
573  {
574  // If MPI was not started, we have a serial computation and cannot run
575  // the other MPI commands
576  if (job_supports_mpi() == false ||
577  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
578  {
579  for (unsigned int i = 0; i < my_values.size(); ++i)
580  {
581  result[i].sum = my_values[i];
582  result[i].avg = my_values[i];
583  result[i].min = my_values[i];
584  result[i].max = my_values[i];
585  result[i].min_index = 0;
586  result[i].max_index = 0;
587  }
588  return;
589  }
590 
591  /*
592  * A custom MPI datatype handle describing the memory layout of the
 593  * MinMaxAvg struct. Initialized the first time control reaches the
 594  * static variable, so it should not be initialized too early.
595  */
596  static MPI_Datatype type = []() {
597  MPI_Datatype type;
598 
599  int lengths[] = {3, 2, 1};
600 
601  MPI_Aint displacements[] = {0,
602  offsetof(MinMaxAvg, min_index),
603  offsetof(MinMaxAvg, avg)};
604 
605  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
606 
607  int ierr =
608  MPI_Type_create_struct(3, lengths, displacements, types, &type);
609  AssertThrowMPI(ierr);
610 
611  ierr = MPI_Type_commit(&type);
612  AssertThrowMPI(ierr);
613 
614  /* Ensure that we free the allocated datatype again at the end of
615  * the program run just before we call MPI_Finalize():*/
616  MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
617  int ierr = MPI_Type_free(&type);
618  AssertThrowMPI(ierr);
619  });
620 
621  return type;
622  }();
623 
624  /*
625  * A custom MPI op handle for our max_reduce function.
 626  * Initialized the first time control reaches the static variable, so
 627  * it should not be initialized too early.
628  */
629  static MPI_Op op = []() {
630  MPI_Op op;
631 
632  int ierr =
633  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
634  static_cast<int>(true),
635  &op);
636  AssertThrowMPI(ierr);
637 
638  /* Ensure that we free the allocated op again at the end of the
639  * program run just before we call MPI_Finalize():*/
640  MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
641  int ierr = MPI_Op_free(&op);
642  AssertThrowMPI(ierr);
643  });
644 
645  return op;
646  }();
647 
648  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
649  Utilities::MPI::max(my_values.size(), mpi_communicator));
650 
651  AssertDimension(my_values.size(), result.size());
652 
653  // To avoid uninitialized values on some MPI implementations, provide
654  // result with a default value already...
655  MinMaxAvg dummy = {0.,
 656  std::numeric_limits<double>::max(),
 657  std::numeric_limits<double>::lowest(),
658  0,
659  0,
660  0.};
661 
662  for (auto &i : result)
663  i = dummy;
664 
665  const unsigned int my_id =
666  ::Utilities::MPI::this_mpi_process(mpi_communicator);
667  const unsigned int numproc =
668  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
669 
670  std::vector<MinMaxAvg> in(my_values.size());
671 
672  for (unsigned int i = 0; i < my_values.size(); ++i)
673  {
674  in[i].sum = in[i].min = in[i].max = my_values[i];
675  in[i].min_index = in[i].max_index = my_id;
676  }
677 
678  int ierr = MPI_Allreduce(
679  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
680  AssertThrowMPI(ierr);
681 
682  for (auto &r : result)
683  r.avg = r.sum / numproc;
684  }
685 
686 
687 #else
688 
689  unsigned int
690  n_mpi_processes(const MPI_Comm &)
691  {
692  return 1;
693  }
694 
695 
696 
697  unsigned int
698  this_mpi_process(const MPI_Comm &)
699  {
700  return 0;
701  }
702 
703 
704 
705  const std::vector<unsigned int>
706  mpi_processes_within_communicator(const MPI_Comm &, const MPI_Comm &)
707  {
708  return std::vector<unsigned int>{0};
709  }
710 
711 
712 
713  std::vector<IndexSet>
714  create_ascending_partitioning(const MPI_Comm & /*comm*/,
715  const IndexSet::size_type locally_owned_size)
716  {
717  return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
718  }
719 
720  IndexSet
721  create_evenly_distributed_partitioning(const MPI_Comm & /*comm*/,
722  const IndexSet::size_type total_size)
723  {
724  return complete_index_set(total_size);
725  }
726 
727 
728 
729  MPI_Comm
730  duplicate_communicator(const MPI_Comm &mpi_communicator)
731  {
732  return mpi_communicator;
733  }
734 
735 
736 
737  void
738  free_communicator(MPI_Comm & /*mpi_communicator*/)
739  {}
740 
741 
742 
743  void
744  min_max_avg(const ArrayView<const double> &my_values,
745  const ArrayView<MinMaxAvg> & result,
746  const MPI_Comm &)
747  {
748  AssertDimension(my_values.size(), result.size());
749 
750  for (unsigned int i = 0; i < my_values.size(); ++i)
751  {
752  result[i].sum = my_values[i];
753  result[i].avg = my_values[i];
754  result[i].min = my_values[i];
755  result[i].max = my_values[i];
756  result[i].min_index = 0;
757  result[i].max_index = 0;
758  }
759  }
760 
761 #endif
762 
763  /* Force initialization of static struct: */
764  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
765  MPI_InitFinalize::Signals();
766 
767 
 768  MPI_InitFinalize::MPI_InitFinalize(int & argc,
 769  char **& argv,
770  const unsigned int max_num_threads)
771  {
772  static bool constructor_has_already_run = false;
773  (void)constructor_has_already_run;
774  Assert(constructor_has_already_run == false,
775  ExcMessage("You can only create a single object of this class "
776  "in a program since it initializes the MPI system."));
777 
778 
779  int ierr = 0;
780 #ifdef DEAL_II_WITH_MPI
781  // if we have PETSc, we will initialize it and let it handle MPI.
782  // Otherwise, we will do it.
783  int MPI_has_been_started = 0;
784  ierr = MPI_Initialized(&MPI_has_been_started);
785  AssertThrowMPI(ierr);
786  AssertThrow(MPI_has_been_started == 0,
787  ExcMessage("MPI error. You can only start MPI once!"));
788 
789  int provided;
790  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
791  // we might use several threads but never call two MPI functions at the
 792  // same time. For an explanation of why we do this, see
793  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
794  int wanted = MPI_THREAD_SERIALIZED;
795  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
796  AssertThrowMPI(ierr);
797 
798  // disable for now because at least some implementations always return
799  // MPI_THREAD_SINGLE.
800  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
801  // ExcMessage("MPI reports that we are not allowed to use multiple
802  // threads."));
803 #else
804  // make sure the compiler doesn't warn about these variables
805  (void)argc;
806  (void)argv;
807  (void)ierr;
808 #endif
809 
810  // we are allowed to call MPI_Init ourselves and PETScInitialize will
811  // detect this. This allows us to use MPI_Init_thread instead.
812 #ifdef DEAL_II_WITH_PETSC
813 # ifdef DEAL_II_WITH_SLEPC
814  // Initialize SLEPc (with PETSc):
815  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
 816  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
 817 # else
818  // or just initialize PETSc alone:
819  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
820  AssertThrow(ierr == 0, ExcPETScError(ierr));
821 # endif
822 
823  // Disable PETSc exception handling. This just prints a large wall
824  // of text that is not particularly helpful for what we do:
825  PetscPopSignalHandler();
826 #endif
827 
828  // Initialize zoltan
829 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
830  float version;
831  Zoltan_Initialize(argc, argv, &version);
832 #endif
833 
834 #ifdef DEAL_II_WITH_P4EST
835  // Initialize p4est and libsc components
836 # if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
837  // This feature is broken in version 2.0.0 for calls to
838  // MPI_Comm_create_group (see cburstedde/p4est#30).
839  // Disabling it leads to more verbose p4est error messages
840  // which should be fine.
841  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
842 # endif
843  p4est_init(nullptr, SC_LP_SILENT);
844 #endif
845 
846  constructor_has_already_run = true;
847 
848 
849  // Now also see how many threads we'd like to run
850  if (max_num_threads != numbers::invalid_unsigned_int)
851  {
852  // set maximum number of threads (also respecting the environment
853  // variable that the called function evaluates) based on what the
854  // user asked
855  MultithreadInfo::set_thread_limit(max_num_threads);
856  }
857  else
858  // user wants automatic choice
859  {
860 #ifdef DEAL_II_WITH_MPI
861  // we need to figure out how many MPI processes there are on the
862  // current node, as well as how many CPU cores we have. for the
863  // first task, check what get_hostname() returns and then do an
864  // allgather so each processor gets the answer
865  //
866  // in calculating the length of the string, don't forget the
867  // terminating \0 on C-style strings
868  const std::string hostname = Utilities::System::get_hostname();
869  const unsigned int max_hostname_size =
870  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
871  std::vector<char> hostname_array(max_hostname_size);
872  std::copy(hostname.c_str(),
873  hostname.c_str() + hostname.size() + 1,
874  hostname_array.begin());
875 
876  std::vector<char> all_hostnames(max_hostname_size *
877  MPI::n_mpi_processes(MPI_COMM_WORLD));
878  const int ierr = MPI_Allgather(hostname_array.data(),
879  max_hostname_size,
880  MPI_CHAR,
881  all_hostnames.data(),
882  max_hostname_size,
883  MPI_CHAR,
884  MPI_COMM_WORLD);
885  AssertThrowMPI(ierr);
886 
 887  // search how often our own hostname appears and which of those
 888  // instances the current process represents
889  unsigned int n_local_processes = 0;
890  unsigned int nth_process_on_host = 0;
891  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
892  ++i)
893  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
894  hostname)
895  {
896  ++n_local_processes;
897  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
898  ++nth_process_on_host;
899  }
900  Assert(nth_process_on_host > 0, ExcInternalError());
901 
902 
903  // compute how many cores each process gets. if the number does not
904  // divide evenly, then we get one more core if we are among the
905  // first few processes
906  //
907  // if the number would be zero, round up to one since every process
908  // needs to have at least one thread
909  const unsigned int n_threads =
910  std::max(MultithreadInfo::n_cores() / n_local_processes +
911  (nth_process_on_host <=
912  MultithreadInfo::n_cores() % n_local_processes ?
913  1 :
914  0),
915  1U);
916 #else
917  const unsigned int n_threads = MultithreadInfo::n_cores();
918 #endif
919 
920  // finally set this number of threads
 921  MultithreadInfo::set_thread_limit(n_threads);
 922  }
923 
924  // As a final step call the at_mpi_init() signal handler.
 925  signals.at_mpi_init();
 926  }
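// A typical program entry point using the constructor defined above: MPI
// (and, where configured, PETSc/SLEPc, p4est, and Zoltan) is initialized here
// and torn down again by the destructor when 'mpi_initialization' goes out of
// scope at the end of main().
#include <deal.II/base/mpi.h>

int main(int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
  // ... run the parallel program; the third argument limits each process to
  //     one thread, omitting it lets deal.II pick a thread count ...
  return 0;
}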
927 
928 
929 
930  void
 931  MPI_InitFinalize::register_request(MPI_Request &request)
 932  {
933  // insert if it is not in the set already:
934  requests.insert(&request);
935  }
936 
937 
938 
939  void
 940  MPI_InitFinalize::unregister_request(MPI_Request &request)
 941  {
942  Assert(
943  requests.find(&request) != requests.end(),
944  ExcMessage(
945  "You tried to call unregister_request() with an invalid request."));
946 
947  requests.erase(&request);
948  }
949 
950 
951 
952  std::set<MPI_Request *> MPI_InitFinalize::requests;
953 
954 
955 
 956  MPI_InitFinalize::~MPI_InitFinalize()
 957  {
958  // First, call the at_mpi_finalize() signal handler.
 959  signals.at_mpi_finalize();
 960 
961  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
 962  // are no longer used at this point. This is relevant because the
 963  // static destructors of these vectors, which run at the end of the
 964  // program, would otherwise execute after MPI_Finalize is called, leading to errors
965 
966 #ifdef DEAL_II_WITH_MPI
967  // Before exiting, wait for nonblocking communication to complete:
968  for (auto request : requests)
969  {
970  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
971  AssertThrowMPI(ierr);
972  }
973 
974  // Start with deal.II MPI vectors and delete vectors from the pools:
 975  GrowingVectorMemory<
 976  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
 977  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
 978  release_unused_memory();
 979  GrowingVectorMemory<
 980  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
 981  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
 982  release_unused_memory();
983 
984  // Next with Trilinos:
985 # ifdef DEAL_II_WITH_TRILINOS
 986  GrowingVectorMemory<
 987  TrilinosWrappers::MPI::Vector>::release_unused_memory();
 988  GrowingVectorMemory<
 989  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
990 # endif
991 #endif
992 
993 
994  // Now deal with PETSc (with or without MPI). Only delete the vectors if
995  // finalize hasn't been called yet, otherwise this will lead to errors.
996 #ifdef DEAL_II_WITH_PETSC
997  if ((PetscInitializeCalled == PETSC_TRUE) &&
998  (PetscFinalizeCalled == PETSC_FALSE))
999  {
 1000  GrowingVectorMemory<
 1001  PETScWrappers::MPI::Vector>::release_unused_memory();
 1002  GrowingVectorMemory<
 1003  PETScWrappers::MPI::BlockVector>::release_unused_memory();
1004 
1005 # ifdef DEAL_II_WITH_SLEPC
1006  // and now end SLEPc (with PETSc)
1007  SlepcFinalize();
1008 # else
1009  // or just end PETSc.
1010  PetscFinalize();
1011 # endif
1012  }
1013 #endif
1014 
1015 // There is a similar issue with CUDA: The destructor of static objects might
1016 // run after the CUDA driver is unloaded. Hence, also release all memory
1017 // related to CUDA vectors.
1018 #ifdef DEAL_II_WITH_CUDA
 1019  GrowingVectorMemory<
 1020  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
 1021  release_unused_memory();
 1022  GrowingVectorMemory<
 1023  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
 1024  release_unused_memory();
1025 #endif
1026 
1027 #ifdef DEAL_II_WITH_P4EST
1028  // now end p4est and libsc
1029  // Note: p4est has no finalize function
1030  sc_finalize();
1031 #endif
1032 
1033 
1034  // only MPI_Finalize if we are running with MPI. We also need to do this
1035  // when running PETSc, because we initialize MPI ourselves before
1036  // calling PetscInitialize
1037 #ifdef DEAL_II_WITH_MPI
1038  if (job_supports_mpi() == true)
1039  {
1040 # if __cpp_lib_uncaught_exceptions >= 201411
1041  // std::uncaught_exception() is deprecated in c++17
1042  if (std::uncaught_exceptions() > 0)
1043 # else
1044  if (std::uncaught_exception() == true)
1045 # endif
1046  {
1047  // do not try to call MPI_Finalize to avoid a deadlock.
1048  }
1049  else
1050  {
1051  const int ierr = MPI_Finalize();
1052  (void)ierr;
1053  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1054  }
1055  }
1056 #endif
1057  }
1058 
1059 
1060 
1061  bool
 1062  job_supports_mpi()
 1063  {
1064 #ifdef DEAL_II_WITH_MPI
1065  int MPI_has_been_started = 0;
1066  const int ierr = MPI_Initialized(&MPI_has_been_started);
1067  AssertThrowMPI(ierr);
1068 
1069  return (MPI_has_been_started > 0);
1070 #else
1071  return false;
1072 #endif
1073  }
1074 
1075 
1076 
1077  std::vector<unsigned int>
1078  compute_index_owner(const IndexSet &owned_indices,
1079  const IndexSet &indices_to_look_up,
1080  const MPI_Comm &comm)
1081  {
1082  Assert(owned_indices.size() == indices_to_look_up.size(),
1083  ExcMessage("IndexSets have to have the same sizes."));
1084 
1085  Assert(
1086  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1087  ExcMessage("IndexSets have to have the same size on all processes."));
1088 
1089  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1090 
1091  // Step 1: setup dictionary
1092  // The input owned_indices can be partitioned arbitrarily. In the
1093  // dictionary, the index set is statically repartitioned among the
 1094  // processes again and extended with information about the actual
 1095  // owner of each index.
 1096  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
 1097  owned_indices, indices_to_look_up, comm, owning_ranks);
1098 
1099  // Step 2: read dictionary
1100  // Communicate with the process who owns the index in the static
1101  // partition (i.e. in the dictionary). This process returns the actual
1102  // owner of the index.
 1103  ConsensusAlgorithms::Selector<
 1104  std::vector<
1105  std::pair<types::global_dof_index, types::global_dof_index>>,
1106  std::vector<unsigned int>>
1107  consensus_algorithm(process, comm);
1108  consensus_algorithm.run();
1109 
1110  return owning_ranks;
1111  }
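// A sketch of the query answered by compute_index_owner(): given the locally
// owned indices and a set of (possibly remote) indices we need, it returns
// the owning rank of every requested index. Both IndexSets must have the same
// global size on all ranks, as asserted above.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

#include <vector>

void who_owns(const dealii::IndexSet &locally_owned,
              const dealii::IndexSet &requested,
              const MPI_Comm &        comm)
{
  const std::vector<unsigned int> owners =
    dealii::Utilities::MPI::compute_index_owner(locally_owned,
                                                requested,
                                                comm);
  // owners[k] is the rank that owns the k-th element of 'requested'
  (void)owners;
}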
1112 
1113 
1114 
 1115  CollectiveMutex::CollectiveMutex()
 1116  : locked(false)
1117  , request(MPI_REQUEST_NULL)
1118  {
 1119  Utilities::MPI::MPI_InitFinalize::register_request(request);
 1120  }
1121 
1122 
1123 
 1124  CollectiveMutex::~CollectiveMutex()
 1125  {
1126  Assert(
1127  !locked,
1128  ExcMessage(
1129  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1130 
 1131  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
 1132  }
1133 
1134 
1135 
1136  void
1137  CollectiveMutex::lock(const MPI_Comm &comm)
1138  {
1139  (void)comm;
1140 
1141  Assert(
1142  !locked,
1143  ExcMessage(
1144  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1145 
1146 #ifdef DEAL_II_WITH_MPI
1147 
1148  // TODO: For now, we implement this mutex with a blocking barrier
 1149  // in the lock and unlock. It needs to be tested whether we can move
 1150  // to a non-blocking barrier (code disabled below).
1151 
1152  const int ierr = MPI_Barrier(comm);
1153  AssertThrowMPI(ierr);
1154 
1155 # if 0
1156  // wait for non-blocking barrier to finish. This is a noop the
1157  // first time we lock().
1158  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1159  AssertThrowMPI(ierr);
1160 # else
1161  // nothing to do as blocking barrier already completed
1162 # endif
1163 #endif
1164 
1165  locked = true;
1166  }
1167 
1168 
1169 
1170  void
 1171  CollectiveMutex::unlock(const MPI_Comm &comm)
 1172  {
1173  (void)comm;
1174 
1175  Assert(
1176  locked,
1177  ExcMessage(
1178  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1179 
1180 #ifdef DEAL_II_WITH_MPI
1181 
1182  // TODO: For now, we implement this mutex with a blocking barrier
 1183  // in the lock and unlock. It needs to be tested whether we can move
 1184  // to a non-blocking barrier (code disabled below):
1185 # if 0
1186  const int ierr = MPI_Ibarrier(comm, &request);
1187  AssertThrowMPI(ierr);
1188 # else
1189  const int ierr = MPI_Barrier(comm);
1190  AssertThrowMPI(ierr);
1191 # endif
1192 #endif
1193 
1194  locked = false;
1195  }
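// A usage sketch for the mutex implemented above: a ScopedLock calls lock()
// in its constructor and unlock() in its destructor, so a critical section
// that all ranks of 'comm' enter collectively can be written as a plain scope.
#include <deal.II/base/mpi.h>

void critical_section(const MPI_Comm &comm)
{
  static dealii::Utilities::MPI::CollectiveMutex      mutex;
  dealii::Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);

  // ... point-to-point communication that must not interleave with other
  //     users of the same tags on this communicator ...
} // 'lock' is destroyed here and the mutex is released on all ranks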
1196 
1197 
1198 #ifndef DOXYGEN
1199  // explicit instantiations
1200  template bool
1201  logical_or<bool>(const bool &, const MPI_Comm &);
1202 
1203 
1204  template void
1205  logical_or<bool>(const ArrayView<const bool> &,
1206  const MPI_Comm &,
1207  const ArrayView<bool> &);
1208 
1209 
1210  template std::vector<unsigned int>
1211  compute_set_union(const std::vector<unsigned int> &vec,
1212  const MPI_Comm & comm);
1213 
1214 
1215  template std::set<unsigned int>
1216  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1217 #endif
1218 
1219 #include "mpi.inst"
1220  } // end of namespace MPI
1221 } // end of namespace Utilities
 1222 
 1223 DEAL_II_NAMESPACE_CLOSE