Reference documentation for deal.II version 9.1.1
mpi.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_mpi_h
#define dealii_mpi_h
#include <deal.II/base/config.h>

#include <deal.II/base/array_view.h>
#include <deal.II/base/numbers.h>

#include <algorithm>
#include <cmath>
#include <map>
#include <numeric>
#include <vector>

#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// Without MPI, we would still like to use some constructs with MPI data
// types. Therefore, create some dummies.
using MPI_Comm     = int;
using MPI_Datatype = int;
using MPI_Op       = int;
#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#  ifndef MPI_MIN
#    define MPI_MIN 0
#  endif
#  ifndef MPI_MAX
#    define MPI_MAX 0
#  endif
#  ifndef MPI_SUM
#    define MPI_SUM 0
#  endif
#endif


#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)

#    define DEAL_II_MPI_CONST_CAST(expr) (expr)

#  else

#    include <type_traits>

#    define DEAL_II_MPI_CONST_CAST(expr)     \
      const_cast<typename std::remove_const< \
        typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)

#  endif
#endif
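
// Usage sketch (illustrative, not part of the original header): MPI-2
// bindings take non-const send buffers, so wrapping a pointer in
// DEAL_II_MPI_CONST_CAST lets the same call compile against both MPI-2
// and MPI-3. Here `data`, `dest`, `tag`, and `comm` are placeholders:
//
//   MPI_Send(DEAL_II_MPI_CONST_CAST(data.data()),
//            data.size(), MPI_DOUBLE, dest, tag, comm);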


DEAL_II_NAMESPACE_OPEN


// Forward type declarations to allow MPI sums over tensorial types
template <int rank, int dim, typename Number>
class Tensor;
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <typename Number>
class SparseMatrix;
class IndexSet;

namespace Utilities
{
  namespace MPI
  {
    unsigned int
    n_mpi_processes(const MPI_Comm &mpi_communicator);

    unsigned int
    this_mpi_process(const MPI_Comm &mpi_communicator);

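    // Usage sketch (illustrative, not part of the original header): these
    // two calls mirror MPI_Comm_size() and MPI_Comm_rank():
    //
    //   const unsigned int n_ranks =
    //     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
    //   const unsigned int my_rank =
    //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
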
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);

    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);

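    // Usage sketch (illustrative): given the list of ranks this process
    // wants to send to, the first function answers "which ranks will send
    // to me?", the second only "how many?". `comm` and `destinations` are
    // placeholders:
    //
    //   const std::vector<unsigned int> sources =
    //     Utilities::MPI::compute_point_to_point_communication_pattern(
    //       comm, destinations);
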
    MPI_Comm
    duplicate_communicator(const MPI_Comm &mpi_communicator);

#ifdef DEAL_II_WITH_MPI
    int
    create_group(const MPI_Comm & comm,
                 const MPI_Group &group,
                 const int        tag,
                 MPI_Comm *       new_comm);
#endif

    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm &           comm,
                                  const IndexSet::size_type &local_size);

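    // Usage sketch (illustrative): with local_size == 3 on each of two
    // ranks, the result holds the contiguous ranges [0,3) and [3,6);
    // entry my_rank describes the indices owned by this process:
    //
    //   const std::vector<IndexSet> owned =
    //     Utilities::MPI::create_ascending_partitioning(comm, local_size);
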
#ifdef DEAL_II_WITH_MPI

    template <class Iterator, typename Number = long double>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm);
#endif

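    // Usage sketch (illustrative): mean and sample standard deviation of
    // values distributed across all ranks of `comm`; `samples` is a
    // placeholder:
    //
    //   std::vector<double> samples = ...;
    //   const auto stats = Utilities::MPI::mean_and_standard_deviation(
    //     samples.begin(), samples.end(), comm);
    //   // stats.first is the mean, stats.second the standard deviation
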
    template <typename T>
    T
    sum(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);

    template <typename T>
    void
    sum(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      sums);

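    // Usage sketch (illustrative): an all-reduce over one value, or
    // elementwise over a container; `local_error` and `values` are
    // placeholders:
    //
    //   const double global_error = Utilities::MPI::sum(local_error, comm);
    //
    //   std::vector<double> sums(values.size());
    //   Utilities::MPI::sum(values, comm, sums);
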
    template <int rank, int dim, typename Number>
    SymmetricTensor<rank, dim, Number>
    sum(const SymmetricTensor<rank, dim, Number> &local,
        const MPI_Comm &                           mpi_communicator);

    template <int rank, int dim, typename Number>
    Tensor<rank, dim, Number>
    sum(const Tensor<rank, dim, Number> &local,
        const MPI_Comm &                 mpi_communicator);

    template <typename Number>
    void
    sum(const SparseMatrix<Number> &local,
        const MPI_Comm &            mpi_communicator,
        SparseMatrix<Number> &      global);

    template <typename T>
    T
    max(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);

    template <typename T>
    void
    max(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      maxima);

    template <typename T>
    T
    min(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    min(const T &values, const MPI_Comm &mpi_communicator, U &minima);

    template <typename T>
    void
    min(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      minima);

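    // Usage sketch (illustrative): elementwise maxima across all ranks;
    // fixed-size arrays are handled by the overloads further down in this
    // header. `dt`, `error`, and `comm` are placeholders:
    //
    //   double local[2] = {dt, error};
    //   double maxima[2];
    //   Utilities::MPI::max(local, comm, maxima);
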
    struct MinMaxAvg
    {
      double sum;

      double min;

      double max;

      unsigned int min_index;

      unsigned int max_index;

      double avg;
    };

    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);

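    // Usage sketch (illustrative): a single collective call that yields
    // sum, minimum, maximum, average, and the ranks holding the extrema:
    //
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg(my_value, comm);
    //   // stats.avg, stats.min_index, stats.max_index, ...
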
    class MPI_InitFinalize
    {
    public:
      MPI_InitFinalize(
        int &              argc,
        char **&           argv,
        const unsigned int max_num_threads = numbers::invalid_unsigned_int);

      ~MPI_InitFinalize();
    };

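    // Usage sketch (illustrative): RAII initialization at the top of
    // main(); MPI is finalized automatically when the object goes out of
    // scope. The last argument limits the number of threads per rank:
    //
    //   int main(int argc, char **argv)
    //   {
    //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
    //     ...
    //   }
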
    bool
    job_supports_mpi();

    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send);

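    // Usage sketch (illustrative): each process chooses the ranks it
    // wants to talk to as map keys; the result is keyed by sender rank.
    // The payload type only needs to be serializable by Utilities::pack():
    //
    //   std::map<unsigned int, std::vector<double>> to_send = ...;
    //   const auto received = Utilities::MPI::some_to_some(comm, to_send);
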
    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object_to_send);

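    // Usage sketch (illustrative): afterwards, gathered[i] holds the
    // object contributed by rank i, on every process; `my_string` is a
    // placeholder for any packable local object:
    //
    //   const std::vector<std::string> gathered =
    //     Utilities::MPI::all_gather(comm, my_string);
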
    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process = 0);

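    // Usage sketch (illustrative): like all_gather(), but only
    // root_process receives the data; every other rank gets an empty
    // vector back:
    //
    //   const std::vector<std::string> on_root =
    //     Utilities::MPI::gather(comm, my_string, /*root_process=*/0);
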
#ifndef DOXYGEN
    // declaration for an internal function that lives in mpi.templates.h
    namespace internal
    {
      template <typename T>
      void
      all_reduce(const MPI_Op &            mpi_op,
                 const ArrayView<const T> &values,
                 const MPI_Comm &          mpi_communicator,
                 const ArrayView<T> &      output);
    } // namespace internal

    // Since these depend on N they must live in the header file
    template <typename T, unsigned int N>
    void
    sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
    {
      internal::all_reduce(MPI_SUM,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(sums, N));
    }

    template <typename T, unsigned int N>
    void
    max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
    {
      internal::all_reduce(MPI_MAX,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(maxima, N));
    }

    template <typename T, unsigned int N>
    void
    min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
    {
      internal::all_reduce(MPI_MIN,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(minima, N));
    }

    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      Assert(objects_to_send.size() == 0,
             ExcMessage("Cannot send to more than one processor."));
      Assert(objects_to_send.find(0) != objects_to_send.end() ||
               objects_to_send.size() == 0,
             ExcMessage("Can only send to myself or to nobody."));
      return objects_to_send;
#  else
      std::vector<unsigned int> send_to(objects_to_send.size());
      {
        unsigned int i = 0;
        for (const auto &m : objects_to_send)
          send_to[i++] = m.first;
      }
      AssertDimension(send_to.size(), objects_to_send.size());

      const auto receive_from =
        Utilities::MPI::compute_point_to_point_communication_pattern(comm,
                                                                     send_to);

      // Sending buffers
      std::vector<std::vector<char>> buffers_to_send(send_to.size());
      std::vector<MPI_Request>       buffer_send_requests(send_to.size());
      {
        unsigned int i = 0;
        for (const auto &rank_obj : objects_to_send)
          {
            const auto &rank   = rank_obj.first;
            buffers_to_send[i] = Utilities::pack(rank_obj.second);
            const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                       buffers_to_send[i].size(),
                                       MPI_CHAR,
                                       rank,
                                       21,
                                       comm,
                                       &buffer_send_requests[i]);
            AssertThrowMPI(ierr);
            ++i;
          }
      }

      // Receiving buffers
      std::map<unsigned int, T> received_objects;
      {
        std::vector<char> buffer;
        // We do this on a first come/first served basis
        for (unsigned int i = 0; i < receive_from.size(); ++i)
          {
            // Probe what's going on. Take data from the first available
            // sender.
            MPI_Status status;
            int        ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
            AssertThrowMPI(ierr);

            // Length of the message
            int len;
            ierr = MPI_Get_count(&status, MPI_CHAR, &len);
            AssertThrowMPI(ierr);
            buffer.resize(len);

            // Source rank
            const unsigned int rank = status.MPI_SOURCE;

            // Actually receive the message
            ierr = MPI_Recv(
              buffer.data(), len, MPI_CHAR, rank, 21, comm, MPI_STATUS_IGNORE);
            AssertThrowMPI(ierr);
            Assert(received_objects.find(rank) == received_objects.end(),
                   ExcInternalError(
                     "I should not receive again from this rank"));
            received_objects[rank] = Utilities::unpack<T>(buffer);
          }
      }

      // Wait until all objects have been sent, so that the send buffers
      // remain valid until the operations complete.
      const int ierr = MPI_Waitall(send_to.size(),
                                   buffer_send_requests.data(),
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);

      return received_objects;
#  endif // deal.II with MPI
    }

    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      std::vector<T> v(1, object);
      return v;
#  else
      const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);

      std::vector<char> buffer = Utilities::pack(object);

      int n_local_data = buffer.size();

      // Vector to store the size of the local buffer on every process
      std::vector<int> size_all_data(n_procs, 0);

      // Exchanging the size of each buffer
      int ierr = MPI_Allgather(
        &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
      AssertThrowMPI(ierr);

      // Now computing the displacement, relative to recvbuf,
      // at which to store the incoming buffer
      std::vector<int> rdispls(n_procs);
      rdispls[0] = 0;
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

      // Now exchange the buffers themselves:
      std::vector<char> received_unrolled_buffer(rdispls.back() +
                                                 size_all_data.back());

      ierr = MPI_Allgatherv(buffer.data(),
                            n_local_data,
                            MPI_CHAR,
                            received_unrolled_buffer.data(),
                            size_all_data.data(),
                            rdispls.data(),
                            MPI_CHAR,
                            comm);
      AssertThrowMPI(ierr);

      std::vector<T> received_objects(n_procs);
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                           rdispls[i],
                                         received_unrolled_buffer.begin() +
                                           rdispls[i] + size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }

      return received_objects;
#  endif
    }

    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      (void)root_process;
      std::vector<T> v(1, object_to_send);
      return v;
#  else
      const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
      const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);

      Assert(root_process < n_procs, ExcIndexRange(root_process, 0, n_procs));

      std::vector<char> buffer       = Utilities::pack(object_to_send);
      int               n_local_data = buffer.size();

      // Vector to store the size of the local buffer on every process;
      // only the root process needs to allocate memory for that purpose.
      std::vector<int> size_all_data;
      if (my_rank == root_process)
        size_all_data.resize(n_procs, 0);

      // Exchanging the size of each buffer
      int ierr = MPI_Gather(&n_local_data,
                            1,
                            MPI_INT,
                            size_all_data.data(),
                            1,
                            MPI_INT,
                            root_process,
                            comm);
      AssertThrowMPI(ierr);

      // Now computing the displacement, relative to recvbuf,
      // at which to store the incoming buffer; only for root
      std::vector<int> rdispls;
      if (my_rank == root_process)
        {
          rdispls.resize(n_procs, 0);
          for (unsigned int i = 1; i < n_procs; ++i)
            rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
        }
      // exchange the buffer:
      std::vector<char> received_unrolled_buffer;
      if (my_rank == root_process)
        received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

      ierr = MPI_Gatherv(buffer.data(),
                         n_local_data,
                         MPI_CHAR,
                         received_unrolled_buffer.data(),
                         size_all_data.data(),
                         rdispls.data(),
                         MPI_CHAR,
                         root_process,
                         comm);
      AssertThrowMPI(ierr);

      std::vector<T> received_objects;

      if (my_rank == root_process)
        {
          received_objects.resize(n_procs);

          for (unsigned int i = 0; i < n_procs; ++i)
            {
              const std::vector<char> local_buffer(
                received_unrolled_buffer.begin() + rdispls[i],
                received_unrolled_buffer.begin() + rdispls[i] +
                  size_all_data[i]);
              received_objects[i] = Utilities::unpack<T>(local_buffer);
            }
        }
      return received_objects;
#  endif
    }


#  ifdef DEAL_II_WITH_MPI
    template <class Iterator, typename Number>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm)
    {
      // Below is a simple and straightforward implementation. More elaborate
      // options are:
      // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
      using Std        = typename numbers::NumberTraits<Number>::real_type;
      const Number sum = std::accumulate(begin, end, Number(0.));

      const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
      Assert(size > 0, ExcDivideByZero());
      const Number mean =
        Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
      Std sq_sum = 0.;
      std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
        sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
      });
      sq_sum = Utilities::MPI::sum(sq_sum, comm);
      // Normalize by (size - 1) to obtain the unbiased sample standard
      // deviation.
      return std::make_pair(mean,
                            std::sqrt(sq_sum / static_cast<Std>(size - 1)));
    }
#  endif

#endif
  } // end of namespace MPI
} // end of namespace Utilities


DEAL_II_NAMESPACE_CLOSE

#endif