Reference documentation for deal.II version Git 409ee4b167 2020-08-14 09:46:12 -0400
mpi.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_mpi_h
#define dealii_mpi_h

#include <deal.II/base/config.h>

#include <deal.II/base/mpi_tags.h>
#include <deal.II/base/numbers.h>

#include <map>
#include <numeric>
#include <set>
#include <vector>

#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// Without MPI, we would still like to use some constructs with MPI data
// types. Therefore, create some dummies.
using MPI_Comm     = int;
using MPI_Request  = int;
using MPI_Datatype = int;
using MPI_Op       = int;
#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#  ifndef MPI_REQUEST_NULL
#    define MPI_REQUEST_NULL 0
#  endif
#  ifndef MPI_MIN
#    define MPI_MIN 0
#  endif
#  ifndef MPI_MAX
#    define MPI_MAX 0
#  endif
#  ifndef MPI_SUM
#    define MPI_SUM 0
#  endif
#endif


// With an MPI library that implements the MPI 3.0 standard or newer, the
// functions used by deal.II accept 'const' send buffers directly and this
// macro does nothing; for older libraries it casts the constness away.
#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)

#    define DEAL_II_MPI_CONST_CAST(expr) (expr)

#  else

#    include <type_traits>

#    define DEAL_II_MPI_CONST_CAST(expr)      \
       const_cast<typename std::remove_const< \
         typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)

#  endif
#endif



DEAL_II_NAMESPACE_OPEN


// Forward type declarations to allow MPI sums over tensorial types
#ifndef DOXYGEN
template <int rank, int dim, typename Number>
class Tensor;
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <typename Number>
class SparseMatrix;
class IndexSet;
#endif

namespace Utilities
{
  /**
   * Given the total number of elements @p total_size, create an evenly
   * distributed 1:1 partitioning of the elements among @p n_partitions and
   * return the IndexSet owned by partition @p my_partition_id.
   */
  IndexSet
  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
                                         const unsigned int n_partitions,
                                         const IndexSet::size_type total_size);

  /**
   * A namespace for utility functions that abstract certain operations using
   * the Message Passing Interface (MPI).
   */
  namespace MPI
  {
    /**
     * Return the number of MPI processes in the given communicator.
     */
    unsigned int
    n_mpi_processes(const MPI_Comm &mpi_communicator);

    /**
     * Return the rank of the present MPI process in the space of processes
     * described by the given communicator.
     */
    unsigned int
    this_mpi_process(const MPI_Comm &mpi_communicator);
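
    // Usage sketch (not part of the original header): the two query functions
    // above are commonly used to split work between ranks. MPI_COMM_WORLD is
    // used here purely for illustration.
    //
    // @code
    //   const unsigned int n_ranks =
    //     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
    //   const unsigned int my_rank =
    //     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    //   if (my_rank == 0)
    //     std::cout << "Running with " << n_ranks << " MPI processes\n";
    // @endcode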

    /**
     * Given a list of ranks this process wants to send data to
     * (@p destinations), return the list of ranks that will send data to it.
     */
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);

    /**
     * Like the previous function, but only return how many processes will
     * send data to the current one.
     */
    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);

    /**
     * Return a duplicate of the given communicator. The caller is
     * responsible for freeing it again, e.g. via free_communicator().
     */
    MPI_Comm
    duplicate_communicator(const MPI_Comm &mpi_communicator);

    /**
     * Free a communicator that was previously duplicated.
     */
    void
    free_communicator(MPI_Comm &mpi_communicator);

    /**
     * RAII class that duplicates a communicator in its constructor and frees
     * the duplicate again in its destructor.
     */
    class DuplicatedCommunicator
    {
    public:
      /**
       * Create a duplicate of the given @p communicator.
       */
      explicit DuplicatedCommunicator(const MPI_Comm &communicator)
        : comm(duplicate_communicator(communicator))
      {}

      /**
       * Do not allow making copies.
       */
      DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;

      /**
       * The destructor frees the communicator automatically.
       */
      ~DuplicatedCommunicator()
      {
        free_communicator(comm);
      }

      /**
       * Access the stored communicator.
       */
      const MPI_Comm &operator*() const
      {
        return comm;
      }

      /**
       * Do not allow assignment of this class.
       */
      DuplicatedCommunicator &
      operator=(const DuplicatedCommunicator &) = delete;

    private:
      /**
       * The communicator of course.
       */
      MPI_Comm comm;
    };
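
    // Usage sketch (illustrative, not from the original header): duplicating
    // a communicator gives library code its own context so that its messages
    // cannot be confused with those of other users of the original
    // communicator; the RAII wrapper frees the duplicate automatically.
    //
    // @code
    //   {
    //     Utilities::MPI::DuplicatedCommunicator dup(MPI_COMM_WORLD);
    //     // ... communicate on *dup instead of MPI_COMM_WORLD ...
    //   } // the duplicated communicator is freed here
    // @endcode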

    /**
     * A mutex-like class that guards a critical section of communication for
     * a set of processes: only one critical section protected by the same
     * mutex can be active on a communicator at any given time.
     */
    class CollectiveMutex
    {
    public:
      /**
       * RAII helper that acquires the mutex in its constructor and releases
       * it in its destructor.
       */
      class ScopedLock
      {
      public:
        /**
         * Constructor. Acquires the mutex via lock().
         */
        explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
          : mutex(mutex)
          , comm(comm)
        {
          mutex.lock(comm);
        }

        /**
         * Destructor. Releases the mutex via unlock().
         */
        ~ScopedLock()
        {
          mutex.unlock(comm);
        }

      private:
        /**
         * A reference to the mutex.
         */
        CollectiveMutex &mutex;

        /**
         * The communicator on which the mutex was acquired.
         */
        const MPI_Comm comm;
      };

      /**
       * Constructor of this class.
       */
      explicit CollectiveMutex();

      /**
       * Destructor. Assumes the lock is not currently held.
       */
      ~CollectiveMutex();

      /**
       * Acquire the mutex; needs to be called by all processes in @p comm.
       */
      void
      lock(MPI_Comm comm);

      /**
       * Release the mutex; needs to be called by all processes in @p comm.
       */
      void
      unlock(MPI_Comm comm);

    private:
      /**
       * Keep track of whether we currently hold this lock.
       */
      bool locked;

      /**
       * The request to keep track of the non-blocking barrier.
       */
      MPI_Request request;
    };
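
    // Usage sketch (illustrative; `comm` stands for whatever communicator the
    // surrounding code uses): a static CollectiveMutex plus a ScopedLock is
    // the pattern used by some_to_some() further down in this file to keep
    // consecutive point-to-point exchanges on the same communicator from
    // mixing up their messages.
    //
    // @code
    //   static Utilities::MPI::CollectiveMutex      mutex;
    //   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
    //   // ... point-to-point communication protected by the mutex ...
    // @endcode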

    /**
     * Create a new communicator @p new_comm consisting of the processes in
     * @p group of the communicator @p comm; the @p tag can be used to
     * distinguish concurrent calls. This mirrors the functionality of
     * MPI_Comm_create_group().
     */
#ifdef DEAL_II_WITH_MPI
    int
    create_group(const MPI_Comm & comm,
                 const MPI_Group &group,
                 const int        tag,
                 MPI_Comm *       new_comm);
#endif

    /**
     * Given the number of locally owned elements @p local_size, create a 1:1
     * partitioning of the elements across the communicator: the returned
     * vector contains one contiguous IndexSet per process, in ascending rank
     * order.
     */
    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm &          comm,
                                  const IndexSet::size_type local_size);

    /**
     * Given the total number of elements @p total_size, create an evenly
     * distributed 1:1 partitioning of the elements across the communicator
     * and return the IndexSet owned by the calling process.
     */
    IndexSet
    create_evenly_distributed_partitioning(
      const MPI_Comm &          comm,
      const IndexSet::size_type total_size);
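
    // Usage sketch (illustrative; the element count is made up): with a local
    // size of 10 on every rank, create_ascending_partitioning() yields the
    // contiguous ranges [0,10), [10,20), ... in ascending rank order.
    //
    // @code
    //   const std::vector<IndexSet> owned =
    //     Utilities::MPI::create_ascending_partitioning(MPI_COMM_WORLD, 10);
    //   const IndexSet &my_indices =
    //     owned[Utilities::MPI::this_mpi_process(MPI_COMM_WORLD)];
    // @endcode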

#ifdef DEAL_II_WITH_MPI
    /**
     * Compute the mean and standard deviation across all MPI processes of
     * the values in the range [@p begin, @p end), which are combined into
     * one global sample before the statistics are computed.
     */
    template <class Iterator, typename Number = long double>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm);
#endif
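
    // Usage sketch (illustrative; `local_values` is a made-up variable that
    // holds this rank's part of the sample):
    //
    // @code
    //   std::vector<double> local_values = ...;
    //   const auto stats =
    //     Utilities::MPI::mean_and_standard_deviation(local_values.begin(),
    //                                                 local_values.end(),
    //                                                 MPI_COMM_WORLD);
    //   // stats.first is the global mean, stats.second the standard
    //   // deviation over the values of all ranks combined.
    // @endcode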

    /**
     * Return the sum over all processors of the value @p t, as computed over
     * the given communicator. All processes obtain the result.
     */
    template <typename T>
    T
    sum(const T &t, const MPI_Comm &mpi_communicator);
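
    // Usage sketch (illustrative; `compute_local_error()` is a hypothetical
    // user function): the reduction functions in this group all follow the
    // same call pattern.
    //
    // @code
    //   const double local_error = compute_local_error();
    //   const double total_error =
    //     Utilities::MPI::sum(local_error, MPI_COMM_WORLD);
    //   const double worst_error =
    //     Utilities::MPI::max(local_error, MPI_COMM_WORLD);
    // @endcode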

    /**
     * Like the previous function, but take the sums over the elements of an
     * array of type T, filling the output argument @p sums. T and U must
     * decay to the same type, e.g. they may only differ by const-ness.
     */
    template <typename T, typename U>
    void
    sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);

    /**
     * Like the previous function, but for ArrayView arguments.
     */
    template <typename T>
    void
    sum(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      sums);

    /**
     * Perform an MPI sum of the entries of a symmetric tensor.
     */
    template <int rank, int dim, typename Number>
    SymmetricTensor<rank, dim, Number>
    sum(const SymmetricTensor<rank, dim, Number> &local,
        const MPI_Comm &                          mpi_communicator);

    /**
     * Perform an MPI sum of the entries of a tensor.
     */
    template <int rank, int dim, typename Number>
    Tensor<rank, dim, Number>
    sum(const Tensor<rank, dim, Number> &local,
        const MPI_Comm &                 mpi_communicator);

    /**
     * Perform an MPI sum of the entries of a SparseMatrix. The sparsity
     * patterns of @p local and @p global have to be the same on all
     * processes.
     */
    template <typename Number>
    void
    sum(const SparseMatrix<Number> &local,
        const MPI_Comm &            mpi_communicator,
        SparseMatrix<Number> &      global);

    /**
     * Return the maximum over all processors of the value @p t.
     */
    template <typename T>
    T
    max(const T &t, const MPI_Comm &mpi_communicator);

    /**
     * Like the previous function, but take the maxima over the elements of
     * an array, filling the output argument @p maxima.
     */
    template <typename T, typename U>
    void
    max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);

    /**
     * Like the previous function, but for ArrayView arguments.
     */
    template <typename T>
    void
    max(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      maxima);

    /**
     * Return the minimum over all processors of the value @p t.
     */
    template <typename T>
    T
    min(const T &t, const MPI_Comm &mpi_communicator);

    /**
     * Like the previous function, but take the minima over the elements of
     * an array, filling the output argument @p minima.
     */
    template <typename T, typename U>
    void
    min(const T &values, const MPI_Comm &mpi_communicator, U &minima);

    /**
     * Like the previous function, but for ArrayView arguments.
     */
    template <typename T>
    void
    min(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      minima);

    /**
     * A data structure to store the result of min_max_avg().
     */
    struct MinMaxAvg
    {
      /**
       * The sum over all contributing processes.
       */
      double sum;

      /**
       * The minimum value.
       */
      double min;

      /**
       * The maximum value.
       */
      double max;

      /**
       * The rank of a process that contributed the minimum value.
       */
      unsigned int min_index;

      /**
       * The rank of a process that contributed the maximum value.
       */
      unsigned int max_index;

      /**
       * The average of the contributed values.
       */
      double avg;
    };

    /**
     * Return sum, average, minimum, maximum, and the ranks of the processes
     * holding the minimum and maximum of the value @p my_value contributed
     * by each process.
     */
    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);

    /**
     * Same as above, but returning the statistics element-wise for a whole
     * vector of values.
     */
    std::vector<MinMaxAvg>
    min_max_avg(const std::vector<double> &my_value,
                const MPI_Comm &           mpi_communicator);


    /**
     * Same as above, but writing the element-wise results into the
     * pre-allocated ArrayView @p result.
     */
    void
    min_max_avg(const ArrayView<const double> &my_values,
                const ArrayView<MinMaxAvg> &   result,
                const MPI_Comm &               mpi_communicator);
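
    // Usage sketch (illustrative; `local_wall_time` is a made-up variable):
    // min_max_avg() is handy for reporting load balance.
    //
    // @code
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg(local_wall_time, MPI_COMM_WORLD);
    //   // stats.min, stats.max, stats.avg summarize the value across ranks;
    //   // stats.min_index and stats.max_index identify the ranks that hold
    //   // the extremes.
    // @endcode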


    /**
     * A class whose constructor initializes MPI (and, if deal.II is
     * configured to use them, dependent libraries such as PETSc/SLEPc and
     * p4est) and whose destructor de-initializes them again. It also
     * controls the number of threads deal.II may use on each MPI process.
     *
     * An object of this class is typically created at the top of main() of
     * every MPI-enabled deal.II program.
     */
    class MPI_InitFinalize
    {
    public:
      /**
       * Constructor. Takes the arguments of main() plus the maximum number
       * of threads this process is allowed to use.
       */
      MPI_InitFinalize(
        int &              argc,
        char **&           argv,
        const unsigned int max_num_threads = numbers::invalid_unsigned_int);

      /**
       * Destructor. Finalizes MPI and the other libraries initialized in the
       * constructor.
       */
      ~MPI_InitFinalize();

      /**
       * Register a request on which this class needs to call MPI_Wait before
       * finalizing MPI.
       */
      static void
      register_request(MPI_Request &request);

      /**
       * Unregister a request previously added via register_request().
       */
      static void
      unregister_request(MPI_Request &request);

    private:
      /**
       * Requests to wait on before finalizing.
       */
      static std::set<MPI_Request *> requests;
    };
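
    // Usage sketch: this is the typical pattern at the top of main() in the
    // deal.II tutorial programs (the thread limit of 1 is just an example
    // value).
    //
    // @code
    //   int main(int argc, char **argv)
    //   {
    //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
    //     // ... run the program; MPI is finalized automatically when
    //     // mpi_initialization goes out of scope ...
    //   }
    // @endcode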

    /**
     * Return whether the current job runs with MPI support and MPI has been
     * initialized, i.e. whether the functions in this namespace can actually
     * communicate.
     */
    bool
    job_supports_mpi();

    /**
     * Send the objects in the map @p objects_to_send to the processes given
     * by its keys, and receive a map of the objects other processes sent to
     * the current one. The objects are serialized with Utilities::pack() and
     * deserialized with Utilities::unpack(), so T must be serializable.
     */
    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send);
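
    // Usage sketch (illustrative; the destination rank and payload are made
    // up): each process lists the ranks it wants to reach as map keys and
    // gets back whatever other ranks addressed to it.
    //
    // @code
    //   std::map<unsigned int, std::vector<double>> to_send;
    //   to_send[neighbor_rank] = {1.0, 2.0, 3.0};
    //   const std::map<unsigned int, std::vector<double>> received =
    //     Utilities::MPI::some_to_some(MPI_COMM_WORLD, to_send);
    // @endcode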

    /**
     * A generalization of MPI_Allgather: gather @p object_to_send from all
     * processes and return the vector of gathered objects, ordered by rank,
     * on every process. T must be serializable via Utilities::pack().
     */
    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object_to_send);

    /**
     * A generalization of MPI_Gather: gather @p object_to_send from all
     * processes on @p root_process only; the returned vector is empty on all
     * other processes.
     */
    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process = 0);
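
    // Usage sketch (illustrative): all_gather() returns the collected
    // objects on every rank, gather() only on the chosen root process.
    //
    // @code
    //   const std::string my_name =
    //     "rank " + std::to_string(
    //                 Utilities::MPI::this_mpi_process(MPI_COMM_WORLD));
    //   const std::vector<std::string> everyone =
    //     Utilities::MPI::all_gather(MPI_COMM_WORLD, my_name);
    //   const std::vector<std::string> on_root =
    //     Utilities::MPI::gather(MPI_COMM_WORLD, my_name, /*root_process=*/0);
    // @endcode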

    /**
     * Given a partitioned index space, determine for each index in
     * @p indices_to_look_up the rank that owns it within @p owned_indices.
     */
    std::vector<unsigned int>
    compute_index_owner(const IndexSet &owned_indices,
                        const IndexSet &indices_to_look_up,
                        const MPI_Comm &comm);

    /**
     * Compute the union of the entries of @p vec over all processes; every
     * process obtains the complete result.
     */
    template <typename T>
    std::vector<T>
    compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);

    /**
     * The same as above, but for std::set.
     */
    template <typename T>
    std::set<T>
    compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
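
    // Usage sketch (illustrative; `my_rank` is a made-up variable): every
    // process contributes its local set and all processes receive the union.
    //
    // @code
    //   std::set<unsigned int> locally_relevant = {0u, my_rank};
    //   const std::set<unsigned int> globally_relevant =
    //     Utilities::MPI::compute_set_union(locally_relevant, MPI_COMM_WORLD);
    // @endcode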

#ifndef DOXYGEN
    // declaration for an internal function that lives in mpi.templates.h
    namespace internal
    {
      template <typename T>
      void
      all_reduce(const MPI_Op &            mpi_op,
                 const ArrayView<const T> &values,
                 const MPI_Comm &          mpi_communicator,
                 const ArrayView<T> &      output);
    }

    // Since these depend on N they must live in the header file
    template <typename T, unsigned int N>
    void
    sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
    {
      internal::all_reduce(MPI_SUM,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(sums, N));
    }

    template <typename T, unsigned int N>
    void
    max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
    {
      internal::all_reduce(MPI_MAX,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(maxima, N));
    }

    template <typename T, unsigned int N>
    void
    min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
    {
      internal::all_reduce(MPI_MIN,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(minima, N));
    }

    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      Assert(objects_to_send.size() < 2,
             ExcMessage("Cannot send to more than one processor."));
      Assert(objects_to_send.find(0) != objects_to_send.end() ||
               objects_to_send.size() == 0,
             ExcMessage("Can only send to myself or to nobody."));
      return objects_to_send;
#  else
      const auto my_proc = this_mpi_process(comm);

      std::map<unsigned int, T> received_objects;

      std::vector<unsigned int> send_to;
      send_to.reserve(objects_to_send.size());
      for (const auto &m : objects_to_send)
        if (m.first == my_proc)
          received_objects[my_proc] = m.second;
        else
          send_to.emplace_back(m.first);

      const unsigned int n_point_point_communications =
        Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);

      // Protect the following communication:
      static CollectiveMutex      mutex;
      CollectiveMutex::ScopedLock lock(mutex, comm);

      // If we have something to send, or we expect something from other
      // processors, we need to visit one of the two scopes below. Otherwise,
      // no other action is required by this mpi process, and we can safely
      // return.
      if (send_to.size() == 0 && n_point_point_communications == 0)
        return received_objects;

      const int mpi_tag =
        internal::Tags::compute_point_to_point_communication_pattern;

      // Sending buffers
      std::vector<std::vector<char>> buffers_to_send(send_to.size());
      std::vector<MPI_Request>       buffer_send_requests(send_to.size());
      {
        unsigned int i = 0;
        for (const auto &rank_obj : objects_to_send)
          if (rank_obj.first != my_proc)
            {
              const auto &rank   = rank_obj.first;
              buffers_to_send[i] = Utilities::pack(rank_obj.second);
              const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                             buffers_to_send[i].size(),
                                             MPI_CHAR,
                                             rank,
                                             mpi_tag,
                                             comm,
                                             &buffer_send_requests[i]);
              AssertThrowMPI(ierr);
              ++i;
            }
      }

      // Fill the output map
      {
        std::vector<char> buffer;
        // We do this on a first come/first served basis
        for (unsigned int i = 0; i < n_point_point_communications; ++i)
          {
            // Probe what's going on. Take data from the first available sender
            MPI_Status status;
            int        ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
            AssertThrowMPI(ierr);

            // Length of the message
            int len;
            ierr = MPI_Get_count(&status, MPI_CHAR, &len);
            AssertThrowMPI(ierr);
            buffer.resize(len);

            // Source rank
            const unsigned int rank = status.MPI_SOURCE;

            // Actually receive the message
            ierr = MPI_Recv(buffer.data(),
                            len,
                            MPI_CHAR,
                            status.MPI_SOURCE,
                            status.MPI_TAG,
                            comm,
                            MPI_STATUS_IGNORE);
            AssertThrowMPI(ierr);
            Assert(received_objects.find(rank) == received_objects.end(),
                   ExcInternalError(
                     "I should not receive again from this rank"));
            received_objects[rank] = Utilities::unpack<T>(buffer);
          }
      }

      // Wait to have sent all objects.
      const int ierr = MPI_Waitall(send_to.size(),
                                   buffer_send_requests.data(),
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);

      return received_objects;
#  endif // deal.II with MPI
    }

    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object)
    {
      if (job_supports_mpi() == false)
        return {object};

#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      std::vector<T> v(1, object);
      return v;
#  else
      const auto n_procs = Utilities::MPI::n_mpi_processes(comm);

      std::vector<char> buffer = Utilities::pack(object);

      int n_local_data = buffer.size();

      // Vector to store the size of loc_data_array for every process
      std::vector<int> size_all_data(n_procs, 0);

      // Exchanging the size of each buffer
      MPI_Allgather(
        &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);

      // Now computing the displacement, relative to recvbuf,
      // at which to store the incoming buffer
      std::vector<int> rdispls(n_procs);
      rdispls[0] = 0;
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

      // Step 3: exchange the buffer:
      std::vector<char> received_unrolled_buffer(rdispls.back() +
                                                 size_all_data.back());

      MPI_Allgatherv(buffer.data(),
                     n_local_data,
                     MPI_CHAR,
                     received_unrolled_buffer.data(),
                     size_all_data.data(),
                     rdispls.data(),
                     MPI_CHAR,
                     comm);

      std::vector<T> received_objects(n_procs);
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                           rdispls[i],
                                         received_unrolled_buffer.begin() +
                                           rdispls[i] + size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }

      return received_objects;
#  endif
    }

    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      (void)root_process;
      std::vector<T> v(1, object_to_send);
      return v;
#  else
      const auto n_procs = Utilities::MPI::n_mpi_processes(comm);
      const auto my_rank = Utilities::MPI::this_mpi_process(comm);

      AssertIndexRange(root_process, n_procs);

      std::vector<char> buffer       = Utilities::pack(object_to_send);
      int               n_local_data = buffer.size();

      // Vector to store the size of loc_data_array for every process
      // only the root process needs to allocate memory for that purpose
      std::vector<int> size_all_data;
      if (my_rank == root_process)
        size_all_data.resize(n_procs, 0);

      // Exchanging the size of each buffer
      int ierr = MPI_Gather(&n_local_data,
                            1,
                            MPI_INT,
                            size_all_data.data(),
                            1,
                            MPI_INT,
                            root_process,
                            comm);
      AssertThrowMPI(ierr);

      // Now computing the displacement, relative to recvbuf,
      // at which to store the incoming buffer; only for root
      std::vector<int> rdispls;
      if (my_rank == root_process)
        {
          rdispls.resize(n_procs, 0);
          for (unsigned int i = 1; i < n_procs; ++i)
            rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
        }
      // exchange the buffer:
      std::vector<char> received_unrolled_buffer;
      if (my_rank == root_process)
        received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

      ierr = MPI_Gatherv(buffer.data(),
                         n_local_data,
                         MPI_CHAR,
                         received_unrolled_buffer.data(),
                         size_all_data.data(),
                         rdispls.data(),
                         MPI_CHAR,
                         root_process,
                         comm);
      AssertThrowMPI(ierr);

      std::vector<T> received_objects;

      if (my_rank == root_process)
        {
          received_objects.resize(n_procs);

          for (unsigned int i = 0; i < n_procs; ++i)
            {
              const std::vector<char> local_buffer(
                received_unrolled_buffer.begin() + rdispls[i],
                received_unrolled_buffer.begin() + rdispls[i] +
                  size_all_data[i]);
              received_objects[i] = Utilities::unpack<T>(local_buffer);
            }
        }
      return received_objects;
#  endif
    }


#  ifdef DEAL_II_WITH_MPI
    template <class Iterator, typename Number>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm)
    {
      // Below we use a simple and straightforward implementation. More
      // elaborate options are:
      // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
      // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
      using Std        = typename numbers::NumberTraits<Number>::real_type;
      const Number sum = std::accumulate(begin, end, Number(0.));

      const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
      Assert(size > 0, ExcDivideByZero());
      const Number mean =
        Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
      Std sq_sum = 0.;
      std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
        sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
      });
      sq_sum = Utilities::MPI::sum(sq_sum, comm);
      return std::make_pair(mean,
                            std::sqrt(sq_sum / static_cast<Std>(size - 1)));
    }
#  endif

#endif
  } // end of namespace MPI
} // end of namespace Utilities

DEAL_II_NAMESPACE_CLOSE

#endif