mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/array_view.h>
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/numbers.h>
24 
25 #include <map>
26 #include <numeric>
27 #include <set>
28 #include <vector>
29 
30 #if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
31 // without MPI, we would still like to use
32 // some constructs with MPI data
33 // types. Therefore, create some dummies
34 using MPI_Comm = int;
35 using MPI_Request = int;
36 using MPI_Datatype = int;
37 using MPI_Op = int;
38 # ifndef MPI_COMM_WORLD
39 # define MPI_COMM_WORLD 0
40 # endif
41 # ifndef MPI_COMM_SELF
42 # define MPI_COMM_SELF 0
43 # endif
44 # ifndef MPI_REQUEST_NULL
45 # define MPI_REQUEST_NULL 0
46 # endif
47 # ifndef MPI_MIN
48 # define MPI_MIN 0
49 # endif
50 # ifndef MPI_MAX
51 # define MPI_MAX 0
52 # endif
53 # ifndef MPI_SUM
54 # define MPI_SUM 0
55 # endif
56 #endif
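// With these dummy types in place, code written against the MPI-flavored
// interfaces declared below still compiles and runs in a serial build; a
// minimal sketch (behavior in a build without MPI shown in the comments):
//
//   const unsigned int n_ranks =
//     dealii::Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);  // 1 without MPI
//   const unsigned int my_rank =
//     dealii::Utilities::MPI::this_mpi_process(MPI_COMM_WORLD); // 0 without MPI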
57 
58 
59 
73 #ifdef DEAL_II_WITH_MPI
74 # if DEAL_II_MPI_VERSION_GTE(3, 0)
75 
76 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
77 
78 # else
79 
80 # include <type_traits>
81 
82 # define DEAL_II_MPI_CONST_CAST(expr) \
83  const_cast<typename std::remove_const< \
84  typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
85 
86 # endif
87 #endif
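// DEAL_II_MPI_CONST_CAST lets a single call site pass const buffers to MPI
// routines whose pre-3.0 signatures take non-const pointers; a sketch in
// which the buffer, destination, and communicator are illustrative only:
//
//   const std::vector<double> send_values = {1., 2., 3.};
//   MPI_Send(DEAL_II_MPI_CONST_CAST(send_values.data()),
//            static_cast<int>(send_values.size()),
//            MPI_DOUBLE, /*dest=*/0, /*tag=*/0, comm);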
88 
89 
90 
91 DEAL_II_NAMESPACE_OPEN
92 
93 
94 // Forward type declarations to allow MPI sums over tensorial types
95 #ifndef DOXYGEN
96 template <int rank, int dim, typename Number>
97 class Tensor;
98 template <int rank, int dim, typename Number>
99 class SymmetricTensor;
100 template <typename Number>
101 class SparseMatrix;
102 class IndexSet;
103 #endif
104 
105 namespace Utilities
106 {
114  namespace MPI
115  {
124  unsigned int
125  n_mpi_processes(const MPI_Comm &mpi_communicator);
126 
135  unsigned int
136  this_mpi_process(const MPI_Comm &mpi_communicator);
137 
159  std::vector<unsigned int>
160  compute_point_to_point_communication_pattern(
161  const MPI_Comm & mpi_comm,
162  const std::vector<unsigned int> &destinations);
163 
183  unsigned int
184  compute_n_point_to_point_communications(
185  const MPI_Comm & mpi_comm,
186  const std::vector<unsigned int> &destinations);
187 
204  MPI_Comm
205  duplicate_communicator(const MPI_Comm &mpi_communicator);
206 
216  void
217  free_communicator(MPI_Comm &mpi_communicator);
218 
231  class DuplicatedCommunicator
232  {
233  public:
237  explicit DuplicatedCommunicator(const MPI_Comm &communicator)
238  : comm(duplicate_communicator(communicator))
239  {}
240 
245  DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
249  ~DuplicatedCommunicator()
250  {
251  free_communicator(comm);
252  }
253 
257  const MPI_Comm &operator*() const
258  {
259  return comm;
260  }
261 
262 
266  DuplicatedCommunicator &
267  operator=(const DuplicatedCommunicator &) = delete;
268 
269  private:
273  MPI_Comm comm;
274  };
275 
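// A usage sketch for DuplicatedCommunicator: the duplicate is created in the
// constructor, accessed through operator*, and freed again when the object
// goes out of scope (the function call is illustrative):
//
//   {
//     Utilities::MPI::DuplicatedCommunicator duplicate(mpi_communicator);
//     do_communication_on(*duplicate);
//   } // the duplicated communicator is freed here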
305  class CollectiveMutex
306  {
307  public:
313  class ScopedLock
314  {
315  public:
319  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
320  : mutex(mutex)
321  , comm(comm)
322  {
323  mutex.lock(comm);
324  }
325 
329  ~ScopedLock()
330  {
331  mutex.unlock(comm);
332  }
333 
334  private:
338  CollectiveMutex &mutex;
342  const MPI_Comm comm;
343  };
344 
348  explicit CollectiveMutex();
349 
353  ~CollectiveMutex();
354 
361  void
362  lock(MPI_Comm comm);
363 
370  void
371  unlock(MPI_Comm comm);
372 
373  private:
377  bool locked;
378 
382  MPI_Request request;
383  };
384 
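// A usage sketch for CollectiveMutex::ScopedLock, mirroring its use further
// down in some_to_some(): all processes of the communicator acquire the mutex
// collectively before a round of point-to-point messages and release it when
// the lock leaves scope:
//
//   static Utilities::MPI::CollectiveMutex mutex;
//   {
//     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
//     // ... point-to-point communication that must not interleave with
//     //     other rounds on this communicator ...
//   }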
385 
386 
414 #ifdef DEAL_II_WITH_MPI
415  int
416  create_group(const MPI_Comm & comm,
417  const MPI_Group &group,
418  const int tag,
419  MPI_Comm * new_comm);
420 #endif
421 
430  std::vector<IndexSet>
431  create_ascending_partitioning(const MPI_Comm & comm,
432  const IndexSet::size_type &local_size);
433 
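// A sketch of create_ascending_partitioning(): every process contributes the
// number of elements it owns and obtains one contiguous IndexSet per rank,
// in ascending rank order (the sizes given here are illustrative):
//
//   // on 3 ranks with local_size = 4, 2, 3 the result describes the ranges
//   // [0,4), [4,6), [6,9); partition[my_rank] is the locally owned range
//   const std::vector<IndexSet> partition =
//     Utilities::MPI::create_ascending_partitioning(comm, local_size);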
434 #ifdef DEAL_II_WITH_MPI
435 
450  template <class Iterator, typename Number = long double>
451  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
452  mean_and_standard_deviation(const Iterator begin,
453  const Iterator end,
454  const MPI_Comm &comm);
455 #endif
456 
476  template <typename T>
477  T
478  sum(const T &t, const MPI_Comm &mpi_communicator);
479 
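// A minimal sketch of the reduction interface: every process passes its local
// contribution and all of them receive the global result (the variable names
// are illustrative):
//
//   const double local_error  = compute_local_error();
//   const double global_error =
//     Utilities::MPI::sum(local_error, mpi_communicator);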
489  template <typename T, typename U>
490  void
491  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
492 
502  template <typename T>
503  void
504  sum(const ArrayView<const T> &values,
505  const MPI_Comm & mpi_communicator,
506  const ArrayView<T> & sums);
507 
513  template <int rank, int dim, typename Number>
514  SymmetricTensor<rank, dim, Number>
515  sum(const SymmetricTensor<rank, dim, Number> &local,
516  const MPI_Comm & mpi_communicator);
517 
523  template <int rank, int dim, typename Number>
524  Tensor<rank, dim, Number>
525  sum(const Tensor<rank, dim, Number> &local,
526  const MPI_Comm & mpi_communicator);
527 
536  template <typename Number>
537  void
538  sum(const SparseMatrix<Number> &local,
539  const MPI_Comm & mpi_communicator,
540  SparseMatrix<Number> & global);
541 
561  template <typename T>
562  T
563  max(const T &t, const MPI_Comm &mpi_communicator);
564 
574  template <typename T, typename U>
575  void
576  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
577 
587  template <typename T>
588  void
589  max(const ArrayView<const T> &values,
590  const MPI_Comm & mpi_communicator,
591  const ArrayView<T> & maxima);
592 
612  template <typename T>
613  T
614  min(const T &t, const MPI_Comm &mpi_communicator);
615 
625  template <typename T, typename U>
626  void
627  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
628 
638  template <typename T>
639  void
640  min(const ArrayView<const T> &values,
641  const MPI_Comm & mpi_communicator,
642  const ArrayView<T> & minima);
643 
658  struct MinMaxAvg
659  {
664  double sum;
665 
670  double min;
671 
676  double max;
677 
686  unsigned int min_index;
687 
696  unsigned int max_index;
697 
702  double avg;
703  };
704 
719  MinMaxAvg
720  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
721 
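// A sketch of min_max_avg(): each process passes one number and all receive
// the combined statistics, including the ranks that held the extrema:
//
//   const Utilities::MPI::MinMaxAvg stats =
//     Utilities::MPI::min_max_avg(local_wall_time, mpi_communicator);
//   // stats.min, stats.max, stats.avg, stats.sum,
//   // stats.min_index, stats.max_index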
732  std::vector<MinMaxAvg>
733  min_max_avg(const std::vector<double> &my_value,
734  const MPI_Comm & mpi_communicator);
735 
736 
748  void
749  min_max_avg(const ArrayView<const double> &my_values,
750  const ArrayView<MinMaxAvg> & result,
751  const MPI_Comm & mpi_communicator);
752 
753 
797  class MPI_InitFinalize
798  {
799  public:
845  MPI_InitFinalize(
846  int & argc,
847  char **& argv,
848  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
849 
854  ~MPI_InitFinalize();
855 
882  static void
883  register_request(MPI_Request &request);
884 
888  static void
889  unregister_request(MPI_Request &request);
890 
891  private:
895  static std::set<MPI_Request *> requests;
896  };
897 
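// The usual place for MPI_InitFinalize is the first line of main(), so that
// MPI is initialized before any other deal.II object is created and finalized
// after the last one is destroyed; a sketch limiting the program to one
// thread per process:
//
//   int main(int argc, char *argv[])
//   {
//     dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
//     // ... the rest of the program ...
//   }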
909  bool
910  job_supports_mpi();
911 
931  template <typename T>
932  std::map<unsigned int, T>
933  some_to_some(const MPI_Comm & comm,
934  const std::map<unsigned int, T> &objects_to_send);
935 
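// A sketch of some_to_some(): the keys of the map are destination ranks, the
// mapped values are the (serializable) payloads; the result maps each sending
// rank to the object it sent to this process (the payload is illustrative):
//
//   std::map<unsigned int, std::vector<double>> to_send;
//   to_send[neighbor_rank] = data_for_that_neighbor;
//   const auto received = Utilities::MPI::some_to_some(comm, to_send);
//   // received[p] is what process p sent to us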
951  template <typename T>
952  std::vector<T>
953  all_gather(const MPI_Comm &comm, const T &object_to_send);
954 
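// A sketch of all_gather(): every process contributes one object and all of
// them receive a vector with one entry per rank, ordered by rank:
//
//   const std::string name = "rank " + std::to_string(my_rank);
//   const std::vector<std::string> all_names =
//     Utilities::MPI::all_gather(comm, name);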
972  template <typename T>
973  std::vector<T>
974  gather(const MPI_Comm & comm,
975  const T & object_to_send,
976  const unsigned int root_process = 0);
977 
1001  template <typename T1, typename T2>
1002  class ConsensusAlgorithmProcess
1003  {
1004  public:
1008  virtual ~ConsensusAlgorithmProcess() = default;
1009 
1016  virtual std::vector<unsigned int>
1017  compute_targets() = 0;
1018 
1027  virtual void
1028  create_request(const unsigned int other_rank,
1029  std::vector<T1> & send_buffer);
1030 
1039  virtual void
1040  prepare_buffer_for_answer(const unsigned int other_rank,
1041  std::vector<T2> & recv_buffer);
1042 
1055  virtual void
1056  answer_request(const unsigned int other_rank,
1057  const std::vector<T1> &buffer_recv,
1058  std::vector<T2> & request_buffer);
1059 
1067  virtual void
1068  read_answer(const unsigned int other_rank,
1069  const std::vector<T2> &recv_buffer);
1070  };
1071 
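// A sketch of how ConsensusAlgorithmProcess is meant to be used: derive from
// it, state which ranks you need answers from in compute_targets(), fill the
// outgoing request in create_request(), and consume the reply in
// read_answer(); the remaining hooks have default implementations that can be
// overridden as needed (the payloads below are illustrative):
//
//   class MyProcess : public Utilities::MPI::ConsensusAlgorithmProcess<int, int>
//   {
//   public:
//     virtual std::vector<unsigned int>
//     compute_targets() override { return {some_other_rank}; }
//
//     virtual void
//     create_request(const unsigned int other_rank,
//                    std::vector<int> & send_buffer) override
//     { send_buffer = {42}; }
//
//     virtual void
//     read_answer(const unsigned int      other_rank,
//                 const std::vector<int> &recv_buffer) override
//     { /* use recv_buffer */ }
//   };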
1102  template <typename T1, typename T2>
1103  class ConsensusAlgorithm
1104  {
1105  public:
1106  ConsensusAlgorithm(ConsensusAlgorithmProcess<T1, T2> &process,
1107  const MPI_Comm & comm);
1108 
1112  virtual ~ConsensusAlgorithm() = default;
1113 
1117  virtual void
1118  run() = 0;
1119 
1120  protected:
1124  ConsensusAlgorithmProcess<T1, T2> &process;
1125 
1129  const MPI_Comm &comm;
1130 
1134  const unsigned int my_rank;
1135 
1139  const unsigned int n_procs;
1140  };
1141 
1142 
1161  template <typename T1, typename T2>
1162  class ConsensusAlgorithm_NBX : public ConsensusAlgorithm<T1, T2>
1163  {
1164  public:
1171  ConsensusAlgorithm_NBX(ConsensusAlgorithmProcess<T1, T2> &process,
1172  const MPI_Comm & comm);
1173 
1177  virtual ~ConsensusAlgorithm_NBX() = default;
1178 
1182  virtual void
1183  run() override;
1184 
1185  private:
1186 #ifdef DEAL_II_WITH_MPI
1187 
1190  std::vector<unsigned int> targets;
1191 
1195  std::vector<std::vector<T1>> send_buffers;
1196 
1200  std::vector<MPI_Request> send_requests;
1201 
1205  std::vector<std::vector<T2>> recv_buffers;
1206 
1207 
1211  std::vector<MPI_Request> recv_requests;
1212 
1216  std::vector<std::unique_ptr<std::vector<T2>>> request_buffers;
1217 
1221  std::vector<std::unique_ptr<MPI_Request>> request_requests;
1222 
1223  // request for barrier
1224  MPI_Request barrier_request;
1225 #endif
1226 
1227 #ifdef DEBUG
1228 
1231  std::set<unsigned int> requesting_processes;
1232 #endif
1233 
1237  bool
1238  check_own_state();
1239 
1244  void
1245  signal_finish();
1246 
1251  bool
1252  check_global_state();
1253 
1258  void
1259  answer_requests();
1260 
1265  void
1266  start_communication();
1267 
1272  void
1273  clean_up_and_end_communication();
1274  };
1275 
1301  template <typename T1, typename T2>
1302  class ConsensusAlgorithm_PEX : public ConsensusAlgorithm<T1, T2>
1303  {
1304  public:
1311  ConsensusAlgorithm_PEX(ConsensusAlgorithmProcess<T1, T2> &process,
1312  const MPI_Comm & comm);
1313 
1317  virtual ~ConsensusAlgorithm_PEX() = default;
1318 
1322  virtual void
1323  run() override;
1324 
1325  private:
1326 #ifdef DEAL_II_WITH_MPI
1327 
1330  std::vector<unsigned int> targets;
1331 
1335  std::vector<unsigned int> sources;
1336 
1337  // data structures to send and receive requests
1338 
1342  std::vector<std::vector<T1>> send_buffers;
1343 
1347  std::vector<std::vector<T2>> recv_buffers;
1348 
1352  std::vector<MPI_Request> send_and_recv_buffers;
1353 
1357  std::vector<std::vector<T2>> requests_buffers;
1358 
1362  std::vector<MPI_Request> requests_answers;
1363 #endif
1364 
1369  void
1370  answer_requests(int index);
1371 
1376  unsigned int
1377  start_communication();
1378 
1383  void
1384  clean_up_and_end_communication();
1385  };
1386 
1400  template <typename T1, typename T2>
1401  class ConsensusAlgorithmSelector : public ConsensusAlgorithm<T1, T2>
1402  {
1403  public:
1410  ConsensusAlgorithmSelector(ConsensusAlgorithmProcess<T1, T2> &process,
1411  const MPI_Comm & comm);
1412 
1416  virtual ~ConsensusAlgorithmSelector() = default;
1417 
1423  virtual void
1424  run() override;
1425 
1426  private:
1427  // Pointer to the actual ConsensusAlgorithm implementation.
1428  std::shared_ptr<ConsensusAlgorithm<T1, T2>> consensus_algo;
1429  };
1430 
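// A sketch of driving a consensus algorithm: wrap the process object in the
// selector, which delegates to the NBX or PEX implementation above depending
// on the number of processes, and call run():
//
//   MyProcess process; // a ConsensusAlgorithmProcess<int, int>, see above
//   Utilities::MPI::ConsensusAlgorithmSelector<int, int> consensus(process, comm);
//   consensus.run();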
1475  std::vector<unsigned int>
1476  compute_index_owner(const IndexSet &owned_indices,
1477  const IndexSet &indices_to_look_up,
1478  const MPI_Comm &comm);
1479 
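// A sketch of compute_index_owner(): given the locally owned indices and the
// indices this process needs information about, the function returns, for
// each entry of indices_to_look_up, the rank that owns it (the index sets are
// illustrative):
//
//   const std::vector<unsigned int> owners =
//     Utilities::MPI::compute_index_owner(locally_owned_dofs,
//                                         ghost_dofs,
//                                         comm);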
1480 #ifndef DOXYGEN
1481  // declaration for an internal function that lives in mpi.templates.h
1482  namespace internal
1483  {
1484  template <typename T>
1485  void
1486  all_reduce(const MPI_Op & mpi_op,
1487  const ArrayView<const T> &values,
1488  const MPI_Comm & mpi_communicator,
1489  const ArrayView<T> & output);
1490  }
1491 
1492  // Since these depend on N they must live in the header file
1493  template <typename T, unsigned int N>
1494  void
1495  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1496  {
1497  internal::all_reduce(MPI_SUM,
1498  ArrayView<const T>(values, N),
1499  mpi_communicator,
1500  ArrayView<T>(sums, N));
1501  }
1502 
1503  template <typename T, unsigned int N>
1504  void
1505  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1506  {
1507  internal::all_reduce(MPI_MAX,
1508  ArrayView<const T>(values, N),
1509  mpi_communicator,
1510  ArrayView<T>(maxima, N));
1511  }
1512 
1513  template <typename T, unsigned int N>
1514  void
1515  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1516  {
1517  internal::all_reduce(MPI_MIN,
1518  ArrayView<const T>(values, N),
1519  mpi_communicator,
1520  ArrayView<T>(minima, N));
1521  }
1522 
1523  template <typename T>
1524  std::map<unsigned int, T>
1525  some_to_some(const MPI_Comm & comm,
1526  const std::map<unsigned int, T> &objects_to_send)
1527  {
1528 # ifndef DEAL_II_WITH_MPI
1529  (void)comm;
1530  Assert(objects_to_send.size() < 2,
1531  ExcMessage("Cannot send to more than one processor."));
1532  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1533  objects_to_send.size() == 0,
1534  ExcMessage("Can only send to myself or to nobody."));
1535  return objects_to_send;
1536 # else
1537  const auto my_proc = this_mpi_process(comm);
1538 
1539  std::map<unsigned int, T> received_objects;
1540 
1541  std::vector<unsigned int> send_to;
1542  send_to.reserve(objects_to_send.size());
1543  for (const auto &m : objects_to_send)
1544  if (m.first == my_proc)
1545  received_objects[my_proc] = m.second;
1546  else
1547  send_to.emplace_back(m.first);
1548 
1549  const unsigned int n_point_point_communications =
1550  Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1551 
1552  // Protect the following communication:
1553  static CollectiveMutex mutex;
1554  CollectiveMutex::ScopedLock lock(mutex, comm);
1555 
1556  // If we have something to send, or we expect something from other
1557  // processors, we need to visit one of the two scopes below. Otherwise,
1558  // no other action is required by this mpi process, and we can safely
1559  // return.
1560  if (send_to.size() == 0 && n_point_point_communications == 0)
1561  return received_objects;
1562 
1563  const int mpi_tag =
1564  internal::Tags::compute_point_to_point_communication_pattern;
1565 
1566  // Sending buffers
1567  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1568  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1569  {
1570  unsigned int i = 0;
1571  for (const auto &rank_obj : objects_to_send)
1572  if (rank_obj.first != my_proc)
1573  {
1574  const auto &rank = rank_obj.first;
1575  buffers_to_send[i] = Utilities::pack(rank_obj.second);
1576  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1577  buffers_to_send[i].size(),
1578  MPI_CHAR,
1579  rank,
1580  mpi_tag,
1581  comm,
1582  &buffer_send_requests[i]);
1583  AssertThrowMPI(ierr);
1584  ++i;
1585  }
1586  }
1587 
1588  // Fill the output map
1589  {
1590  std::vector<char> buffer;
1591  // We do this on a first come/first served basis
1592  for (unsigned int i = 0; i < n_point_point_communications; ++i)
1593  {
1594  // Probe what's going on. Take data from the first available sender
1595  MPI_Status status;
1596  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1597  AssertThrowMPI(ierr);
1598 
1599  // Length of the message
1600  int len;
1601  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1602  AssertThrowMPI(ierr);
1603  buffer.resize(len);
1604 
1605  // Source rank
1606  const unsigned int rank = status.MPI_SOURCE;
1607 
1608  // Actually receive the message
1609  ierr = MPI_Recv(buffer.data(),
1610  len,
1611  MPI_CHAR,
1612  status.MPI_SOURCE,
1613  status.MPI_TAG,
1614  comm,
1615  MPI_STATUS_IGNORE);
1616  AssertThrowMPI(ierr);
1617  Assert(received_objects.find(rank) == received_objects.end(),
1618  ExcInternalError(
1619  "I should not receive again from this rank"));
1620  received_objects[rank] = Utilities::unpack<T>(buffer);
1621  }
1622  }
1623 
1624  // Wait to have sent all objects.
1625  const int ierr = MPI_Waitall(send_to.size(),
1626  buffer_send_requests.data(),
1627  MPI_STATUSES_IGNORE);
1628  AssertThrowMPI(ierr);
1629 
1630  return received_objects;
1631 # endif // deal.II with MPI
1632  }
1633 
1634  template <typename T>
1635  std::vector<T>
1636  all_gather(const MPI_Comm &comm, const T &object)
1637  {
1638 # ifndef DEAL_II_WITH_MPI
1639  (void)comm;
1640  std::vector<T> v(1, object);
1641  return v;
1642 # else
1643  const auto n_procs = ::dealii::Utilities::MPI::n_mpi_processes(comm);
1644 
1645  std::vector<char> buffer = Utilities::pack(object);
1646 
1647  int n_local_data = buffer.size();
1648 
1649  // Vector to store the size of loc_data_array for every process
1650  std::vector<int> size_all_data(n_procs, 0);
1651 
1652  // Exchanging the size of each buffer
1653  MPI_Allgather(
1654  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1655 
1656  // Now computing the displacement, relative to recvbuf,
1657  // at which to store the incoming buffer
1658  std::vector<int> rdispls(n_procs);
1659  rdispls[0] = 0;
1660  for (unsigned int i = 1; i < n_procs; ++i)
1661  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1662 
1663  // Step 3: exchange the buffer:
1664  std::vector<char> received_unrolled_buffer(rdispls.back() +
1665  size_all_data.back());
1666 
1667  MPI_Allgatherv(buffer.data(),
1668  n_local_data,
1669  MPI_CHAR,
1670  received_unrolled_buffer.data(),
1671  size_all_data.data(),
1672  rdispls.data(),
1673  MPI_CHAR,
1674  comm);
1675 
1676  std::vector<T> received_objects(n_procs);
1677  for (unsigned int i = 0; i < n_procs; ++i)
1678  {
1679  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1680  rdispls[i],
1681  received_unrolled_buffer.begin() +
1682  rdispls[i] + size_all_data[i]);
1683  received_objects[i] = Utilities::unpack<T>(local_buffer);
1684  }
1685 
1686  return received_objects;
1687 # endif
1688  }
1689 
1690  template <typename T>
1691  std::vector<T>
1692  gather(const MPI_Comm & comm,
1693  const T & object_to_send,
1694  const unsigned int root_process)
1695  {
1696 # ifndef DEAL_II_WITH_MPI
1697  (void)comm;
1698  (void)root_process;
1699  std::vector<T> v(1, object_to_send);
1700  return v;
1701 # else
1702  const auto n_procs = ::dealii::Utilities::MPI::n_mpi_processes(comm);
1703  const auto my_rank = ::dealii::Utilities::MPI::this_mpi_process(comm);
1704 
1705  AssertIndexRange(root_process, n_procs);
1706 
1707  std::vector<char> buffer = Utilities::pack(object_to_send);
1708  int n_local_data = buffer.size();
1709 
1710  // Vector to store the size of loc_data_array for every process
1711  // only the root process needs to allocate memory for that purpose
1712  std::vector<int> size_all_data;
1713  if (my_rank == root_process)
1714  size_all_data.resize(n_procs, 0);
1715 
1716  // Exchanging the size of each buffer
1717  int ierr = MPI_Gather(&n_local_data,
1718  1,
1719  MPI_INT,
1720  size_all_data.data(),
1721  1,
1722  MPI_INT,
1723  root_process,
1724  comm);
1725  AssertThrowMPI(ierr);
1726 
1727  // Now computing the displacement, relative to recvbuf,
1728  // at which to store the incoming buffer; only for root
1729  std::vector<int> rdispls;
1730  if (my_rank == root_process)
1731  {
1732  rdispls.resize(n_procs, 0);
1733  for (unsigned int i = 1; i < n_procs; ++i)
1734  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1735  }
1736  // exchange the buffer:
1737  std::vector<char> received_unrolled_buffer;
1738  if (my_rank == root_process)
1739  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
1740 
1741  ierr = MPI_Gatherv(buffer.data(),
1742  n_local_data,
1743  MPI_CHAR,
1744  received_unrolled_buffer.data(),
1745  size_all_data.data(),
1746  rdispls.data(),
1747  MPI_CHAR,
1748  root_process,
1749  comm);
1750  AssertThrowMPI(ierr);
1751 
1752  std::vector<T> received_objects;
1753 
1754  if (my_rank == root_process)
1755  {
1756  received_objects.resize(n_procs);
1757 
1758  for (unsigned int i = 0; i < n_procs; ++i)
1759  {
1760  const std::vector<char> local_buffer(
1761  received_unrolled_buffer.begin() + rdispls[i],
1762  received_unrolled_buffer.begin() + rdispls[i] +
1763  size_all_data[i]);
1764  received_objects[i] = Utilities::unpack<T>(local_buffer);
1765  }
1766  }
1767  return received_objects;
1768 # endif
1769  }
1770 
1771 
1772 # ifdef DEAL_II_WITH_MPI
1773  template <class Iterator, typename Number>
1774  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
1775  mean_and_standard_deviation(const Iterator begin,
1776  const Iterator end,
1777  const MPI_Comm &comm)
1778  {
1779  // below we do simple and straight-forward implementation. More elaborate
1780  // options are:
1781  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
1782  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
1783  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
1784  using Std = typename numbers::NumberTraits<Number>::real_type;
1785  const Number sum = std::accumulate(begin, end, Number(0.));
1786 
1787  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
1788  Assert(size > 0, ExcDivideByZero());
1789  const Number mean =
1790  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
1791  Std sq_sum = 0.;
1792  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
1793  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
1794  });
1795  sq_sum = Utilities::MPI::sum(sq_sum, comm);
1796  return std::make_pair(mean,
1797  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
1798  }
1799 # endif
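// A usage sketch for mean_and_standard_deviation(): pass iterators over the
// locally stored values; the returned pair holds the global mean and the
// standard deviation (with the size-1 correction used above) over all values
// on all ranks:
//
//   const std::vector<double> samples = gather_local_samples(); // illustrative
//   const auto stats = Utilities::MPI::mean_and_standard_deviation(
//     samples.begin(), samples.end(), comm);
//   // stats.first = mean, stats.second = standard deviation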
1800 
1801 #endif
1802  } // end of namespace MPI
1803 } // end of namespace Utilities
1804 
1805 
1806 DEAL_II_NAMESPACE_CLOSE
1807 
1808 #endif