Reference documentation for deal.II version GIT 725517848b 2022-05-23 21:10:02+00:00
mpi.h
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
21 #include <deal.II/base/array_view.h>
22 #include <deal.II/base/index_set.h>
23 #include <deal.II/base/mpi_tags.h>
24 #include <deal.II/base/numbers.h>
25 #include <deal.II/base/template_constraints.h>
26 
27 #include <boost/signals2.hpp>
28 
29 #include <complex>
30 #include <map>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
36 // without MPI, we would still like to use
37 // some constructs with MPI data
38 // types. Therefore, create some dummies
39 using MPI_Comm = int;
40 using MPI_Request = int;
41 using MPI_Datatype = int;
42 using MPI_Op = int;
43 # ifndef MPI_COMM_WORLD
44 # define MPI_COMM_WORLD 0
45 # endif
46 # ifndef MPI_COMM_SELF
47 # define MPI_COMM_SELF 0
48 # endif
49 # ifndef MPI_COMM_NULL
50 # define MPI_COMM_NULL 0
51 # endif
52 # ifndef MPI_REQUEST_NULL
53 # define MPI_REQUEST_NULL 0
54 # endif
55 # ifndef MPI_MIN
56 # define MPI_MIN 0
57 # endif
58 # ifndef MPI_MAX
59 # define MPI_MAX 0
60 # endif
61 # ifndef MPI_SUM
62 # define MPI_SUM 0
63 # endif
64 # ifndef MPI_LOR
65 # define MPI_LOR 0
66 # endif
67 #endif
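// Editorial sketch, not part of the original header: with the dummy types
// above, a serial (non-MPI) build can still compile code that stores or
// passes communicators around. `some_function` is a hypothetical function.
// @code
// MPI_Comm comm = MPI_COMM_WORLD; // in a non-MPI build, just the int 0
// some_function(comm);            // compiles identically with and without MPI
// @endcode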
68 
69 
70 
84 #ifdef DEAL_II_WITH_MPI
85 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
86 #endif
87 
88 
89 
90 DEAL_II_NAMESPACE_OPEN
91 
92 
93 // Forward type declarations to allow MPI sums over tensorial types
94 #ifndef DOXYGEN
95 template <int rank, int dim, typename Number>
96 class Tensor;
97 template <int rank, int dim, typename Number>
98 class SymmetricTensor;
99 template <typename Number>
100 class SparseMatrix;
101 class IndexSet;
102 #endif
103 
104 namespace Utilities
105 {
118  IndexSet
119  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
120  const unsigned int n_partitions,
121  const IndexSet::size_type total_size);
122 
130  namespace MPI
131  {
140  template <typename T>
141  constexpr bool is_mpi_type = is_same_as_any_of<T,
142  char,
143  signed short,
144  signed int,
145  signed long,
146  signed long long,
147  signed char,
148  unsigned char,
149  unsigned short,
150  unsigned int,
151  unsigned long int,
152  unsigned long long,
153  float,
154  double,
155  long double,
156  bool,
157  std::complex<float>,
158  std::complex<double>,
159  std::complex<long double>,
160  wchar_t>::value;
161 
170  unsigned int
171  n_mpi_processes(const MPI_Comm &mpi_communicator);
172 
181  unsigned int
182  this_mpi_process(const MPI_Comm &mpi_communicator);
183 
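// Editorial usage sketch, not part of the original file: query the size of a
// communicator and the rank of the calling process; `comm` is assumed to be
// a valid communicator.
// @code
// const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
// const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
// @endcode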
188  const std::vector<unsigned int>
189  mpi_processes_within_communicator(const MPI_Comm &comm_large,
190  const MPI_Comm &comm_small);
191 
213  std::vector<unsigned int>
214 compute_point_to_point_communication_pattern(
215  const MPI_Comm & mpi_comm,
216  const std::vector<unsigned int> &destinations);
217 
237  unsigned int
238 compute_n_point_to_point_communications(
239  const MPI_Comm & mpi_comm,
240  const std::vector<unsigned int> &destinations);
241 
258  MPI_Comm
259  duplicate_communicator(const MPI_Comm &mpi_communicator);
260 
270  void
271  free_communicator(MPI_Comm &mpi_communicator);
272 
285 class DuplicatedCommunicator
286  {
287  public:
291  explicit DuplicatedCommunicator(const MPI_Comm &communicator)
292  : comm(duplicate_communicator(communicator))
293  {}
294 
298 DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
299 
303 ~DuplicatedCommunicator()
304  {
305 free_communicator(comm);
306  }
307 
311  const MPI_Comm &
312  operator*() const
313  {
314  return comm;
315  }
316 
317 
321 DuplicatedCommunicator &
322 operator=(const DuplicatedCommunicator &) = delete;
323 
324  private:
328  MPI_Comm comm;
329  };
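// Editorial usage sketch, not part of the original file: duplicate a
// communicator for the lifetime of an algorithm so that its messages cannot
// collide with other communication on the original communicator;
// `mpi_communicator` and the communication inside the scope are assumptions.
// @code
// {
//   DuplicatedCommunicator dup(mpi_communicator);
//   // ... communicate on *dup instead of mpi_communicator ...
// } // the duplicate is freed when `dup` goes out of scope
// @endcode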
330 
360 class CollectiveMutex
361  {
362  public:
368 class ScopedLock
369  {
370  public:
374  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
375  : mutex(mutex)
376  , comm(comm)
377  {
378  mutex.lock(comm);
379  }
380 
384 ~ScopedLock()
385  {
386  mutex.unlock(comm);
387  }
388 
389  private:
393 CollectiveMutex &mutex;
397  const MPI_Comm comm;
398  };
399 
403  explicit CollectiveMutex();
404 
408 ~CollectiveMutex();
409 
416  void
417  lock(const MPI_Comm &comm);
418 
425  void
426  unlock(const MPI_Comm &comm);
427 
428  private:
432  bool locked;
433 
437  MPI_Request request;
438  };
439 
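// Editorial usage sketch, not part of the original file, mirroring how this
// class is used inside some_to_some() further down in this header:
// @code
// static CollectiveMutex mutex;
// {
//   CollectiveMutex::ScopedLock lock(mutex, comm);
//   // point-to-point messages sent here cannot interleave with a previous
//   // critical section guarded by the same mutex
// } // unlock happens in the destructor of `lock`
// @endcode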
440 
441 
471 #ifdef DEAL_II_WITH_MPI
472  DEAL_II_DEPRECATED_EARLY int
473  create_group(const MPI_Comm & comm,
474  const MPI_Group &group,
475  const int tag,
476  MPI_Comm * new_comm);
477 #endif
478 
487  std::vector<IndexSet>
488  create_ascending_partitioning(const MPI_Comm & comm,
489  const IndexSet::size_type locally_owned_size);
490 
498  IndexSet
499 create_evenly_distributed_partitioning(
500  const MPI_Comm & comm,
501  const IndexSet::size_type total_size);
502 
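// Editorial sketch, not part of the original file: on three ranks passing
// locally_owned_size = 4, 5, and 3, create_ascending_partitioning() returns
// the index sets [0,4), [4,9), [9,12) on every rank; `comm` and
// `n_locally_owned` are assumptions.
// @code
// const std::vector<IndexSet> owned =
//   Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);
// @endcode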
503 #ifdef DEAL_II_WITH_MPI
519  template <class Iterator, typename Number = long double>
520  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
521 mean_and_standard_deviation(const Iterator begin,
522  const Iterator end,
523  const MPI_Comm &comm);
524 #endif
525 
526 
574  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
575  create_mpi_data_type_n_bytes(const std::size_t n_bytes);
576 
596  template <typename T>
597  T
598  sum(const T &t, const MPI_Comm &mpi_communicator);
599 
609  template <typename T, typename U>
610  void
611  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
612 
622  template <typename T>
623  void
624 sum(const ArrayView<const T> &values,
625  const MPI_Comm & mpi_communicator,
626  const ArrayView<T> & sums);
627 
633  template <int rank, int dim, typename Number>
634 SymmetricTensor<rank, dim, Number>
635 sum(const SymmetricTensor<rank, dim, Number> &local,
636  const MPI_Comm & mpi_communicator);
637 
643  template <int rank, int dim, typename Number>
644 Tensor<rank, dim, Number>
645 sum(const Tensor<rank, dim, Number> &local,
646  const MPI_Comm & mpi_communicator);
647 
656  template <typename Number>
657  void
658  sum(const SparseMatrix<Number> &local,
659  const MPI_Comm & mpi_communicator,
660  SparseMatrix<Number> & global);
661 
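// Editorial usage sketch, not part of the original file: the scalar and the
// element-wise overloads of sum(); `local_value` and `comm` are assumptions.
// @code
// const double total = Utilities::MPI::sum(local_value, comm);
//
// std::vector<int> local_counts(3, 1);
// std::vector<int> global_counts(3);
// Utilities::MPI::sum(local_counts, comm, global_counts); // element-wise
// @endcode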
681  template <typename T>
682  T
683  max(const T &t, const MPI_Comm &mpi_communicator);
684 
694  template <typename T, typename U>
695  void
696  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
697 
707  template <typename T>
708  void
709 max(const ArrayView<const T> &values,
710  const MPI_Comm & mpi_communicator,
711  const ArrayView<T> & maxima);
712 
732  template <typename T>
733  T
734  min(const T &t, const MPI_Comm &mpi_communicator);
735 
745  template <typename T, typename U>
746  void
747  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
748 
758  template <typename T>
759  void
760 min(const ArrayView<const T> &values,
761  const MPI_Comm & mpi_communicator,
762  const ArrayView<T> & minima);
763 
787  template <typename T>
788  T
789  logical_or(const T &t, const MPI_Comm &mpi_communicator);
790 
805  template <typename T, typename U>
806  void
807  logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);
808 
818  template <typename T>
819  void
820 logical_or(const ArrayView<const T> &values,
821  const MPI_Comm & mpi_communicator,
822  const ArrayView<T> & results);
823 
838  struct MinMaxAvg
839  {
844  double sum;
845 
850  double min;
851 
856  double max;
857 
866  unsigned int min_index;
867 
876  unsigned int max_index;
877 
882  double avg;
883  };
884 
899  MinMaxAvg
900  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
901 
913  std::vector<MinMaxAvg>
914  min_max_avg(const std::vector<double> &my_value,
915  const MPI_Comm & mpi_communicator);
916 
917 
930  void
931  min_max_avg(const ArrayView<const double> &my_values,
932  const ArrayView<MinMaxAvg> & result,
933  const MPI_Comm & mpi_communicator);
934 
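// Editorial usage sketch, not part of the original file: gather statistics of
// a per-rank quantity in a single reduction; `elapsed_time` and `comm` are
// assumptions.
// @code
// const MinMaxAvg stats = Utilities::MPI::min_max_avg(elapsed_time, comm);
// // stats.avg is the mean over all ranks; stats.max_index is the rank on
// // which the maximum was attained
// @endcode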
935 
979 class MPI_InitFinalize
980  {
981  public:
1027 MPI_InitFinalize(
1028  int & argc,
1029  char **& argv,
1030  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1031 
1036 ~MPI_InitFinalize();
1037 
1064  static void
1065  register_request(MPI_Request &request);
1066 
1070  static void
1071  unregister_request(MPI_Request &request);
1072 
1080  struct Signals
1081  {
1086  boost::signals2::signal<void()> at_mpi_init;
1087 
1094  boost::signals2::signal<void()> at_mpi_finalize;
1095  };
1096 
1097 static Signals signals;
1098 
1099  private:
1103  static std::set<MPI_Request *> requests;
1104  };
1105 
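// Editorial usage sketch, not part of the original file: the usual way
// deal.II programs initialize and finalize MPI.
// @code
// int main(int argc, char **argv)
// {
//   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
//   // ... parallel program; MPI_Finalize() runs in the destructor ...
// }
// @endcode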
1117  bool
1118  job_supports_mpi();
1119 
1137  template <typename T>
1138  std::map<unsigned int, T>
1139  some_to_some(const MPI_Comm & comm,
1140  const std::map<unsigned int, T> &objects_to_send);
1141 
1155  template <typename T>
1156  std::vector<T>
1157  all_gather(const MPI_Comm &comm, const T &object_to_send);
1158 
1174  template <typename T>
1175  std::vector<T>
1176  gather(const MPI_Comm & comm,
1177  const T & object_to_send,
1178  const unsigned int root_process = 0);
1179 
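// Editorial usage sketch, not part of the original file: these functions
// accept any serializable object, for example a std::string; `comm` and
// `my_rank` are assumptions.
// @code
// const std::string name = "rank " + std::to_string(my_rank);
// const std::vector<std::string> all_names =
//   Utilities::MPI::all_gather(comm, name); // one entry per rank, everywhere
// @endcode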
1215  template <typename T>
1216  typename std::enable_if<is_mpi_type<T> == false, T>::type
1217  broadcast(const MPI_Comm & comm,
1218  const T & object_to_send,
1219  const unsigned int root_process = 0);
1220 
1243  template <typename T>
1244  typename std::enable_if<is_mpi_type<T> == true, T>::type
1245  broadcast(const MPI_Comm & comm,
1246  const T & object_to_send,
1247  const unsigned int root_process = 0);
1248 
1265  template <typename T>
1266  void
1267  broadcast(T * buffer,
1268  const size_t count,
1269  const unsigned int root,
1270  const MPI_Comm & comm);
1271 
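// Editorial usage sketch, not part of the original file: broadcasting a plain
// buffer from rank 0; `data` is assumed to have the same size on all ranks,
// and `fill_data` is a hypothetical helper.
// @code
// std::vector<double> data(100);
// if (Utilities::MPI::this_mpi_process(comm) == 0)
//   fill_data(data); // meaningful values exist only on rank 0
// Utilities::MPI::broadcast(data.data(), data.size(), 0, comm);
// @endcode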
1284  template <typename T>
1285  T
1286  reduce(const T & local_value,
1287  const MPI_Comm & comm,
1288  const std::function<T(const T &, const T &)> &combiner,
1289  const unsigned int root_process = 0);
1290 
1300  template <typename T>
1301  T
1302  all_reduce(const T & local_value,
1303  const MPI_Comm & comm,
1304  const std::function<T(const T &, const T &)> &combiner);
1305 
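// Editorial usage sketch, not part of the original file: all_reduce() with a
// user-supplied combiner computing a global minimum; `local_value` and
// `comm` are assumptions.
// @code
// const double global_min = Utilities::MPI::all_reduce<double>(
//   local_value, comm, [](const double &a, const double &b) {
//     return std::min(a, b);
//   });
// @endcode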
1348  std::vector<unsigned int>
1349  compute_index_owner(const IndexSet &owned_indices,
1350  const IndexSet &indices_to_look_up,
1351  const MPI_Comm &comm);
1352 
1360  template <typename T>
1361  std::vector<T>
1362  compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
1363 
1367  template <typename T>
1368  std::set<T>
1369  compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
1370 
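// Editorial usage sketch, not part of the original file: forming the union of
// per-rank sets; the set contents and `comm` are assumptions.
// @code
// const std::set<unsigned int> local_set = {my_rank, 2 * my_rank};
// const std::set<unsigned int> global_set =
//   Utilities::MPI::compute_set_union(local_set, comm);
// @endcode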
1371 
1372 
1373  /* --------------------------- inline functions ------------------------- */
1374 
1375  namespace internal
1376  {
1382  namespace MPIDataTypes
1383  {
1384 #ifdef DEAL_II_WITH_MPI
1385  inline MPI_Datatype
1386  mpi_type_id(const bool *)
1387  {
1388  return MPI_CXX_BOOL;
1389  }
1390 
1391 
1392 
1393  inline MPI_Datatype
1394  mpi_type_id(const char *)
1395  {
1396  return MPI_CHAR;
1397  }
1398 
1399 
1400 
1401  inline MPI_Datatype
1402  mpi_type_id(const signed char *)
1403  {
1404  return MPI_SIGNED_CHAR;
1405  }
1406 
1407 
1408 
1409  inline MPI_Datatype
1410  mpi_type_id(const wchar_t *)
1411  {
1412  return MPI_WCHAR;
1413  }
1414 
1415 
1416 
1417  inline MPI_Datatype
1418  mpi_type_id(const short *)
1419  {
1420  return MPI_SHORT;
1421  }
1422 
1423 
1424 
1425  inline MPI_Datatype
1426  mpi_type_id(const int *)
1427  {
1428  return MPI_INT;
1429  }
1430 
1431 
1432 
1433  inline MPI_Datatype
1434  mpi_type_id(const long int *)
1435  {
1436  return MPI_LONG;
1437  }
1438 
1439 
1440 
1441  inline MPI_Datatype
1442  mpi_type_id(const long long int *)
1443  {
1444  return MPI_LONG_LONG;
1445  }
1446 
1447 
1448 
1449  inline MPI_Datatype
1450  mpi_type_id(const unsigned char *)
1451  {
1452  return MPI_UNSIGNED_CHAR;
1453  }
1454 
1455 
1456 
1457  inline MPI_Datatype
1458  mpi_type_id(const unsigned short *)
1459  {
1460  return MPI_UNSIGNED_SHORT;
1461  }
1462 
1463 
1464 
1465  inline MPI_Datatype
1466  mpi_type_id(const unsigned int *)
1467  {
1468  return MPI_UNSIGNED;
1469  }
1470 
1471 
1472 
1473  inline MPI_Datatype
1474  mpi_type_id(const unsigned long int *)
1475  {
1476  return MPI_UNSIGNED_LONG;
1477  }
1478 
1479 
1480 
1481  inline MPI_Datatype
1482  mpi_type_id(const unsigned long long int *)
1483  {
1484  return MPI_UNSIGNED_LONG_LONG;
1485  }
1486 
1487 
1488 
1489  inline MPI_Datatype
1490  mpi_type_id(const float *)
1491  {
1492  return MPI_FLOAT;
1493  }
1494 
1495 
1496 
1497  inline MPI_Datatype
1498  mpi_type_id(const double *)
1499  {
1500  return MPI_DOUBLE;
1501  }
1502 
1503 
1504 
1505  inline MPI_Datatype
1506  mpi_type_id(const long double *)
1507  {
1508  return MPI_LONG_DOUBLE;
1509  }
1510 
1511 
1512 
1513  inline MPI_Datatype
1514  mpi_type_id(const std::complex<float> *)
1515  {
1516  return MPI_COMPLEX;
1517  }
1518 
1519 
1520 
1521  inline MPI_Datatype
1522  mpi_type_id(const std::complex<double> *)
1523  {
1524  return MPI_DOUBLE_COMPLEX;
1525  }
1526 #endif
1527  } // namespace MPIDataTypes
1528  } // namespace internal
1529 
1530 
1531 
1532 #ifdef DEAL_II_WITH_MPI
1550  template <typename T>
1551  const MPI_Datatype
1552 mpi_type_id_for_type = internal::MPIDataTypes::mpi_type_id(
1553  static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1554 #endif
1555 
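// Editorial usage sketch, not part of the original file, assuming an MPI
// build: the variable template above picks the matching MPI_Datatype
// automatically; `comm` is an assumption.
// @code
// std::uint64_t n = 42;
// MPI_Bcast(&n, 1, Utilities::MPI::mpi_type_id_for_type<decltype(n)>, 0, comm);
// @endcode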
1556 #ifndef DOXYGEN
1557  namespace internal
1558  {
1559  // declaration for an internal function that lives in mpi.templates.h
1560  template <typename T>
1561  void
1562  all_reduce(const MPI_Op & mpi_op,
1563  const ArrayView<const T> &values,
1564  const MPI_Comm & mpi_communicator,
1565  const ArrayView<T> & output);
1566  } // namespace internal
1567 
1568 
1569 
1570  // Since these depend on N they must live in the header file
1571  template <typename T, unsigned int N>
1572  void
1573  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1574  {
1575  internal::all_reduce(MPI_SUM,
1576 ArrayView<const T>(values, N),
1577  mpi_communicator,
1578  ArrayView<T>(sums, N));
1579  }
1580 
1581 
1582 
1583  template <typename T, unsigned int N>
1584  void
1585  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1586  {
1587  internal::all_reduce(MPI_MAX,
1588 ArrayView<const T>(values, N),
1589  mpi_communicator,
1590  ArrayView<T>(maxima, N));
1591  }
1592 
1593 
1594 
1595  template <typename T, unsigned int N>
1596  void
1597  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1598  {
1599  internal::all_reduce(MPI_MIN,
1600 ArrayView<const T>(values, N),
1601  mpi_communicator,
1602  ArrayView<T>(minima, N));
1603  }
1604 
1605 
1606 
1607  template <typename T, unsigned int N>
1608  void
1609  logical_or(const T (&values)[N],
1610  const MPI_Comm &mpi_communicator,
1611  T (&results)[N])
1612  {
1613  static_assert(std::is_integral<T>::value,
1614  "The MPI_LOR operation only allows integral data types.");
1615 
1616  internal::all_reduce(MPI_LOR,
1617 ArrayView<const T>(values, N),
1618  mpi_communicator,
1619  ArrayView<T>(results, N));
1620  }
1621 
1622 
1623 
1624  template <typename T>
1625  std::map<unsigned int, T>
1626  some_to_some(const MPI_Comm & comm,
1627  const std::map<unsigned int, T> &objects_to_send)
1628  {
1629 # ifndef DEAL_II_WITH_MPI
1630  (void)comm;
1631  Assert(objects_to_send.size() < 2,
1632  ExcMessage("Cannot send to more than one processor."));
1633  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1634  objects_to_send.size() == 0,
1635  ExcMessage("Can only send to myself or to nobody."));
1636  return objects_to_send;
1637 # else
1638  const auto my_proc = this_mpi_process(comm);
1639 
1640  std::map<unsigned int, T> received_objects;
1641 
1642  std::vector<unsigned int> send_to;
1643  send_to.reserve(objects_to_send.size());
1644  for (const auto &m : objects_to_send)
1645  if (m.first == my_proc)
1646  received_objects[my_proc] = m.second;
1647  else
1648  send_to.emplace_back(m.first);
1649 
1650  const unsigned int n_expected_incoming_messages =
1651 Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1652 
1653  // Protect the following communication:
1654  static CollectiveMutex mutex;
1655  CollectiveMutex::ScopedLock lock(mutex, comm);
1656 
1657  // If we have something to send, or we expect something from other
1658  // processors, we need to visit one of the two scopes below. Otherwise,
1659  // no other action is required by this mpi process, and we can safely
1660  // return.
1661  if (send_to.size() == 0 && n_expected_incoming_messages == 0)
1662  return received_objects;
1663 
1664  const int mpi_tag =
1665 internal::Tags::compute_point_to_point_communication_pattern;
1666 
1667  // Sending buffers
1668  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1669  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1670  {
1671  unsigned int i = 0;
1672  for (const auto &rank_obj : objects_to_send)
1673  if (rank_obj.first != my_proc)
1674  {
1675  const auto &rank = rank_obj.first;
1676  buffers_to_send[i] = Utilities::pack(rank_obj.second,
1677  /*allow_compression=*/false);
1678  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1679  buffers_to_send[i].size(),
1680  MPI_CHAR,
1681  rank,
1682  mpi_tag,
1683  comm,
1684  &buffer_send_requests[i]);
1685  AssertThrowMPI(ierr);
1686  ++i;
1687  }
1688  }
1689 
1690  // Fill the output map
1691  {
1692  std::vector<char> buffer;
1693  // We do this on a first come/first served basis
1694  for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1695  {
1696  // Probe what's going on. Take data from the first available sender
1697  MPI_Status status;
1698  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1699  AssertThrowMPI(ierr);
1700 
1701  // Length of the message
1702  int len;
1703  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1704  AssertThrowMPI(ierr);
1705  buffer.resize(len);
1706 
1707  // Source rank
1708  const unsigned int rank = status.MPI_SOURCE;
1709 
1710  // Actually receive the message
1711  ierr = MPI_Recv(buffer.data(),
1712  len,
1713  MPI_CHAR,
1714  status.MPI_SOURCE,
1715  status.MPI_TAG,
1716  comm,
1717  MPI_STATUS_IGNORE);
1718  AssertThrowMPI(ierr);
1719  Assert(received_objects.find(rank) == received_objects.end(),
1720 ExcInternalError(
1721  "I should not receive again from this rank"));
1722  received_objects[rank] =
1723  Utilities::unpack<T>(buffer,
1724  /*allow_compression=*/false);
1725  }
1726  }
1727 
1728 // Wait until all objects have been sent.
1729  const int ierr = MPI_Waitall(send_to.size(),
1730  buffer_send_requests.data(),
1731  MPI_STATUSES_IGNORE);
1732  AssertThrowMPI(ierr);
1733 
1734  return received_objects;
1735 # endif // deal.II with MPI
1736  }
1737 
1738 
1739 
1740  template <typename T>
1741  std::vector<T>
1742  all_gather(const MPI_Comm &comm, const T &object)
1743  {
1744  if (job_supports_mpi() == false)
1745  return {object};
1746 
1747 # ifndef DEAL_II_WITH_MPI
1748  (void)comm;
1749  std::vector<T> v(1, object);
1750  return v;
1751 # else
1752  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1753 
1754  std::vector<char> buffer = Utilities::pack(object);
1755 
1756  int n_local_data = buffer.size();
1757 
1758 // Vector to store the size of the local buffer on every process
1759  std::vector<int> size_all_data(n_procs, 0);
1760 
1761  // Exchanging the size of each buffer
1762  int ierr = MPI_Allgather(
1763  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1764  AssertThrowMPI(ierr);
1765 
1766  // Now computing the displacement, relative to recvbuf,
1767  // at which to store the incoming buffer
1768  std::vector<int> rdispls(n_procs);
1769  rdispls[0] = 0;
1770  for (unsigned int i = 1; i < n_procs; ++i)
1771  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1772 
1773 // Exchange the buffer:
1774  std::vector<char> received_unrolled_buffer(rdispls.back() +
1775  size_all_data.back());
1776 
1777  ierr = MPI_Allgatherv(buffer.data(),
1778  n_local_data,
1779  MPI_CHAR,
1780  received_unrolled_buffer.data(),
1781  size_all_data.data(),
1782  rdispls.data(),
1783  MPI_CHAR,
1784  comm);
1785  AssertThrowMPI(ierr);
1786 
1787  std::vector<T> received_objects(n_procs);
1788  for (unsigned int i = 0; i < n_procs; ++i)
1789  {
1790  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1791  rdispls[i],
1792  received_unrolled_buffer.begin() +
1793  rdispls[i] + size_all_data[i]);
1794  received_objects[i] = Utilities::unpack<T>(local_buffer);
1795  }
1796 
1797  return received_objects;
1798 # endif
1799  }
1800 
1801 
1802 
1803  template <typename T>
1804  std::vector<T>
1805  gather(const MPI_Comm & comm,
1806  const T & object_to_send,
1807  const unsigned int root_process)
1808  {
1809 # ifndef DEAL_II_WITH_MPI
1810  (void)comm;
1811  (void)root_process;
1812  std::vector<T> v(1, object_to_send);
1813  return v;
1814 # else
1815  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1816  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
1817 
1818  AssertIndexRange(root_process, n_procs);
1819 
1820  std::vector<char> buffer = Utilities::pack(object_to_send);
1821  int n_local_data = buffer.size();
1822 
1823 // Vector to store the size of the local buffer on every process;
1824 // only the root process needs to allocate memory for that purpose
1825  std::vector<int> size_all_data;
1826  if (my_rank == root_process)
1827  size_all_data.resize(n_procs, 0);
1828 
1829  // Exchanging the size of each buffer
1830  int ierr = MPI_Gather(&n_local_data,
1831  1,
1832  MPI_INT,
1833  size_all_data.data(),
1834  1,
1835  MPI_INT,
1836  root_process,
1837  comm);
1838  AssertThrowMPI(ierr);
1839 
1840  // Now computing the displacement, relative to recvbuf,
1841  // at which to store the incoming buffer; only for root
1842  std::vector<int> rdispls;
1843  if (my_rank == root_process)
1844  {
1845  rdispls.resize(n_procs, 0);
1846  for (unsigned int i = 1; i < n_procs; ++i)
1847  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1848  }
1849  // exchange the buffer:
1850  std::vector<char> received_unrolled_buffer;
1851  if (my_rank == root_process)
1852  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
1853 
1854  ierr = MPI_Gatherv(buffer.data(),
1855  n_local_data,
1856  MPI_CHAR,
1857  received_unrolled_buffer.data(),
1858  size_all_data.data(),
1859  rdispls.data(),
1860  MPI_CHAR,
1861  root_process,
1862  comm);
1863  AssertThrowMPI(ierr);
1864 
1865  std::vector<T> received_objects;
1866 
1867  if (my_rank == root_process)
1868  {
1869  received_objects.resize(n_procs);
1870 
1871  for (unsigned int i = 0; i < n_procs; ++i)
1872  {
1873  const std::vector<char> local_buffer(
1874  received_unrolled_buffer.begin() + rdispls[i],
1875  received_unrolled_buffer.begin() + rdispls[i] +
1876  size_all_data[i]);
1877  received_objects[i] = Utilities::unpack<T>(local_buffer);
1878  }
1879  }
1880  return received_objects;
1881 # endif
1882  }
1883 
1884 
1885 
1886  template <typename T>
1887  void
1888  broadcast(T * buffer,
1889  const size_t count,
1890  const unsigned int root,
1891  const MPI_Comm & comm)
1892  {
1893 # ifndef DEAL_II_WITH_MPI
1894  (void)buffer;
1895  (void)count;
1896  (void)root;
1897  (void)comm;
1898 # else
1899  Assert(root < n_mpi_processes(comm),
1900  ExcMessage("Invalid root rank specified."));
1901 
1902 // MPI_Bcast's count is a signed int, so send at most 2^31-1 elements
1903 // per iteration:
1904  const size_t max_send_count = std::numeric_limits<signed int>::max();
1905 
1906  size_t total_sent_count = 0;
1907  while (total_sent_count < count)
1908  {
1909  const size_t current_count =
1910  std::min(count - total_sent_count, max_send_count);
1911 
1912  const int ierr = MPI_Bcast(buffer + total_sent_count,
1913  current_count,
1914  mpi_type_id_for_type<decltype(*buffer)>,
1915  root,
1916  comm);
1917  AssertThrowMPI(ierr);
1918  total_sent_count += current_count;
1919  }
1920 # endif
1921  }
1922 
1923 
1924 
1925  template <typename T>
1926  typename std::enable_if<is_mpi_type<T> == false, T>::type
1927  broadcast(const MPI_Comm & comm,
1928  const T & object_to_send,
1929  const unsigned int root_process)
1930  {
1931 # ifndef DEAL_II_WITH_MPI
1932  (void)comm;
1933  (void)root_process;
1934  return object_to_send;
1935 # else
1936  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1937  AssertIndexRange(root_process, n_procs);
1938  (void)n_procs;
1939 
1940  std::vector<char> buffer;
1941  std::size_t buffer_size = numbers::invalid_size_type;
1942 
1943  // On the root process, pack the data and determine what the
1944  // buffer size needs to be.
1945  if (this_mpi_process(comm) == root_process)
1946  {
1947  buffer = Utilities::pack(object_to_send, false);
1948  buffer_size = buffer.size();
1949  }
1950 
1951  // Exchange the size of buffer
1952  int ierr = MPI_Bcast(&buffer_size,
1953  1,
1954  mpi_type_id_for_type<decltype(buffer_size)>,
1955  root_process,
1956  comm);
1957  AssertThrowMPI(ierr);
1958 
1959  // If not on the root process, correctly size the buffer to
1960  // receive the data, then do exactly that.
1961  if (this_mpi_process(comm) != root_process)
1962  buffer.resize(buffer_size);
1963 
1964  broadcast(buffer.data(), buffer_size, root_process, comm);
1965 
1966  if (Utilities::MPI::this_mpi_process(comm) == root_process)
1967  return object_to_send;
1968  else
1969  return Utilities::unpack<T>(buffer, false);
1970 # endif
1971  }
1972 
1973 
1974 
1975  template <typename T>
1976  typename std::enable_if<is_mpi_type<T> == true, T>::type
1977  broadcast(const MPI_Comm & comm,
1978  const T & object_to_send,
1979  const unsigned int root_process)
1980  {
1981 # ifndef DEAL_II_WITH_MPI
1982  (void)comm;
1983  (void)root_process;
1984  return object_to_send;
1985 # else
1986 
1987  T object = object_to_send;
1988  int ierr =
1989  MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
1990  AssertThrowMPI(ierr);
1991 
1992  return object;
1993 # endif
1994  }
1995 
1996 
1997 
1998 # ifdef DEAL_II_WITH_MPI
1999  template <class Iterator, typename Number>
2000  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2001  mean_and_standard_deviation(const Iterator begin,
2002  const Iterator end,
2003  const MPI_Comm &comm)
2004  {
2005 // Below we use a simple and straightforward implementation. More elaborate
2006  // options are:
2007  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2008  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2009  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2010  using Std = typename numbers::NumberTraits<Number>::real_type;
2011  const Number sum = std::accumulate(begin, end, Number(0.));
2012 
2013  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2014  Assert(size > 0, ExcDivideByZero());
2015  const Number mean =
2016  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2017  Std sq_sum = 0.;
2018  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
2019 sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
2020  });
2021  sq_sum = Utilities::MPI::sum(sq_sum, comm);
2022  return std::make_pair(mean,
2023  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2024  }
2025 # endif
2026 
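// Editorial usage sketch, not part of the original file: computing the global
// mean and sample standard deviation of locally stored samples;
// `get_local_samples` is a hypothetical helper and `comm` an assumption.
// @code
// const std::vector<double> samples = get_local_samples();
// const auto stats = Utilities::MPI::mean_and_standard_deviation(
//   samples.begin(), samples.end(), comm);
// // stats.first is the mean, stats.second the standard deviation
// @endcode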
2027 #endif
2028  } // end of namespace MPI
2029 } // end of namespace Utilities
2030 
2031 
2032 DEAL_II_NAMESPACE_CLOSE
2033 
2034 #endif