Reference documentation for deal.II version GIT 13632c9f70 2022-09-25 13:40:02+00:00
mpi.h
Go to the documentation of this file.
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2011 - 2022 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_mpi_h
17 #define dealii_mpi_h
18 
19 #include <deal.II/base/config.h>
20 
22 #include <deal.II/base/mpi_stub.h>
23 #include <deal.II/base/mpi_tags.h>
24 #include <deal.II/base/numbers.h>
 25 #include <deal.II/base/template_constraints.h>
 26 #include <deal.II/base/utilities.h>
27 
28 #include <boost/signals2.hpp>
29 
30 #include <complex>
31 #include <limits>
32 #include <map>
33 #include <numeric>
34 #include <set>
35 #include <vector>
36 
37 
38 
52 #ifdef DEAL_II_WITH_MPI
53 # define DEAL_II_MPI_CONST_CAST(expr) (expr)
54 #endif
55 
56 
57 
 58 DEAL_II_NAMESPACE_OPEN
 59 
60 
61 // Forward type declarations to allow MPI sums over tensorial types
62 #ifndef DOXYGEN
63 template <int rank, int dim, typename Number>
64 class Tensor;
65 template <int rank, int dim, typename Number>
66 class SymmetricTensor;
67 template <typename Number>
68 class SparseMatrix;
69 class IndexSet;
70 #endif
71 
72 namespace Utilities
73 {
 86  IndexSet
 87  create_evenly_distributed_partitioning(
88  const unsigned int my_partition_id,
89  const unsigned int n_partitions,
90  const types::global_dof_index total_size);
91 
99  namespace MPI
100  {
109  template <typename T>
110  constexpr bool is_mpi_type = is_same_as_any_of<T,
111  char,
112  signed short,
113  signed int,
114  signed long,
115  signed long long,
116  signed char,
117  unsigned char,
118  unsigned short,
119  unsigned int,
120  unsigned long int,
121  unsigned long long,
122  float,
123  double,
124  long double,
125  bool,
126  std::complex<float>,
127  std::complex<double>,
128  std::complex<long double>,
129  wchar_t>::value;
130 
139  unsigned int
140  n_mpi_processes(const MPI_Comm &mpi_communicator);
141 
150  unsigned int
151  this_mpi_process(const MPI_Comm &mpi_communicator);
152 
157  const std::vector<unsigned int>
158  mpi_processes_within_communicator(const MPI_Comm &comm_large,
159  const MPI_Comm &comm_small);
160 
 182  std::vector<unsigned int>
 183  compute_point_to_point_communication_pattern(
184  const MPI_Comm & mpi_comm,
185  const std::vector<unsigned int> &destinations);
186 
 206  unsigned int
 207  compute_n_point_to_point_communications(
208  const MPI_Comm & mpi_comm,
209  const std::vector<unsigned int> &destinations);
210 
227  MPI_Comm
228  duplicate_communicator(const MPI_Comm &mpi_communicator);
229 
239  void
240  free_communicator(MPI_Comm &mpi_communicator);
241 
 254  class DuplicatedCommunicator
 255  {
256  public:
260  explicit DuplicatedCommunicator(const MPI_Comm &communicator)
261  : comm(duplicate_communicator(communicator))
262  {}
263 
 267  DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
 268 
 272  ~DuplicatedCommunicator()
 273  {
 274  free_communicator(comm);
 275  }
276 
280  const MPI_Comm &
281  operator*() const
282  {
283  return comm;
284  }
285 
286 
 290  DuplicatedCommunicator &
 291  operator=(const DuplicatedCommunicator &) = delete;
 292 
293  private:
297  MPI_Comm comm;
298  };
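A brief usage sketch (illustrative only, not part of this header): DuplicatedCommunicator wraps duplicate_communicator()/free_communicator() in an RAII object, so a class or scope can keep a private communicator alive for exactly its own lifetime.

// Sketch: give a solver its own communicator so its messages cannot
// interfere with other communication on MPI_COMM_WORLD.
Utilities::MPI::DuplicatedCommunicator private_comm(MPI_COMM_WORLD);
const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(*private_comm);
// The duplicated communicator is freed when 'private_comm' goes out of scope.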
299 
300 
301 
 331  class CollectiveMutex
 332  {
333  public:
 339  class ScopedLock
 340  {
341  public:
345  explicit ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
346  : mutex(mutex)
347  , comm(comm)
348  {
349  mutex.lock(comm);
350  }
351 
 355  ~ScopedLock()
 356  {
357  mutex.unlock(comm);
358  }
359 
 360  private:
 364  CollectiveMutex &mutex;
368  const MPI_Comm comm;
369  };
370 
374  explicit CollectiveMutex();
375 
 379  ~CollectiveMutex();
 380 
387  void
388  lock(const MPI_Comm &comm);
389 
396  void
397  unlock(const MPI_Comm &comm);
398 
399  private:
403  bool locked;
404 
408  MPI_Request request;
409  };
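A usage sketch (illustrative; 'mpi_communicator' stands for a communicator owned by the caller): a CollectiveMutex is typically a static object guarding one communication phase and is acquired collectively through a ScopedLock, exactly as the some_to_some() implementation further down in this file does.

// Sketch: protect a round of point-to-point messages from interleaving
// with other rounds that use the same communicator and tags.
static Utilities::MPI::CollectiveMutex mutex;
{
  Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_communicator);
  // ... MPI_Isend / MPI_Recv exchange goes here ...
} // the unlock happens collectively in the ScopedLock destructor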
410 
411 
412 
459  template <typename T>
460  class Future
461  {
462  public:
467  template <typename W, typename G>
468  Future(W &&wait_operation, G &&get_and_cleanup_operation);
469 
475  Future(const Future &) = delete;
476 
480  Future(Future &&) noexcept = default;
481 
485  ~Future();
486 
492  Future &
493  operator=(const Future &) = delete;
494 
498  Future &
499  operator=(Future &&) noexcept = default;
500 
508  void
509  wait();
510 
522  T
523  get();
524 
525  private:
529  std::function<void()> wait_function;
530  std::function<T()> get_and_cleanup_function;
531 
535  bool is_done;
536 
 540  bool get_was_called;
 541  };
542 
543 
544 
574 #ifdef DEAL_II_WITH_MPI
 575  int
 576  create_group(const MPI_Comm & comm,
577  const MPI_Group &group,
578  const int tag,
579  MPI_Comm * new_comm);
580 #endif
581 
 590  std::vector<IndexSet>
 591  create_ascending_partitioning(
592  const MPI_Comm & comm,
593  const types::global_dof_index locally_owned_size);
594 
 602  IndexSet
 603  create_evenly_distributed_partitioning(
604  const MPI_Comm & comm,
605  const types::global_dof_index total_size);
606 
607 #ifdef DEAL_II_WITH_MPI
623  template <class Iterator, typename Number = long double>
 624  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
 625  mean_and_standard_deviation(const Iterator begin,
626  const Iterator end,
627  const MPI_Comm &comm);
628 #endif
629 
630 
678  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
679  create_mpi_data_type_n_bytes(const std::size_t n_bytes);
680 
700  template <typename T>
701  T
702  sum(const T &t, const MPI_Comm &mpi_communicator);
703 
713  template <typename T, typename U>
714  void
715  sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);
716 
726  template <typename T>
 727  void
 728  sum(const ArrayView<const T> &values,
729  const MPI_Comm & mpi_communicator,
730  const ArrayView<T> & sums);
731 
 737  template <int rank, int dim, typename Number>
 738  SymmetricTensor<rank, dim, Number>
 739  sum(const SymmetricTensor<rank, dim, Number> &local,
740  const MPI_Comm & mpi_communicator);
741 
 747  template <int rank, int dim, typename Number>
 748  Tensor<rank, dim, Number>
 749  sum(const Tensor<rank, dim, Number> &local,
750  const MPI_Comm & mpi_communicator);
751 
760  template <typename Number>
761  void
762  sum(const SparseMatrix<Number> &local,
763  const MPI_Comm & mpi_communicator,
764  SparseMatrix<Number> & global);
765 
785  template <typename T>
786  T
787  max(const T &t, const MPI_Comm &mpi_communicator);
788 
798  template <typename T, typename U>
799  void
800  max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);
801 
811  template <typename T>
 812  void
 813  max(const ArrayView<const T> &values,
814  const MPI_Comm & mpi_communicator,
815  const ArrayView<T> & maxima);
816 
836  template <typename T>
837  T
838  min(const T &t, const MPI_Comm &mpi_communicator);
839 
849  template <typename T, typename U>
850  void
851  min(const T &values, const MPI_Comm &mpi_communicator, U &minima);
852 
862  template <typename T>
 863  void
 864  min(const ArrayView<const T> &values,
865  const MPI_Comm & mpi_communicator,
866  const ArrayView<T> & minima);
867 
891  template <typename T>
892  T
893  logical_or(const T &t, const MPI_Comm &mpi_communicator);
894 
909  template <typename T, typename U>
910  void
911  logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);
912 
922  template <typename T>
 923  void
 924  logical_or(const ArrayView<const T> &values,
925  const MPI_Comm & mpi_communicator,
926  const ArrayView<T> & results);
927 
942  struct MinMaxAvg
943  {
948  double sum;
949 
954  double min;
955 
960  double max;
961 
970  unsigned int min_index;
971 
980  unsigned int max_index;
981 
986  double avg;
987  };
988 
1003  MinMaxAvg
1004  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
1005 
1017  std::vector<MinMaxAvg>
1018  min_max_avg(const std::vector<double> &my_value,
1019  const MPI_Comm & mpi_communicator);
1020 
1021 
1034  void
1035  min_max_avg(const ArrayView<const double> &my_values,
1036  const ArrayView<MinMaxAvg> & result,
1037  const MPI_Comm & mpi_communicator);
1038 
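Usage sketch (illustrative; 'local_residual' and 'comm' are made-up caller-side names): min_max_avg() fills a MinMaxAvg structure identically on all processes, including the ranks that own the extreme values.

const double local_residual = 0.1 * Utilities::MPI::this_mpi_process(comm);
const Utilities::MPI::MinMaxAvg stats =
  Utilities::MPI::min_max_avg(local_residual, comm);
// stats.min, stats.max, stats.avg, stats.sum are the same on every rank;
// stats.min_index / stats.max_index name the processes owning the extremes.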
1039 
 1083  class MPI_InitFinalize
 1084  {
1085  public:
 1131  MPI_InitFinalize(
 1132  int & argc,
1133  char **& argv,
1134  const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1135 
1140  ~MPI_InitFinalize();
1141 
1168  static void
1169  register_request(MPI_Request &request);
1170 
1174  static void
1175  unregister_request(MPI_Request &request);
1176 
1184  struct Signals
1185  {
1190  boost::signals2::signal<void()> at_mpi_init;
1191 
1198  boost::signals2::signal<void()> at_mpi_finalize;
1199  };
1200 
 1201  static Signals signals;
 1202 
1203  private:
1207  static std::set<MPI_Request *> requests;
1208  };
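Usage sketch of the usual entry point of an MPI-parallel deal.II program: construct an MPI_InitFinalize object first thing in main(); finalization then happens automatically when the object goes out of scope. The third argument (here 1) limits the number of threads per MPI process.

int main(int argc, char *argv[])
{
  // Initialize MPI; finalize automatically at the end of main().
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  const unsigned int my_rank =
    Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
  // ... parallel program ...
}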
1209 
1221  bool
1222  job_supports_mpi();
1223 
1241  template <typename T>
1242  std::map<unsigned int, T>
1243  some_to_some(const MPI_Comm & comm,
1244  const std::map<unsigned int, T> &objects_to_send);
1245 
1259  template <typename T>
1260  std::vector<T>
1261  all_gather(const MPI_Comm &comm, const T &object_to_send);
1262 
1278  template <typename T>
1279  std::vector<T>
1280  gather(const MPI_Comm & comm,
1281  const T & object_to_send,
1282  const unsigned int root_process = 0);
1283 
1319  template <typename T>
1320  std::enable_if_t<is_mpi_type<T> == false, T>
1321  broadcast(const MPI_Comm & comm,
1322  const T & object_to_send,
1323  const unsigned int root_process = 0);
1324 
1347  template <typename T>
1348  std::enable_if_t<is_mpi_type<T> == true, T>
1349  broadcast(const MPI_Comm & comm,
1350  const T & object_to_send,
1351  const unsigned int root_process = 0);
1352 
1369  template <typename T>
1370  void
1371  broadcast(T * buffer,
1372  const size_t count,
1373  const unsigned int root,
1374  const MPI_Comm & comm);
1375 
1388  template <typename T>
1389  T
1390  reduce(const T & local_value,
1391  const MPI_Comm & comm,
1392  const std::function<T(const T &, const T &)> &combiner,
1393  const unsigned int root_process = 0);
1394 
1404  template <typename T>
1405  T
1406  all_reduce(const T & local_value,
1407  const MPI_Comm & comm,
1408  const std::function<T(const T &, const T &)> &combiner);
1409 
1410 
1431  template <typename T>
1432  Future<void>
1433  isend(const T & object,
1434  MPI_Comm communicator,
1435  const unsigned int target_rank,
1436  const unsigned int mpi_tag = 0);
1437 
1438 
1455  template <typename T>
1456  Future<T>
1457  irecv(MPI_Comm communicator,
1458  const unsigned int source_rank,
1459  const unsigned int mpi_tag = 0);
1460 
1461 
1504  std::vector<unsigned int>
1505  compute_index_owner(const IndexSet &owned_indices,
1506  const IndexSet &indices_to_look_up,
1507  const MPI_Comm &comm);
1508 
1516  template <typename T>
1517  std::vector<T>
1518  compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);
1519 
1523  template <typename T>
1524  std::set<T>
1525  compute_set_union(const std::set<T> &set, const MPI_Comm &comm);
1526 
1527 
1528 
1529  /* --------------------------- inline functions ------------------------- */
1530 
1531  namespace internal
1532  {
1538  namespace MPIDataTypes
1539  {
1540 #ifdef DEAL_II_WITH_MPI
1541  inline MPI_Datatype
1542  mpi_type_id(const bool *)
1543  {
1544  return MPI_CXX_BOOL;
1545  }
1546 
1547 
1548 
1549  inline MPI_Datatype
1550  mpi_type_id(const char *)
1551  {
1552  return MPI_CHAR;
1553  }
1554 
1555 
1556 
1557  inline MPI_Datatype
1558  mpi_type_id(const signed char *)
1559  {
1560  return MPI_SIGNED_CHAR;
1561  }
1562 
1563 
1564 
1565  inline MPI_Datatype
1566  mpi_type_id(const wchar_t *)
1567  {
1568  return MPI_WCHAR;
1569  }
1570 
1571 
1572 
1573  inline MPI_Datatype
1574  mpi_type_id(const short *)
1575  {
1576  return MPI_SHORT;
1577  }
1578 
1579 
1580 
1581  inline MPI_Datatype
1582  mpi_type_id(const int *)
1583  {
1584  return MPI_INT;
1585  }
1586 
1587 
1588 
1589  inline MPI_Datatype
1590  mpi_type_id(const long int *)
1591  {
1592  return MPI_LONG;
1593  }
1594 
1595 
1596 
1597  inline MPI_Datatype
1598  mpi_type_id(const long long int *)
1599  {
1600  return MPI_LONG_LONG;
1601  }
1602 
1603 
1604 
1605  inline MPI_Datatype
1606  mpi_type_id(const unsigned char *)
1607  {
1608  return MPI_UNSIGNED_CHAR;
1609  }
1610 
1611 
1612 
1613  inline MPI_Datatype
1614  mpi_type_id(const unsigned short *)
1615  {
1616  return MPI_UNSIGNED_SHORT;
1617  }
1618 
1619 
1620 
1621  inline MPI_Datatype
1622  mpi_type_id(const unsigned int *)
1623  {
1624  return MPI_UNSIGNED;
1625  }
1626 
1627 
1628 
1629  inline MPI_Datatype
1630  mpi_type_id(const unsigned long int *)
1631  {
1632  return MPI_UNSIGNED_LONG;
1633  }
1634 
1635 
1636 
1637  inline MPI_Datatype
1638  mpi_type_id(const unsigned long long int *)
1639  {
1640  return MPI_UNSIGNED_LONG_LONG;
1641  }
1642 
1643 
1644 
1645  inline MPI_Datatype
1646  mpi_type_id(const float *)
1647  {
1648  return MPI_FLOAT;
1649  }
1650 
1651 
1652 
1653  inline MPI_Datatype
1654  mpi_type_id(const double *)
1655  {
1656  return MPI_DOUBLE;
1657  }
1658 
1659 
1660 
1661  inline MPI_Datatype
1662  mpi_type_id(const long double *)
1663  {
1664  return MPI_LONG_DOUBLE;
1665  }
1666 
1667 
1668 
1669  inline MPI_Datatype
1670  mpi_type_id(const std::complex<float> *)
1671  {
1672  return MPI_COMPLEX;
1673  }
1674 
1675 
1676 
1677  inline MPI_Datatype
1678  mpi_type_id(const std::complex<double> *)
1679  {
1680  return MPI_DOUBLE_COMPLEX;
1681  }
1682 #endif
1683  } // namespace MPIDataTypes
1684  } // namespace internal
1685 
1686 
1687 
1688 #ifdef DEAL_II_WITH_MPI
1706  template <typename T>
 1707  const MPI_Datatype
 1708  mpi_type_id_for_type = internal::MPIDataTypes::mpi_type_id(
1709  static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1710 #endif
1711 
1712 #ifndef DOXYGEN
1713  namespace internal
1714  {
1715  // declaration for an internal function that lives in mpi.templates.h
1716  template <typename T>
1717  void
1718  all_reduce(const MPI_Op & mpi_op,
1719  const ArrayView<const T> &values,
1720  const MPI_Comm & mpi_communicator,
1721  const ArrayView<T> & output);
1722  } // namespace internal
1723 
1724 
1725  template <typename T>
1726  template <typename W, typename G>
1727  Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1728  : wait_function(wait_operation)
1729  , get_and_cleanup_function(get_and_cleanup_operation)
1730  , is_done(false)
1731  , get_was_called(false)
1732  {}
1733 
1734 
1735 
1736  template <typename T>
1737  Future<T>::~Future()
1738  {
1739  // If there is a clean-up function, and if it has not been
1740  // called yet, then do so. Note that we may not have a
1741  // clean-up function (not even an empty one) if the current
1742  // object has been moved from, into another object, and as
1743  // a consequence the std::function objects are now empty
1744  // even though they were initialized in the constructor.
1745  // (A std::function object whose object is a an empty lambda
1746  // function, [](){}, is not an empty std::function object.)
1747  if ((get_was_called == false) && get_and_cleanup_function)
1748  get();
1749  }
1750 
1751 
1752 
1753  template <typename T>
1754  void
1755  Future<T>::wait()
1756  {
1757  if (is_done == false)
1758  {
1759  wait_function();
1760 
1761  is_done = true;
1762  }
1763  }
1764 
1765 
1766  template <typename T>
1767  T
1768  Future<T>::get()
1769  {
1770  Assert(get_was_called == false,
1771  ExcMessage(
1772  "You can't call get() more than once on a Future object."));
1773  get_was_called = true;
1774 
1775  wait();
1776  return get_and_cleanup_function();
1777  }
1778 
1779 
1780 
1781  template <typename T, unsigned int N>
1782  void
1783  sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
1784  {
 1785  internal::all_reduce(MPI_SUM,
 1786  ArrayView<const T>(values, N),
1787  mpi_communicator,
1788  ArrayView<T>(sums, N));
1789  }
1790 
1791 
1792 
1793  template <typename T, unsigned int N>
1794  void
1795  max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
1796  {
 1797  internal::all_reduce(MPI_MAX,
 1798  ArrayView<const T>(values, N),
1799  mpi_communicator,
1800  ArrayView<T>(maxima, N));
1801  }
1802 
1803 
1804 
1805  template <typename T, unsigned int N>
1806  void
1807  min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
1808  {
 1809  internal::all_reduce(MPI_MIN,
 1810  ArrayView<const T>(values, N),
1811  mpi_communicator,
1812  ArrayView<T>(minima, N));
1813  }
1814 
1815 
1816 
1817  template <typename T, unsigned int N>
1818  void
1819  logical_or(const T (&values)[N],
1820  const MPI_Comm &mpi_communicator,
1821  T (&results)[N])
1822  {
1823  static_assert(std::is_integral<T>::value,
1824  "The MPI_LOR operation only allows integral data types.");
1825 
 1826  internal::all_reduce(MPI_LOR,
 1827  ArrayView<const T>(values, N),
1828  mpi_communicator,
1829  ArrayView<T>(results, N));
1830  }
1831 
1832 
1833 
1834  template <typename T>
1835  std::map<unsigned int, T>
1836  some_to_some(const MPI_Comm & comm,
1837  const std::map<unsigned int, T> &objects_to_send)
1838  {
1839 # ifndef DEAL_II_WITH_MPI
1840  (void)comm;
1841  Assert(objects_to_send.size() < 2,
1842  ExcMessage("Cannot send to more than one processor."));
1843  Assert(objects_to_send.find(0) != objects_to_send.end() ||
1844  objects_to_send.size() == 0,
1845  ExcMessage("Can only send to myself or to nobody."));
1846  return objects_to_send;
1847 # else
1848  const auto my_proc = this_mpi_process(comm);
1849 
1850  std::map<unsigned int, T> received_objects;
1851 
1852  std::vector<unsigned int> send_to;
1853  send_to.reserve(objects_to_send.size());
1854  for (const auto &m : objects_to_send)
1855  if (m.first == my_proc)
1856  received_objects[my_proc] = m.second;
1857  else
1858  send_to.emplace_back(m.first);
1859 
 1860  const unsigned int n_expected_incoming_messages =
 1861  Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1862 
1863  // Protect the following communication:
1864  static CollectiveMutex mutex;
1865  CollectiveMutex::ScopedLock lock(mutex, comm);
1866 
1867  // If we have something to send, or we expect something from other
1868  // processors, we need to visit one of the two scopes below. Otherwise,
1869  // no other action is required by this mpi process, and we can safely
1870  // return.
1871  if (send_to.size() == 0 && n_expected_incoming_messages == 0)
1872  return received_objects;
1873 
 1874  const int mpi_tag =
 1875  internal::Tags::mpi_some_to_some;
1876 
1877  // Sending buffers
1878  std::vector<std::vector<char>> buffers_to_send(send_to.size());
1879  std::vector<MPI_Request> buffer_send_requests(send_to.size());
1880  {
1881  unsigned int i = 0;
1882  for (const auto &rank_obj : objects_to_send)
1883  if (rank_obj.first != my_proc)
1884  {
1885  const auto &rank = rank_obj.first;
1886  buffers_to_send[i] = Utilities::pack(rank_obj.second,
1887  /*allow_compression=*/false);
1888  const int ierr = MPI_Isend(buffers_to_send[i].data(),
1889  buffers_to_send[i].size(),
1890  MPI_CHAR,
1891  rank,
1892  mpi_tag,
1893  comm,
1894  &buffer_send_requests[i]);
1895  AssertThrowMPI(ierr);
1896  ++i;
1897  }
1898  }
1899 
1900  // Fill the output map
1901  {
1902  std::vector<char> buffer;
1903  // We do this on a first come/first served basis
1904  for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1905  {
1906  // Probe what's going on. Take data from the first available sender
1907  MPI_Status status;
1908  int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1909  AssertThrowMPI(ierr);
1910 
1911  // Length of the message
1912  int len;
1913  ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1914  AssertThrowMPI(ierr);
1915  buffer.resize(len);
1916 
1917  // Source rank
1918  const unsigned int rank = status.MPI_SOURCE;
1919 
1920  // Actually receive the message
1921  ierr = MPI_Recv(buffer.data(),
1922  len,
1923  MPI_CHAR,
1924  status.MPI_SOURCE,
1925  status.MPI_TAG,
1926  comm,
1927  MPI_STATUS_IGNORE);
1928  AssertThrowMPI(ierr);
 1929  Assert(received_objects.find(rank) == received_objects.end(),
 1930  ExcInternalError(
1931  "I should not receive again from this rank"));
1932  received_objects[rank] =
1933  Utilities::unpack<T>(buffer,
1934  /*allow_compression=*/false);
1935  }
1936  }
1937 
1938  // Wait to have sent all objects.
1939  const int ierr = MPI_Waitall(send_to.size(),
1940  buffer_send_requests.data(),
1941  MPI_STATUSES_IGNORE);
1942  AssertThrowMPI(ierr);
1943 
1944  return received_objects;
1945 # endif // deal.II with MPI
1946  }
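Usage sketch for the function defined above (ranks and payload invented for illustration; 'comm' and 'my_rank' as obtained from the utilities declared earlier): each process sends an arbitrary packable object to a chosen set of destinations and receives whatever other processes addressed to it.

// Sketch: every rank sends a small vector to rank 0.
std::map<unsigned int, std::vector<double>> to_send;
to_send[0] = {1.0 * my_rank, 2.0 * my_rank};
const auto received = Utilities::MPI::some_to_some(comm, to_send);
// On rank 0, 'received' now has one entry per sending process.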
1947 
1948 
1949 
1950  template <typename T>
1951  std::vector<T>
1952  all_gather(const MPI_Comm &comm, const T &object)
1953  {
1954  if (job_supports_mpi() == false)
1955  return {object};
1956 
1957 # ifndef DEAL_II_WITH_MPI
1958  (void)comm;
1959  std::vector<T> v(1, object);
1960  return v;
1961 # else
1962  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
1963 
1964  std::vector<char> buffer = Utilities::pack(object);
1965 
1966  int n_local_data = buffer.size();
1967 
1968  // Vector to store the size of loc_data_array for every process
1969  std::vector<int> size_all_data(n_procs, 0);
1970 
1971  // Exchanging the size of each buffer
1972  int ierr = MPI_Allgather(
1973  &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1974  AssertThrowMPI(ierr);
1975 
1976  // Now computing the displacement, relative to recvbuf,
1977  // at which to store the incoming buffer
1978  std::vector<int> rdispls(n_procs);
1979  rdispls[0] = 0;
1980  for (unsigned int i = 1; i < n_procs; ++i)
1981  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1982 
1983  // Step 3: exchange the buffer:
1984  std::vector<char> received_unrolled_buffer(rdispls.back() +
1985  size_all_data.back());
1986 
1987  ierr = MPI_Allgatherv(buffer.data(),
1988  n_local_data,
1989  MPI_CHAR,
1990  received_unrolled_buffer.data(),
1991  size_all_data.data(),
1992  rdispls.data(),
1993  MPI_CHAR,
1994  comm);
1995  AssertThrowMPI(ierr);
1996 
1997  std::vector<T> received_objects(n_procs);
1998  for (unsigned int i = 0; i < n_procs; ++i)
1999  {
2000  std::vector<char> local_buffer(received_unrolled_buffer.begin() +
2001  rdispls[i],
2002  received_unrolled_buffer.begin() +
2003  rdispls[i] + size_all_data[i]);
2004  received_objects[i] = Utilities::unpack<T>(local_buffer);
2005  }
2006 
2007  return received_objects;
2008 # endif
2009  }
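Usage sketch (the per-rank quantity is invented): all_gather() returns a vector with one entry per rank, in rank order, identical on every process.

const std::size_t n_local = 1000;   // assumed per-rank quantity
const std::vector<std::size_t> n_per_rank =
  Utilities::MPI::all_gather(comm, n_local);
// n_per_rank[r] holds rank r's value on all processes.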
2010 
2011 
2012 
2013  template <typename T>
2014  std::vector<T>
2015  gather(const MPI_Comm & comm,
2016  const T & object_to_send,
2017  const unsigned int root_process)
2018  {
2019 # ifndef DEAL_II_WITH_MPI
2020  (void)comm;
2021  (void)root_process;
2022  std::vector<T> v(1, object_to_send);
2023  return v;
2024 # else
2025  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2026  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
2027 
2028  AssertIndexRange(root_process, n_procs);
2029 
2030  std::vector<char> buffer = Utilities::pack(object_to_send);
2031  int n_local_data = buffer.size();
2032 
2033  // Vector to store the size of loc_data_array for every process
2034  // only the root process needs to allocate memory for that purpose
2035  std::vector<int> size_all_data;
2036  if (my_rank == root_process)
2037  size_all_data.resize(n_procs, 0);
2038 
2039  // Exchanging the size of each buffer
2040  int ierr = MPI_Gather(&n_local_data,
2041  1,
2042  MPI_INT,
2043  size_all_data.data(),
2044  1,
2045  MPI_INT,
2046  root_process,
2047  comm);
2048  AssertThrowMPI(ierr);
2049 
2050  // Now computing the displacement, relative to recvbuf,
2051  // at which to store the incoming buffer; only for root
2052  std::vector<int> rdispls;
2053  if (my_rank == root_process)
2054  {
2055  rdispls.resize(n_procs, 0);
2056  for (unsigned int i = 1; i < n_procs; ++i)
2057  rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2058  }
2059  // exchange the buffer:
2060  std::vector<char> received_unrolled_buffer;
2061  if (my_rank == root_process)
2062  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2063 
2064  ierr = MPI_Gatherv(buffer.data(),
2065  n_local_data,
2066  MPI_CHAR,
2067  received_unrolled_buffer.data(),
2068  size_all_data.data(),
2069  rdispls.data(),
2070  MPI_CHAR,
2071  root_process,
2072  comm);
2073  AssertThrowMPI(ierr);
2074 
2075  std::vector<T> received_objects;
2076 
2077  if (my_rank == root_process)
2078  {
2079  received_objects.resize(n_procs);
2080 
2081  for (unsigned int i = 0; i < n_procs; ++i)
2082  {
2083  const std::vector<char> local_buffer(
2084  received_unrolled_buffer.begin() + rdispls[i],
2085  received_unrolled_buffer.begin() + rdispls[i] +
2086  size_all_data[i]);
2087  received_objects[i] = Utilities::unpack<T>(local_buffer);
2088  }
2089  }
2090  return received_objects;
2091 # endif
2092  }
2093 
2094 
2095 
2096  template <typename T>
2097  void
2098  broadcast(T * buffer,
2099  const size_t count,
2100  const unsigned int root,
2101  const MPI_Comm & comm)
2102  {
2103 # ifndef DEAL_II_WITH_MPI
2104  (void)buffer;
2105  (void)count;
2106  (void)root;
2107  (void)comm;
2108 # else
2109  Assert(root < n_mpi_processes(comm),
2110  ExcMessage("Invalid root rank specified."));
2111 
2112  // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2113  // iteration:
2114  const size_t max_send_count = std::numeric_limits<signed int>::max();
2115 
2116  size_t total_sent_count = 0;
2117  while (total_sent_count < count)
2118  {
2119  const size_t current_count =
2120  std::min(count - total_sent_count, max_send_count);
2121 
2122  const int ierr = MPI_Bcast(buffer + total_sent_count,
2123  current_count,
2124  mpi_type_id_for_type<decltype(*buffer)>,
2125  root,
2126  comm);
2127  AssertThrowMPI(ierr);
2128  total_sent_count += current_count;
2129  }
2130 # endif
2131  }
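Usage sketch for the buffer-based broadcast above ('n_entries' is assumed to be identical on all ranks): the loop in the implementation splits counts larger than 2^31-1 into multiple MPI_Bcast calls, so arbitrarily large buffers can be passed in one call.

std::vector<double> data(n_entries);
if (Utilities::MPI::this_mpi_process(comm) == 0)
  std::fill(data.begin(), data.end(), 42.0);   // root fills the data
Utilities::MPI::broadcast(data.data(), data.size(), 0, comm);
// All ranks now hold the same 'data'.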
2132 
2133 
2134 
2135  template <typename T>
2136  std::enable_if_t<is_mpi_type<T> == false, T>
2137  broadcast(const MPI_Comm & comm,
2138  const T & object_to_send,
2139  const unsigned int root_process)
2140  {
2141 # ifndef DEAL_II_WITH_MPI
2142  (void)comm;
2143  (void)root_process;
2144  return object_to_send;
2145 # else
2146  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
2147  AssertIndexRange(root_process, n_procs);
2148  (void)n_procs;
2149 
2150  std::vector<char> buffer;
2151  std::size_t buffer_size = numbers::invalid_size_type;
2152 
2153  // On the root process, pack the data and determine what the
2154  // buffer size needs to be.
2155  if (this_mpi_process(comm) == root_process)
2156  {
2157  buffer = Utilities::pack(object_to_send, false);
2158  buffer_size = buffer.size();
2159  }
2160 
2161  // Exchange the size of buffer
2162  int ierr = MPI_Bcast(&buffer_size,
2163  1,
2164  mpi_type_id_for_type<decltype(buffer_size)>,
2165  root_process,
2166  comm);
2167  AssertThrowMPI(ierr);
2168 
2169  // If not on the root process, correctly size the buffer to
2170  // receive the data, then do exactly that.
2171  if (this_mpi_process(comm) != root_process)
2172  buffer.resize(buffer_size);
2173 
2174  broadcast(buffer.data(), buffer_size, root_process, comm);
2175 
2176  if (Utilities::MPI::this_mpi_process(comm) == root_process)
2177  return object_to_send;
2178  else
2179  return Utilities::unpack<T>(buffer, false);
2180 # endif
2181  }
2182 
2183 
2184 
2185  template <typename T>
2186  std::enable_if_t<is_mpi_type<T> == true, T>
2187  broadcast(const MPI_Comm & comm,
2188  const T & object_to_send,
2189  const unsigned int root_process)
2190  {
2191 # ifndef DEAL_II_WITH_MPI
2192  (void)comm;
2193  (void)root_process;
2194  return object_to_send;
2195 # else
2196 
2197  T object = object_to_send;
2198  int ierr =
2199  MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2200  AssertThrowMPI(ierr);
2201 
2202  return object;
2203 # endif
2204  }
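Usage sketch for the two object-based overloads above (variable names invented): types covered by is_mpi_type are sent directly with a single MPI_Bcast, while everything else is serialized with Utilities::pack() on the root and unpacked on the other ranks.

// Serialized path: std::string is not a built-in MPI type.
const std::string file_name =
  Utilities::MPI::broadcast(comm, file_name_on_root, 0);
// Direct path: int is an MPI type and is broadcast without packing.
const int n_refinements =
  Utilities::MPI::broadcast(comm, n_refinements_on_root, 0);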
2205 
2206 
2207  template <typename T>
2208  Future<void>
2209  isend(const T & object,
2210  MPI_Comm communicator,
2211  const unsigned int target_rank,
2212  const unsigned int mpi_tag)
2213  {
2214 # ifndef DEAL_II_WITH_MPI
2215  Assert(false, ExcNeedsMPI());
2216  (void)object;
2217  (void)communicator;
2218  (void)target_rank;
2219  (void)mpi_tag;
2220  return Future<void>([]() {}, []() {});
2221 # else
2222  // Create a pointer to a send buffer into which we pack the object
2223  // to be sent. The buffer will be released by the Future object once
2224  // the send has been verified to have succeeded.
2225  //
2226  // Conceptually, we would like this send buffer to be a
2227  // std::unique_ptr object whose ownership is later handed over
2228  // to the cleanup function. That has the disadvantage that the
2229  // cleanup object is a non-copyable lambda capture, leading to
2230  // awkward semantics. Instead, we use a std::shared_ptr; we move
2231  // this shared pointer into the cleanup function, which means
2232  // that there is exactly one shared pointer who owns the buffer
2233  // at any given time, though the latter is not an important
2234  // optimization.
2235  std::shared_ptr<std::vector<char>> send_buffer =
2236  std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2237 
2238  // Now start the send, and store the result in a request object that
2239  // we can then wait for later:
2240  MPI_Request request;
2241  const int ierr =
2242  MPI_Isend(send_buffer->data(),
2243  send_buffer->size(),
2244  mpi_type_id_for_type<decltype(*send_buffer->data())>,
2245  target_rank,
2246  mpi_tag,
2247  communicator,
2248  &request);
2249  AssertThrowMPI(ierr);
2250 
2251  // Then return a std::future-like object that has a wait()
2252  // function one can use to wait for the communication to finish,
2253  // and that has a cleanup function to be called at some point
2254  // after that makes sure the send buffer gets deallocated. This
2255  // cleanup function takes over ownership of the send buffer.
2256  //
2257  // Note that the body of the lambda function of the clean-up
2258  // function could be left empty. If that were so, once the
2259  // lambda function object goes out of scope, the 'send_buffer'
2260  // member of the closure object goes out of scope as well and so
2261  // the send_buffer is destroyed. But we may want to release the
2262  // buffer itself as early as possible, and so we clear the
2263  // buffer when the Future::get() function is called.
2264  auto wait = [request]() mutable {
2265  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2266  AssertThrowMPI(ierr);
2267  };
2268  auto cleanup = [send_buffer = std::move(send_buffer)]() {
2269  send_buffer->clear();
2270  };
2271  return Future<void>(wait, cleanup);
2272 # endif
2273  }
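Usage sketch (target rank and payload invented): the Future<void> returned by isend() keeps the packed send buffer alive; wait() (or, implicitly, the destructor) blocks until MPI has taken over the data.

std::vector<double> payload = {1., 2., 3.};
Utilities::MPI::Future<void> send_future =
  Utilities::MPI::isend(payload, comm, /*target_rank=*/1);
// ... overlap computation with communication ...
send_future.wait();   // safe point: the message is on its way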
2274 
2275 
2276 
2277  template <typename T>
2278  Future<T>
2279  irecv(MPI_Comm communicator,
2280  const unsigned int source_rank,
2281  const unsigned int mpi_tag)
2282  {
2283 # ifndef DEAL_II_WITH_MPI
2284  Assert(false, ExcNeedsMPI());
2285  (void)communicator;
2286  (void)source_rank;
2287  (void)mpi_tag;
 2288  return Future<T>([]() {}, []() { return T{}; });
2289 # else
2290  // Use a 'probe' operation for the 'wait' operation of the
2291  // Future this function returns. It will trigger whenever we get
2292  // the incoming message. Later, once we have received the message, we
2293  // can query its size and allocate a receiver buffer.
2294  //
2295  // Since we may be waiting for multiple messages from the same
2296  // incoming process (with possibly the same tag -- we can't
2297  // know), we must make sure that the 'probe' operation we have
2298  // here (and which we use to determine the buffer size) matches
2299  // the 'recv' operation with which we actually get the data
2300  // later on. This is exactly what the 'MPI_Mprobe' function and
2301  // its 'I'mmediate variant is there for, coupled with the
2302  // 'MPI_Mrecv' call that would put into the clean-up function
2303  // below.
2304  std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2305  std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2306 
2307  auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2308  const int ierr = MPI_Mprobe(
2309  source_rank, mpi_tag, communicator, message.get(), status.get());
2310  AssertThrowMPI(ierr);
2311  };
2312 
2313 
2314  // Now also define the function that actually gets the data:
2315  auto get = [status, message]() {
2316  int number_amount;
2317  int ierr;
2318  ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2319  AssertThrowMPI(ierr);
2320 
2321  std::vector<char> receive_buffer(number_amount);
2322 
2323  // Then actually get the data, using the matching MPI_Mrecv to the above
2324  // MPI_Mprobe:
2325  ierr = MPI_Mrecv(receive_buffer.data(),
2326  number_amount,
2327  mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2328  message.get(),
2329  status.get());
2330  AssertThrowMPI(ierr);
2331 
2332  // Return the unpacked object:
2333  return Utilities::unpack<T>(receive_buffer, false);
2334  };
2335 
2336  return Future<T>(wait, get);
2337 # endif
2338  }
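Matching usage sketch for irecv() (source rank invented): the template argument must be given explicitly because it appears only in the return type; get() waits for the message and returns the unpacked object.

Utilities::MPI::Future<std::vector<double>> recv_future =
  Utilities::MPI::irecv<std::vector<double>>(comm, /*source_rank=*/0);
const std::vector<double> payload = recv_future.get();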
2339 
2340 
2341 
2342 # ifdef DEAL_II_WITH_MPI
2343  template <class Iterator, typename Number>
2344  std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2345  mean_and_standard_deviation(const Iterator begin,
2346  const Iterator end,
2347  const MPI_Comm &comm)
2348  {
2349  // below we do simple and straight-forward implementation. More elaborate
2350  // options are:
2351  // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2352  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2353  // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2354  using Std = typename numbers::NumberTraits<Number>::real_type;
2355  const Number sum = std::accumulate(begin, end, Number(0.));
2356 
2357  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2358  Assert(size > 0, ExcDivideByZero());
2359  const Number mean =
2360  Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2361  Std sq_sum = 0.;
 2362  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
 2363  sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
2364  });
2365  sq_sum = Utilities::MPI::sum(sq_sum, comm);
2366  return std::make_pair(mean,
2367  std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2368  }
2369 # endif
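Usage sketch for mean_and_standard_deviation() (sample values invented): the result is a pair of the global mean and the sample standard deviation accumulated over all ranks.

const std::vector<double> samples = {0.5, 1.5, 2.5};   // local samples
const auto stats =
  Utilities::MPI::mean_and_standard_deviation(samples.begin(),
                                              samples.end(),
                                              comm);
// stats.first: mean over all ranks, stats.second: standard deviation.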
2370 
2371 #endif
2372  } // end of namespace MPI
2373 } // end of namespace Utilities
2374 
2375 
 2376 DEAL_II_NAMESPACE_CLOSE
 2377 
2378 #endif