mpi.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2025 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_h
16#define dealii_mpi_h
17
18#include <deal.II/base/config.h>
19
27#include <deal.II/base/types.h>
29
30#include <complex>
31#include <limits>
32#include <map>
33#include <numeric>
34#include <set>
35#include <vector>
36
37#ifdef DEAL_II_WITH_MPI
39# include <mpi.h>
41#endif
42
43
57#ifdef DEAL_II_WITH_MPI
58# define DEAL_II_MPI_CONST_CAST(expr) (expr)
59#endif
60
61
62
 63 DEAL_II_NAMESPACE_OPEN
 64
 65
66// Forward type declarations to allow MPI sums over tensorial types
67#ifndef DOXYGEN
68template <int rank, int dim, typename Number>
69class Tensor;
70template <int rank, int dim, typename Number>
71class SymmetricTensor;
72template <typename Number>
73class SparseMatrix;
74class IndexSet;
75#endif
76
77namespace Utilities
78{
 91 IndexSet
 92 create_evenly_distributed_partitioning(
 93   const unsigned int my_partition_id,
 94   const unsigned int n_partitions,
 95   const types::global_dof_index total_size);
96
104 namespace MPI
105 {
114 template <typename T>
115 constexpr bool is_mpi_type = is_same_as_any_of<T,
116 char,
117 signed short,
118 signed int,
119 signed long,
120 signed long long,
121 signed char,
122 unsigned char,
123 unsigned short,
124 unsigned int,
125 unsigned long int,
126 unsigned long long,
127 float,
128 double,
129 long double,
130 bool,
131 std::complex<float>,
132 std::complex<double>,
133 std::complex<long double>,
134 wchar_t>::value;
135
144 unsigned int
145 n_mpi_processes(const MPI_Comm mpi_communicator);
146
155 unsigned int
156 this_mpi_process(const MPI_Comm mpi_communicator);
157
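// Editorial usage sketch (not part of the upstream header): querying the size
// of a communicator and the rank of the calling process, assuming MPI has
// been initialized.
// @code
//   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
//   const unsigned int rank    = Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
//   if (rank == 0)
//     std::cout << "Running on " << n_ranks << " MPI processes" << std::endl;
// @endcode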
 162 std::vector<unsigned int>
 163 mpi_processes_within_communicator(const MPI_Comm comm_large,
 164                                   const MPI_Comm comm_small);
165
 187 std::vector<unsigned int>
 188 compute_point_to_point_communication_pattern(
 189   const MPI_Comm mpi_comm,
 190   const std::vector<unsigned int> &destinations);
191
 211 unsigned int
 212 compute_n_point_to_point_communications(
 213   const MPI_Comm mpi_comm,
 214   const std::vector<unsigned int> &destinations);
215
 232 MPI_Comm
 233 duplicate_communicator(const MPI_Comm mpi_communicator);
234
244 void
245 free_communicator(MPI_Comm mpi_communicator);
246
 259 class DuplicatedCommunicator
 260 {
 261 public:
 265   explicit DuplicatedCommunicator(const MPI_Comm communicator)
 266     : comm(duplicate_communicator(communicator))
 267   {}
 268
 272   DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
 273
 277   ~DuplicatedCommunicator()
 278   {
 279     free_communicator(comm);
 280   }
 281
 285   MPI_Comm
 286   operator*() const
 287   {
 288     return comm;
 289   }
 290
 291
 295   DuplicatedCommunicator &
 296   operator=(const DuplicatedCommunicator &) = delete;
 297
 298 private:
 302   MPI_Comm comm;
 303 };
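// Editorial usage sketch (not part of the upstream header): tying the lifetime
// of a duplicated communicator to a scope; operator*() gives access to the
// stored MPI_Comm.
// @code
//   {
//     Utilities::MPI::DuplicatedCommunicator duplicated(MPI_COMM_WORLD);
//     const unsigned int n = Utilities::MPI::n_mpi_processes(*duplicated);
//     // ... communicate on *duplicated without interfering with other
//     //     users of MPI_COMM_WORLD ...
//   } // the duplicated communicator is freed here
// @endcode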
304
305
306
 336 class CollectiveMutex
 337 {
 338 public:
 344   class ScopedLock
 345   {
 346   public:
 350     ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
 351       : mutex(mutex)
 352       , comm(comm)
 353     {
 354       mutex.lock(comm);
 355     }
 356
 360     ~ScopedLock()
 361     {
 362       mutex.unlock(comm);
 363     }
 364
 365   private:
 369     CollectiveMutex &mutex;
 373     const MPI_Comm comm;
 374   };
 375
 379   CollectiveMutex();
 380
 384   ~CollectiveMutex();
 385
 392   void
 393   lock(const MPI_Comm comm);
 394
 401   void
 402   unlock(const MPI_Comm comm);
 403
 404 private:
 408   bool locked;
 409
 413   MPI_Request request;
 414 };
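// Editorial usage sketch (not part of the upstream header): protecting a
// round of point-to-point messages with a scoped lock, mirroring the use of
// CollectiveMutex inside some_to_some() further down in this file.
// @code
//   static Utilities::MPI::CollectiveMutex      mutex;
//   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_communicator);
//   // ... point-to-point communication that must not interleave with other
//   //     communication rounds on the same communicator ...
// @endcode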
415
416
417
464 template <typename T>
465 class Future
466 {
467 public:
472 template <typename W, typename G>
473 Future(W &&wait_operation, G &&get_and_cleanup_operation);
474
480 Future(const Future &) = delete;
481
485 Future(Future &&) noexcept = default;
486
 518   ~Future();
 519
 525   Future &
 526   operator=(const Future &) = delete;
 527
 531   Future &
 532   operator=(Future &&) noexcept = default;
 533
 541   void
 542   wait();
 543
 555   T
 556   get();
 557
 558 private:
 562   std::function<void()> wait_function;
 563   std::function<T()>    get_and_cleanup_function;
 564
 568   bool is_done;
 569
 573   bool get_was_called;
 574 };
575
 584 std::vector<IndexSet>
 585 create_ascending_partitioning(
 586   const MPI_Comm comm,
 587   const types::global_dof_index locally_owned_size);
 588
 596 IndexSet
 597 create_evenly_distributed_partitioning(
 598   const MPI_Comm comm,
 599   const types::global_dof_index total_size);
600
601#ifdef DEAL_II_WITH_MPI
617 template <class Iterator, typename Number = long double>
618 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
619 mean_and_standard_deviation(const Iterator begin,
620 const Iterator end,
621 const MPI_Comm comm);
622#endif
623
624
672 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
673 create_mpi_data_type_n_bytes(const std::size_t n_bytes);
674
694 template <typename T>
695 T
696 sum(const T &t, const MPI_Comm mpi_communicator);
697
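// Editorial usage sketch (not part of the upstream header): reducing a local
// scalar over all processes ('local_error' and 'mpi_communicator' are
// placeholders).
// @code
//   const double global_error = Utilities::MPI::sum(local_error, mpi_communicator);
//   const double worst_error  = Utilities::MPI::max(local_error, mpi_communicator);
// @endcode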
707 template <typename T, typename U>
708 void
709 sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
710
720 template <typename T>
721 void
722 sum(const ArrayView<const T> &values,
723 const MPI_Comm mpi_communicator,
724 const ArrayView<T> &sums);
725
 731 template <int rank, int dim, typename Number>
 732 SymmetricTensor<rank, dim, Number>
 733 sum(const SymmetricTensor<rank, dim, Number> &local,
 734     const MPI_Comm mpi_communicator);
 735
 741 template <int rank, int dim, typename Number>
 742 Tensor<rank, dim, Number>
 743 sum(const Tensor<rank, dim, Number> &local,
 744     const MPI_Comm mpi_communicator);
 745
 754 template <typename Number>
 755 void
 756 sum(const SparseMatrix<Number> &local,
 757     const MPI_Comm mpi_communicator,
 758     SparseMatrix<Number> &global);
759
779 template <typename T>
780 T
781 max(const T &t, const MPI_Comm mpi_communicator);
782
792 template <typename T, typename U>
793 void
794 max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
795
805 template <typename T>
806 void
807 max(const ArrayView<const T> &values,
808 const MPI_Comm mpi_communicator,
809 const ArrayView<T> &maxima);
810
830 template <typename T>
831 T
832 min(const T &t, const MPI_Comm mpi_communicator);
833
843 template <typename T, typename U>
844 void
845 min(const T &values, const MPI_Comm mpi_communicator, U &minima);
846
856 template <typename T>
857 void
858 min(const ArrayView<const T> &values,
859 const MPI_Comm mpi_communicator,
860 const ArrayView<T> &minima);
861
885 template <typename T>
886 T
887 logical_or(const T &t, const MPI_Comm mpi_communicator);
888
903 template <typename T, typename U>
904 void
905 logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
906
916 template <typename T>
917 void
 918 logical_or(const ArrayView<const T> &values,
 919            const MPI_Comm mpi_communicator,
920 const ArrayView<T> &results);
921
 936 struct MinMaxAvg
 937 {
942 double sum;
943
948 double min;
949
954 double max;
955
964 unsigned int min_index;
965
974 unsigned int max_index;
975
980 double avg;
981 };
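// Editorial usage sketch (not part of the upstream header): gathering timing
// statistics across processes ('wall_time' is a placeholder).
// @code
//   const Utilities::MPI::MinMaxAvg stats =
//     Utilities::MPI::min_max_avg(wall_time, mpi_communicator);
//   // stats.min, stats.max, stats.avg, stats.sum, and the owning ranks
//   // stats.min_index / stats.max_index are identical on all processes.
// @endcode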
982
 997 MinMaxAvg
 998 min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
999
1011 std::vector<MinMaxAvg>
1012 min_max_avg(const std::vector<double> &my_value,
1013 const MPI_Comm mpi_communicator);
1014
1015
1028 void
1029 min_max_avg(const ArrayView<const double> &my_values,
1030 const ArrayView<MinMaxAvg> &result,
1031 const MPI_Comm mpi_communicator);
1032
1033
 1077 class MPI_InitFinalize : public InitFinalize
 1078 {
 1079 public:
 1125   MPI_InitFinalize(
 1126     int &argc,
 1127     char **&argv,
 1128     const unsigned int max_num_threads = numbers::invalid_unsigned_int);
 1129
 1135 };
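// Editorial usage sketch (not part of the upstream header): the usual way to
// initialize and finalize MPI in a deal.II program, here limiting each
// process to one thread.
// @code
//   int main(int argc, char *argv[])
//   {
//     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
//     // ... all MPI communication happens while this object is alive ...
//   }
// @endcode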
1136
1148 bool
 1149 job_supports_mpi();
 1150
1168 template <typename T>
1169 std::map<unsigned int, T>
 1170 some_to_some(const MPI_Comm comm,
 1171              const std::map<unsigned int, T> &objects_to_send);
1172
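// Editorial usage sketch (not part of the upstream header): sending a packed
// object to a selected set of ranks ('destination_rank' and 'local_data' are
// placeholders).
// @code
//   std::map<unsigned int, std::vector<double>> to_send;
//   to_send[destination_rank] = local_data;
//   const std::map<unsigned int, std::vector<double>> received =
//     Utilities::MPI::some_to_some(mpi_communicator, to_send);
// @endcode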
1186 template <typename T>
1187 [[nodiscard]] std::vector<T>
1188 all_gather(const MPI_Comm comm, const T &object_to_send);
1189
1205 template <typename T>
1206 [[nodiscard]] std::vector<T>
 1207 gather(const MPI_Comm comm,
 1208        const T &object_to_send,
1209 const unsigned int root_process = 0);
1210
1225 template <typename T>
1226 [[nodiscard]] T
 1227 scatter(const MPI_Comm comm,
 1228         const std::vector<T> &objects_to_send,
1229 const unsigned int root_process = 0);
1230
1269 template <typename T>
1270 [[nodiscard]] T
 1271 broadcast(const MPI_Comm comm,
 1272           const T &object_to_send,
1273 const unsigned int root_process = 0);
1274
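// Editorial usage sketch (not part of the upstream header): reading a file on
// rank 0 only and distributing its contents to all ranks ('read_file' is a
// hypothetical helper).
// @code
//   std::string contents;
//   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
//     contents = read_file("parameters.prm");
//   contents = Utilities::MPI::broadcast(mpi_communicator, contents, 0);
// @endcode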
1291 template <typename T>
1292 void
1293 broadcast(T *buffer,
1294 const std::size_t count,
1295 const unsigned int root,
1296 const MPI_Comm comm);
1297
1310 template <typename T>
1311 [[nodiscard]] T
1312 reduce(const T &local_value,
1313 const MPI_Comm comm,
1314 const std::function<T(const T &, const T &)> &combiner,
1315 const unsigned int root_process = 0);
1316
1317
1329 template <typename T, typename = std::enable_if_t<is_mpi_type<T> == true>>
1330 [[nodiscard]] std::pair<T, T>
1331 partial_and_total_sum(const T &value, const MPI_Comm comm);
1332
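// Editorial usage sketch (not part of the upstream header): computing the
// exclusive prefix sum and the total of locally owned counts
// ('n_locally_owned' is a placeholder). With counts 3, 5, 2 on ranks 0, 1, 2,
// the results are {0,10}, {3,10}, and {8,10}, respectively.
// @code
//   const std::pair<unsigned int, unsigned int> offset_and_total =
//     Utilities::MPI::partial_and_total_sum(n_locally_owned, mpi_communicator);
// @endcode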
1333
1343 template <typename T>
1344 [[nodiscard]] T
1345 all_reduce(const T &local_value,
1346 const MPI_Comm comm,
1347 const std::function<T(const T &, const T &)> &combiner);
1348
1349
1370 template <typename T>
 1371 Future<void>
 1372 isend(const T &object,
1373 MPI_Comm communicator,
1374 const unsigned int target_rank,
1375 const unsigned int mpi_tag = 0);
1376
1377
1394 template <typename T>
1395 Future<T>
1396 irecv(MPI_Comm communicator,
1397 const unsigned int source_rank,
1398 const unsigned int mpi_tag = 0);
1399
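// Editorial usage sketch (not part of the upstream header): a non-blocking
// exchange between ranks 0 and 1 ('rank' stands for
// this_mpi_process(mpi_communicator)).
// @code
//   if (rank == 0)
//     {
//       auto future = Utilities::MPI::isend(std::vector<int>{1, 2, 3},
//                                           mpi_communicator,
//                                           /*target_rank=*/1);
//       future.wait();
//     }
//   else if (rank == 1)
//     {
//       auto future =
//         Utilities::MPI::irecv<std::vector<int>>(mpi_communicator,
//                                                 /*source_rank=*/0);
//       const std::vector<int> received = future.get();
//     }
// @endcode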
1400
1444 std::vector<unsigned int>
 1445 compute_index_owner(const IndexSet &owned_indices,
 1446                     const IndexSet &indices_to_look_up,
 1447                     const MPI_Comm comm);
1448
1459 std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
 1460 compute_index_owner_and_requesters(const IndexSet &owned_indices,
 1461                                    const IndexSet &indices_to_look_up,
 1462                                    const MPI_Comm &comm);
1463
1471 template <typename T>
1472 std::vector<T>
1473 compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
1474
1478 template <typename T>
1479 std::set<T>
1480 compute_set_union(const std::set<T> &set, const MPI_Comm comm);
1481
1482
1483
1484 /* --------------------------- inline functions ------------------------- */
1485
1486 namespace internal
1487 {
1493 namespace MPIDataTypes
1494 {
1495#ifdef DEAL_II_WITH_MPI
1496 inline MPI_Datatype
1497 mpi_type_id(const bool *)
1498 {
1499 return MPI_CXX_BOOL;
1500 }
1501
1502
1503
1504 inline MPI_Datatype
1505 mpi_type_id(const char *)
1506 {
1507 return MPI_CHAR;
1508 }
1509
1510
1511
1512 inline MPI_Datatype
1513 mpi_type_id(const signed char *)
1514 {
1515 return MPI_SIGNED_CHAR;
1516 }
1517
1518
1519
1520 inline MPI_Datatype
1521 mpi_type_id(const wchar_t *)
1522 {
1523 return MPI_WCHAR;
1524 }
1525
1526
1527
1528 inline MPI_Datatype
1529 mpi_type_id(const short *)
1530 {
1531 return MPI_SHORT;
1532 }
1533
1534
1535
1536 inline MPI_Datatype
1537 mpi_type_id(const int *)
1538 {
1539 return MPI_INT;
1540 }
1541
1542
1543
1544 inline MPI_Datatype
1545 mpi_type_id(const long int *)
1546 {
1547 return MPI_LONG;
1548 }
1549
1550
1551
1552 inline MPI_Datatype
1553 mpi_type_id(const long long int *)
1554 {
1555 return MPI_LONG_LONG;
1556 }
1557
1558
1559
1560 inline MPI_Datatype
1561 mpi_type_id(const unsigned char *)
1562 {
1563 return MPI_UNSIGNED_CHAR;
1564 }
1565
1566
1567
1568 inline MPI_Datatype
1569 mpi_type_id(const unsigned short *)
1570 {
1571 return MPI_UNSIGNED_SHORT;
1572 }
1573
1574
1575
1576 inline MPI_Datatype
1577 mpi_type_id(const unsigned int *)
1578 {
1579 return MPI_UNSIGNED;
1580 }
1581
1582
1583
1584 inline MPI_Datatype
1585 mpi_type_id(const unsigned long int *)
1586 {
1587 return MPI_UNSIGNED_LONG;
1588 }
1589
1590
1591
1592 inline MPI_Datatype
1593 mpi_type_id(const unsigned long long int *)
1594 {
1595 return MPI_UNSIGNED_LONG_LONG;
1596 }
1597
1598
1599
1600 inline MPI_Datatype
1601 mpi_type_id(const float *)
1602 {
1603 return MPI_FLOAT;
1604 }
1605
1606
1607
1608 inline MPI_Datatype
1609 mpi_type_id(const double *)
1610 {
1611 return MPI_DOUBLE;
1612 }
1613
1614
1615
1616 inline MPI_Datatype
1617 mpi_type_id(const long double *)
1618 {
1619 return MPI_LONG_DOUBLE;
1620 }
1621
1622
1623
1624 inline MPI_Datatype
1625 mpi_type_id(const std::complex<float> *)
1626 {
1627 return MPI_COMPLEX;
1628 }
1629
1630
1631
1632 inline MPI_Datatype
1633 mpi_type_id(const std::complex<double> *)
1634 {
1635 return MPI_DOUBLE_COMPLEX;
1636 }
1637#endif
1638 } // namespace MPIDataTypes
1639 } // namespace internal
1640
1641
1642
1643#ifdef DEAL_II_WITH_MPI
1661 template <typename T>
1662 inline const MPI_Datatype mpi_type_id_for_type =
 1663   internal::MPIDataTypes::mpi_type_id(
 1664     static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1665#endif
1666
1667#ifndef DOXYGEN
1668 namespace internal
1669 {
1670 // declaration for an internal function that lives in mpi.templates.h
1671 template <typename T>
1672 void
1673 all_reduce(const MPI_Op &mpi_op,
1674 const ArrayView<const T> &values,
1675 const MPI_Comm mpi_communicator,
1676 const ArrayView<T> &output);
1677 } // namespace internal
1678
1679
1680 template <typename T>
1681 template <typename W, typename G>
1682 Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1683 : wait_function(wait_operation)
1684 , get_and_cleanup_function(get_and_cleanup_operation)
1685 , is_done(false)
1686 , get_was_called(false)
1687 {}
1688
1689
1690
1691 template <typename T>
1692 Future<T>::~Future()
1693 {
1694 // If there is a clean-up function, and if it has not been
1695 // called yet, then do so. Note that we may not have a
1696 // clean-up function (not even an empty one) if the current
1697 // object has been moved from, into another object, and as
1698 // a consequence the std::function objects are now empty
1699 // even though they were initialized in the constructor.
 1700   // (A std::function object whose object is an empty lambda
1701 // function, [](){}, is not an empty std::function object.)
1702 if ((get_was_called == false) && get_and_cleanup_function)
1703 get();
1704 }
1705
1706
1707
1708 template <typename T>
1709 void
1710 Future<T>::wait()
1711 {
1712 if (is_done == false)
1713 {
1714 wait_function();
1715
1716 is_done = true;
1717 }
1718 }
1719
1720
1721 template <typename T>
1722 T
1723 Future<T>::get()
1724 {
1725 Assert(get_was_called == false,
1726 ExcMessage(
1727 "You can't call get() more than once on a Future object."));
1728 get_was_called = true;
1729
1730 wait();
1731 return get_and_cleanup_function();
1732 }
1733
1734
1735
1736 template <typename T, unsigned int N>
1737 void
1738 sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
1739 {
1740 internal::all_reduce(MPI_SUM,
1741 ArrayView<const T>(values, N),
1742 mpi_communicator,
1743 ArrayView<T>(sums, N));
1744 }
1745
1746
1747
1748 template <typename T, unsigned int N>
1749 void
1750 max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
1751 {
1752 internal::all_reduce(MPI_MAX,
1753 ArrayView<const T>(values, N),
1754 mpi_communicator,
1755 ArrayView<T>(maxima, N));
1756 }
1757
1758
1759
1760 template <typename T, unsigned int N>
1761 void
1762 min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
1763 {
1764 internal::all_reduce(MPI_MIN,
1765 ArrayView<const T>(values, N),
1766 mpi_communicator,
1767 ArrayView<T>(minima, N));
1768 }
1769
1770
1771
1772 template <typename T, unsigned int N>
1773 void
1774 logical_or(const T (&values)[N],
1775 const MPI_Comm mpi_communicator,
1776 T (&results)[N])
1777 {
1778 static_assert(std::is_integral_v<T>,
1779 "The MPI_LOR operation only allows integral data types.");
1780
1781 internal::all_reduce(MPI_LOR,
1782 ArrayView<const T>(values, N),
1783 mpi_communicator,
1784 ArrayView<T>(results, N));
1785 }
1786
1787
1788
1789 template <typename T>
1790 std::map<unsigned int, T>
 1791 some_to_some(const MPI_Comm comm,
 1792              const std::map<unsigned int, T> &objects_to_send)
1793 {
1794# ifndef DEAL_II_WITH_MPI
1795 (void)comm;
1796 Assert(objects_to_send.size() < 2,
1797 ExcMessage("Cannot send to more than one processor."));
1798 Assert(objects_to_send.find(0) != objects_to_send.end() ||
1799 objects_to_send.empty(),
1800 ExcMessage("Can only send to myself or to nobody."));
1801 return objects_to_send;
1802# else
1803 const auto my_proc = this_mpi_process(comm);
1804
1805 std::map<unsigned int, T> received_objects;
1806
1807 std::vector<unsigned int> send_to;
1808 send_to.reserve(objects_to_send.size());
1809 for (const auto &m : objects_to_send)
1810 if (m.first == my_proc)
1811 received_objects[my_proc] = m.second;
1812 else
1813 send_to.emplace_back(m.first);
1814
 1815   const unsigned int n_expected_incoming_messages =
 1816     Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
 1817
1818 // Protect the following communication:
1819 static CollectiveMutex mutex;
1820 CollectiveMutex::ScopedLock lock(mutex, comm);
1821
1822 // If we have something to send, or we expect something from other
1823 // processors, we need to visit one of the two scopes below. Otherwise,
1824 // no other action is required by this mpi process, and we can safely
1825 // return.
1826 if (send_to.empty() && n_expected_incoming_messages == 0)
1827 return received_objects;
1828
1829 const int mpi_tag =
1830 internal::Tags::compute_point_to_point_communication_pattern;
1831
1832 // Sending buffers
1833 std::vector<std::vector<char>> buffers_to_send(send_to.size());
1834 std::vector<MPI_Request> buffer_send_requests(send_to.size());
1835 {
1836 unsigned int i = 0;
1837 for (const auto &rank_obj : objects_to_send)
1838 if (rank_obj.first != my_proc)
1839 {
1840 const auto &rank = rank_obj.first;
1841 buffers_to_send[i] = Utilities::pack(rank_obj.second,
1842 /*allow_compression=*/false);
1843 const int ierr = MPI_Isend(buffers_to_send[i].data(),
1844 buffers_to_send[i].size(),
1845 MPI_CHAR,
1846 rank,
1847 mpi_tag,
1848 comm,
1849 &buffer_send_requests[i]);
1850 AssertThrowMPI(ierr);
1851 ++i;
1852 }
1853 }
1854
1855 // Fill the output map
1856 {
1857 std::vector<char> buffer;
1858 // We do this on a first come/first served basis
1859 for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1860 {
1861 // Probe what's going on. Take data from the first available sender
1862 MPI_Status status;
1863 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1864 AssertThrowMPI(ierr);
1865
1866 // Length of the message
1867 int len;
1868 ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1869 AssertThrowMPI(ierr);
1870 buffer.resize(len);
1871
1872 // Source rank
1873 const unsigned int rank = status.MPI_SOURCE;
1874
1875 // Actually receive the message
1876 ierr = MPI_Recv(buffer.data(),
1877 len,
1878 MPI_CHAR,
1879 status.MPI_SOURCE,
1880 status.MPI_TAG,
1881 comm,
1882 MPI_STATUS_IGNORE);
1883 AssertThrowMPI(ierr);
1884 Assert(received_objects.find(rank) == received_objects.end(),
1886 "I should not receive again from this rank"));
1887 received_objects[rank] =
1888 Utilities::unpack<T>(buffer,
1889 /*allow_compression=*/false);
1890 }
1891 }
1892
1893 // Wait to have sent all objects.
1894 const int ierr = MPI_Waitall(send_to.size(),
1895 buffer_send_requests.data(),
1896 MPI_STATUSES_IGNORE);
1897 AssertThrowMPI(ierr);
1898
1899 return received_objects;
1900# endif // deal.II with MPI
1901 }
1902
1903
1904
1905 template <typename T, typename>
1906 std::pair<T, T>
1907 partial_and_total_sum(const T &value, const MPI_Comm comm)
1908 {
1909# ifndef DEAL_II_WITH_MPI
1910 (void)comm;
1911 return {0, value};
1912# else
 1913   if (n_mpi_processes(comm) == 1)
 1914     return {0, value};
1915 else
1916 {
1917 T prefix = {};
1918
1919 // First obtain every process's prefix sum:
1920 int ierr =
1921 MPI_Exscan(&value,
1922 &prefix,
1923 1,
1924 Utilities::MPI::mpi_type_id_for_type<decltype(value)>,
1925 MPI_SUM,
1926 comm);
1927 AssertThrowMPI(ierr);
1928
1929 // Then we also need the total sum. We could obtain it by
1930 // calling Utilities::MPI::sum(), but it is cheaper if we
1931 // broadcast it from the last process, which can compute it
1932 // from its own prefix sum plus its own value.
 1933     const T sum = Utilities::MPI::broadcast(
 1934       comm, prefix + value, Utilities::MPI::n_mpi_processes(comm) - 1);
1935
1936 return {prefix, sum};
1937 }
1938# endif
1939 }
1940
1941
1942
1943 template <typename T>
1944 std::vector<T>
1945 all_gather(const MPI_Comm comm, const T &object)
1946 {
1947 if (job_supports_mpi() == false)
1948 return {object};
1949
1950# ifndef DEAL_II_WITH_MPI
1951 (void)comm;
1952 std::vector<T> v(1, object);
1953 return v;
1954# else
 1955   const unsigned int n_procs = n_mpi_processes(comm);
 1956
1957 std::vector<char> buffer = Utilities::pack(object);
1958
1959 int n_local_data = buffer.size();
1960
1961 // Vector to store the size of loc_data_array for every process
1962 std::vector<int> size_all_data(n_procs, 0);
1963
1964 // Exchanging the size of each buffer
1965 int ierr = MPI_Allgather(
1966 &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1967 AssertThrowMPI(ierr);
1968
1969 // Now computing the displacement, relative to recvbuf,
1970 // at which to store the incoming buffer
1971 std::vector<int> rdispls(n_procs);
1972 rdispls[0] = 0;
1973 for (unsigned int i = 1; i < n_procs; ++i)
1974 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1975
1976 // Step 3: exchange the buffer:
1977 std::vector<char> received_unrolled_buffer(rdispls.back() +
1978 size_all_data.back());
1979
1980 ierr = MPI_Allgatherv(buffer.data(),
1981 n_local_data,
1982 MPI_CHAR,
1983 received_unrolled_buffer.data(),
1984 size_all_data.data(),
1985 rdispls.data(),
1986 MPI_CHAR,
1987 comm);
1988 AssertThrowMPI(ierr);
1989
1990 std::vector<T> received_objects(n_procs);
1991 for (unsigned int i = 0; i < n_procs; ++i)
1992 {
1993 std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1994 rdispls[i],
1995 received_unrolled_buffer.begin() +
1996 rdispls[i] + size_all_data[i]);
1997 received_objects[i] = Utilities::unpack<T>(local_buffer);
1998 }
1999
2000 return received_objects;
2001# endif
2002 }
2003
2004
2005
2006 template <typename T>
2007 std::vector<T>
2008 gather(const MPI_Comm comm,
2009 const T &object_to_send,
2010 const unsigned int root_process)
2011 {
2012# ifndef DEAL_II_WITH_MPI
2013 (void)comm;
2014 (void)root_process;
2015 std::vector<T> v(1, object_to_send);
2016 return v;
2017# else
 2018   const unsigned int n_procs = n_mpi_processes(comm);
 2019   const unsigned int my_rank = this_mpi_process(comm);
 2020
2021 AssertIndexRange(root_process, n_procs);
2022
2023 std::vector<char> buffer = Utilities::pack(object_to_send);
2024 int n_local_data = buffer.size();
2025
2026 // Vector to store the size of loc_data_array for every process
2027 // only the root process needs to allocate memory for that purpose
2028 std::vector<int> size_all_data;
2029 if (my_rank == root_process)
2030 size_all_data.resize(n_procs, 0);
2031
2032 // Exchanging the size of each buffer
2033 int ierr = MPI_Gather(&n_local_data,
2034 1,
2035 MPI_INT,
2036 size_all_data.data(),
2037 1,
2038 MPI_INT,
2039 root_process,
2040 comm);
2041 AssertThrowMPI(ierr);
2042
2043 // Now computing the displacement, relative to recvbuf,
2044 // at which to store the incoming buffer; only for root
2045 std::vector<int> rdispls;
2046 if (my_rank == root_process)
2047 {
2048 rdispls.resize(n_procs, 0);
2049 for (unsigned int i = 1; i < n_procs; ++i)
2050 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2051 }
2052 // exchange the buffer:
2053 std::vector<char> received_unrolled_buffer;
2054 if (my_rank == root_process)
2055 received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2056
2057 ierr = MPI_Gatherv(buffer.data(),
2058 n_local_data,
2059 MPI_CHAR,
2060 received_unrolled_buffer.data(),
2061 size_all_data.data(),
2062 rdispls.data(),
2063 MPI_CHAR,
2064 root_process,
2065 comm);
2066 AssertThrowMPI(ierr);
2067
2068 std::vector<T> received_objects;
2069
2070 if (my_rank == root_process)
2071 {
2072 received_objects.resize(n_procs);
2073
2074 for (unsigned int i = 0; i < n_procs; ++i)
2075 {
2076 const std::vector<char> local_buffer(
2077 received_unrolled_buffer.begin() + rdispls[i],
2078 received_unrolled_buffer.begin() + rdispls[i] +
2079 size_all_data[i]);
2080 received_objects[i] = Utilities::unpack<T>(local_buffer);
2081 }
2082 }
2083 return received_objects;
2084# endif
2085 }
2086
2087
2088
2089 template <typename T>
2090 T
2091 scatter(const MPI_Comm comm,
2092 const std::vector<T> &objects_to_send,
2093 const unsigned int root_process)
2094 {
2095# ifndef DEAL_II_WITH_MPI
2096 (void)comm;
2097 (void)root_process;
2098
2099 AssertDimension(objects_to_send.size(), 1);
2100
2101 return objects_to_send[0];
2102# else
 2103   const unsigned int n_procs = n_mpi_processes(comm);
 2104   const unsigned int my_rank = this_mpi_process(comm);
 2105
 2106   AssertIndexRange(root_process, n_procs);
 2107   AssertThrow(
 2108     (my_rank != root_process && objects_to_send.empty()) ||
2109 objects_to_send.size() == n_procs,
2110 ExcMessage(
2111 "The number of objects to be scattered must correspond to the number processes."));
2112
2113 std::vector<char> send_buffer;
2114 std::vector<int> send_counts;
2115 std::vector<int> send_displacements;
2116
2117 if (my_rank == root_process)
2118 {
2119 send_counts.resize(n_procs, 0);
2120 send_displacements.resize(n_procs + 1, 0);
2121
2122 for (unsigned int i = 0; i < n_procs; ++i)
2123 {
2124 const auto packed_data = Utilities::pack(objects_to_send[i]);
2125 send_buffer.insert(send_buffer.end(),
2126 packed_data.begin(),
2127 packed_data.end());
2128 send_counts[i] = packed_data.size();
2129 }
2130
2131 for (unsigned int i = 0; i < n_procs; ++i)
2132 send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2133 }
2134
2135 int n_local_data;
2136 int ierr = MPI_Scatter(send_counts.data(),
2137 1,
2138 MPI_INT,
2139 &n_local_data,
2140 1,
2141 MPI_INT,
2142 root_process,
2143 comm);
2144 AssertThrowMPI(ierr);
2145
2146 std::vector<char> recv_buffer(n_local_data);
2147
2148 ierr = MPI_Scatterv(send_buffer.data(),
2149 send_counts.data(),
2150 send_displacements.data(),
2151 MPI_CHAR,
2152 recv_buffer.data(),
2153 n_local_data,
2154 MPI_CHAR,
2155 root_process,
2156 comm);
2157 AssertThrowMPI(ierr);
2158
2159 return Utilities::unpack<T>(recv_buffer);
2160# endif
2161 }
2162
2163
2164 template <typename T>
2165 void
2166 broadcast(T *buffer,
2167 const std::size_t count,
2168 const unsigned int root,
2169 const MPI_Comm comm)
2170 {
2171# ifndef DEAL_II_WITH_MPI
2172 (void)buffer;
2173 (void)count;
2174 (void)root;
2175 (void)comm;
2176# else
2177 Assert(root < n_mpi_processes(comm),
2178 ExcMessage("Invalid root rank specified."));
2179
2180 // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2181 // iteration:
2182 const size_t max_send_count = std::numeric_limits<signed int>::max();
2183
2184 size_t total_sent_count = 0;
2185 while (total_sent_count < count)
2186 {
2187 const size_t current_count =
2188 std::min(count - total_sent_count, max_send_count);
2189
2190 const int ierr = MPI_Bcast(buffer + total_sent_count,
2191 current_count,
2192 mpi_type_id_for_type<decltype(*buffer)>,
2193 root,
2194 comm);
2195 AssertThrowMPI(ierr);
2196 total_sent_count += current_count;
2197 }
2198# endif
2199 }
2200
2201
2202
2203 template <typename T>
2204 T
2205 broadcast(const MPI_Comm comm,
2206 const T &object_to_send,
2207 const unsigned int root_process)
2208 {
2209# ifndef DEAL_II_WITH_MPI
2210 (void)comm;
2211 (void)root_process;
2212 return object_to_send;
2213# else
 2214   const unsigned int n_procs = n_mpi_processes(comm);
 2215   AssertIndexRange(root_process, n_procs);
2216 (void)n_procs;
2217
2218 if constexpr (is_mpi_type<T>)
2219 {
2220 T object = object_to_send;
2221 int ierr =
2222 MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2223 AssertThrowMPI(ierr);
2224
2225 return object;
2226 }
2227 else
2228 {
2229 std::vector<char> buffer;
2230 std::size_t buffer_size = numbers::invalid_size_type;
2231
2232 // On the root process, pack the data and determine what the
2233 // buffer size needs to be.
2234 if (this_mpi_process(comm) == root_process)
2235 {
2236 buffer = Utilities::pack(object_to_send, false);
2237 buffer_size = buffer.size();
2238 }
2239
2240 // Exchange the size of buffer
2241 int ierr = MPI_Bcast(&buffer_size,
2242 1,
2243 mpi_type_id_for_type<decltype(buffer_size)>,
2244 root_process,
2245 comm);
2246 AssertThrowMPI(ierr);
2247
2248 // If not on the root process, correctly size the buffer to
2249 // receive the data, then do exactly that.
2250 if (this_mpi_process(comm) != root_process)
2251 buffer.resize(buffer_size);
2252
2253 broadcast(buffer.data(), buffer_size, root_process, comm);
2254
2255 if (Utilities::MPI::this_mpi_process(comm) == root_process)
2256 return object_to_send;
2257 else
2258 return Utilities::unpack<T>(buffer, false);
2259 }
2260# endif
2261 }
2262
2263
2264
2265 template <typename T>
2266 Future<void>
2267 isend(const T &object,
2268 MPI_Comm communicator,
2269 const unsigned int target_rank,
2270 const unsigned int mpi_tag)
2271 {
2272# ifndef DEAL_II_WITH_MPI
2273 Assert(false, ExcNeedsMPI());
2274 (void)object;
2275 (void)communicator;
2276 (void)target_rank;
2277 (void)mpi_tag;
2278 return Future<void>([]() {}, []() {});
2279# else
2280 // Create a pointer to a send buffer into which we pack the object
2281 // to be sent. The buffer will be released by the Future object once
2282 // the send has been verified to have succeeded.
2283 //
2284 // Conceptually, we would like this send buffer to be a
2285 // std::unique_ptr object whose ownership is later handed over
2286 // to the cleanup function. That has the disadvantage that the
2287 // cleanup object is a non-copyable lambda capture, leading to
2288 // awkward semantics. Instead, we use a std::shared_ptr; we move
2289 // this shared pointer into the cleanup function, which means
2290 // that there is exactly one shared pointer who owns the buffer
2291 // at any given time, though the latter is not an important
2292 // optimization.
2293 std::shared_ptr<std::vector<char>> send_buffer =
2294 std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2295
2296 // Now start the send, and store the result in a request object that
2297 // we can then wait for later:
2298 MPI_Request request;
2299 const int ierr =
2300 MPI_Isend(send_buffer->data(),
2301 send_buffer->size(),
2302 mpi_type_id_for_type<decltype(*send_buffer->data())>,
2303 target_rank,
2304 mpi_tag,
2305 communicator,
2306 &request);
2307 AssertThrowMPI(ierr);
2308
2309 // Then return a std::future-like object that has a wait()
2310 // function one can use to wait for the communication to finish,
2311 // and that has a cleanup function to be called at some point
2312 // after that makes sure the send buffer gets deallocated. This
2313 // cleanup function takes over ownership of the send buffer.
2314 //
2315 // Note that the body of the lambda function of the clean-up
2316 // function could be left empty. If that were so, once the
2317 // lambda function object goes out of scope, the 'send_buffer'
2318 // member of the closure object goes out of scope as well and so
2319 // the send_buffer is destroyed. But we may want to release the
2320 // buffer itself as early as possible, and so we clear the
2321 // buffer when the Future::get() function is called.
2322 auto wait = [request]() mutable {
2323 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2324 AssertThrowMPI(ierr);
2325 };
2326 auto cleanup = [send_buffer = std::move(send_buffer)]() {
2327 send_buffer->clear();
2328 };
2329 return Future<void>(wait, cleanup);
2330# endif
2331 }
2332
2333
2334
2335 template <typename T>
2336 Future<T>
2337 irecv(MPI_Comm communicator,
2338 const unsigned int source_rank,
2339 const unsigned int mpi_tag)
2340 {
2341# ifndef DEAL_II_WITH_MPI
2342 Assert(false, ExcNeedsMPI());
2343 (void)communicator;
2344 (void)source_rank;
2345 (void)mpi_tag;
 2346   return Future<T>([]() {}, []() { return T{}; });
2347# else
2348 // Use a 'probe' operation for the 'wait' operation of the
2349 // Future this function returns. It will trigger whenever we get
2350 // the incoming message. Later, once we have received the message, we
2351 // can query its size and allocate a receiver buffer.
2352 //
2353 // Since we may be waiting for multiple messages from the same
2354 // incoming process (with possibly the same tag -- we can't
2355 // know), we must make sure that the 'probe' operation we have
2356 // here (and which we use to determine the buffer size) matches
2357 // the 'recv' operation with which we actually get the data
2358 // later on. This is exactly what the 'MPI_Mprobe' function and
 2359   // its 'I'mmediate variant are there for, coupled with the
 2360   // 'MPI_Mrecv' call that we put into the clean-up function
2361 // below.
2362 std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2363 std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2364
2365 auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2366 const int ierr = MPI_Mprobe(
2367 source_rank, mpi_tag, communicator, message.get(), status.get());
2368 AssertThrowMPI(ierr);
2369 };
2370
2371
2372 // Now also define the function that actually gets the data:
2373 auto get = [status, message]() {
2374 int number_amount;
2375 int ierr;
2376 ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2377 AssertThrowMPI(ierr);
2378
2379 std::vector<char> receive_buffer(number_amount);
2380
2381 // Then actually get the data, using the matching MPI_Mrecv to the above
2382 // MPI_Mprobe:
2383 ierr = MPI_Mrecv(receive_buffer.data(),
2384 number_amount,
2385 mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2386 message.get(),
2387 status.get());
2388 AssertThrowMPI(ierr);
2389
2390 // Return the unpacked object:
2391 return Utilities::unpack<T>(receive_buffer, false);
2392 };
2393
2394 return Future<T>(wait, get);
2395# endif
2396 }
2397
2398
2399
2400# ifdef DEAL_II_WITH_MPI
2401 template <class Iterator, typename Number>
2402 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2403 mean_and_standard_deviation(const Iterator begin,
2404 const Iterator end,
2405 const MPI_Comm comm)
2406 {
 2407   // Below we use a simple and straightforward implementation. More elaborate
2408 // options are:
2409 // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2410 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2411 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2412 using Std = typename numbers::NumberTraits<Number>::real_type;
2413 const Number sum = std::accumulate(begin, end, Number(0.));
2414
2415 const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2416 Assert(size > 0, ExcDivideByZero());
2417 const Number mean =
2418 Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2419 Std sq_sum = 0.;
2420 std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
 2421     sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
 2422   });
2423 sq_sum = Utilities::MPI::sum(sq_sum, comm);
2424 return std::make_pair(mean,
2425 std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2426 }
2427# endif
2428
2429#endif
2430 } // end of namespace MPI
2431} // end of namespace Utilities
2432
2433
 2434 DEAL_II_NAMESPACE_CLOSE
 2435
2436#endif