 deal.II version GIT relicensing-3063-g10967a192b 2025-04-12 13:30:00+00:00
mpi.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_h
16#define dealii_mpi_h
17
18#include <deal.II/base/config.h>
19
27#include <deal.II/base/types.h>
29
30#include <complex>
31#include <limits>
32#include <map>
33#include <numeric>
34#include <set>
35#include <vector>
36
37#ifdef DEAL_II_WITH_MPI
39# include <mpi.h>
41#endif
42
43
57#ifdef DEAL_II_WITH_MPI
58# define DEAL_II_MPI_CONST_CAST(expr) (expr)
59#endif
60
61
62
63DEAL_II_NAMESPACE_OPEN
64
65
66// Forward type declarations to allow MPI sums over tensorial types
67#ifndef DOXYGEN
68template <int rank, int dim, typename Number>
69class Tensor;
70template <int rank, int dim, typename Number>
71class SymmetricTensor;
72template <typename Number>
73class SparseMatrix;
74class IndexSet;
75#endif
76
77namespace Utilities
78{
91  IndexSet
92  create_evenly_distributed_partitioning(
93    const unsigned int my_partition_id,
94 const unsigned int n_partitions,
95 const types::global_dof_index total_size);
96
104 namespace MPI
105 {
114 template <typename T>
115 constexpr bool is_mpi_type = is_same_as_any_of<T,
116 char,
117 signed short,
118 signed int,
119 signed long,
120 signed long long,
121 signed char,
122 unsigned char,
123 unsigned short,
124 unsigned int,
125 unsigned long int,
126 unsigned long long,
127 float,
128 double,
129 long double,
130 bool,
131 std::complex<float>,
132 std::complex<double>,
133 std::complex<long double>,
134 wchar_t>::value;
135
144 unsigned int
145 n_mpi_processes(const MPI_Comm mpi_communicator);
146
155 unsigned int
156 this_mpi_process(const MPI_Comm mpi_communicator);
157
162 std::vector<unsigned int>
163    mpi_processes_within_communicator(const MPI_Comm comm_large,
164                                      const MPI_Comm comm_small);
165
187 std::vector<unsigned int>
188    compute_point_to_point_communication_pattern(
189      const MPI_Comm                   mpi_comm,
190 const std::vector<unsigned int> &destinations);
191
211 unsigned int
212    compute_n_point_to_point_communications(
213      const MPI_Comm                   mpi_comm,
214 const std::vector<unsigned int> &destinations);
215
232    MPI_Comm
233    duplicate_communicator(const MPI_Comm mpi_communicator);
234
244 void
245 free_communicator(MPI_Comm mpi_communicator);
246
259    class DuplicatedCommunicator
260    {
261 public:
265 explicit DuplicatedCommunicator(const MPI_Comm communicator)
266 : comm(duplicate_communicator(communicator))
267 {}
268
272      DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
273
277      ~DuplicatedCommunicator()
278      {
279        free_communicator(comm);
280      }
281
285      MPI_Comm
286      operator*() const
287 {
288 return comm;
289 }
290
291
295      DuplicatedCommunicator &
296      operator=(const DuplicatedCommunicator &) = delete;
297
298 private:
302      MPI_Comm comm;
303    };
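    // Usage sketch (illustrative only, not part of the library header):
    // a class that needs its own communicator for internal messages can
    // hold a DuplicatedCommunicator member; the duplicate is freed
    // automatically when the owning object is destroyed. 'MyAlgorithm'
    // is a made-up name.
    //
    //   class MyAlgorithm
    //   {
    //   public:
    //     MyAlgorithm(const MPI_Comm mpi_communicator)
    //       : communicator(mpi_communicator)
    //     {}
    //
    //     void run() const
    //     {
    //       // '*communicator' yields the duplicated MPI_Comm:
    //       const unsigned int my_rank =
    //         Utilities::MPI::this_mpi_process(*communicator);
    //       (void)my_rank;
    //     }
    //
    //   private:
    //     Utilities::MPI::DuplicatedCommunicator communicator;
    //   };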
304
305
306
336    class CollectiveMutex
337    {
338 public:
344      class ScopedLock
345      {
346 public:
350        ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
351          : mutex(mutex)
352 , comm(comm)
353 {
354 mutex.lock(comm);
355 }
356
360        ~ScopedLock()
361        {
362          mutex.unlock(comm);
363        }
364
365 private:
369        CollectiveMutex &mutex;
373        const MPI_Comm comm;
374      };
375
379      CollectiveMutex();
380
384      ~CollectiveMutex();
385
392 void
393 lock(const MPI_Comm comm);
394
401 void
402 unlock(const MPI_Comm comm);
403
404 private:
408 bool locked;
409
413 MPI_Request request;
414 };
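    // Usage sketch (illustrative only): guard a communication phase that
    // uses wildcard receives so that two consecutive invocations cannot
    // interleave their messages. The surrounding function body and the
    // communicator 'comm' are assumed.
    //
    //   static Utilities::MPI::CollectiveMutex mutex;
    //   {
    //     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
    //     // ... point-to-point communication using MPI_ANY_SOURCE ...
    //   } // the mutex is released when 'lock' goes out of scope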
415
416
417
464 template <typename T>
465 class Future
466 {
467 public:
472 template <typename W, typename G>
473 Future(W &&wait_operation, G &&get_and_cleanup_operation);
474
480 Future(const Future &) = delete;
481
485 Future(Future &&) noexcept = default;
486
490      ~Future();
491
497 Future &
498 operator=(const Future &) = delete;
499
503 Future &
504 operator=(Future &&) noexcept = default;
505
513 void
514      wait();
515
527 T
528      get();
529
530 private:
534 std::function<void()> wait_function;
535      std::function<T()> get_and_cleanup_function;
536
540      bool is_done;
541
545      bool get_was_called;
546    };
547
556 std::vector<IndexSet>
557    create_ascending_partitioning(
558      const MPI_Comm                comm,
559 const types::global_dof_index locally_owned_size);
560
568    IndexSet
569    create_evenly_distributed_partitioning(
570      const MPI_Comm                comm,
571 const types::global_dof_index total_size);
572
573#ifdef DEAL_II_WITH_MPI
589 template <class Iterator, typename Number = long double>
590 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
591 mean_and_standard_deviation(const Iterator begin,
592 const Iterator end,
593 const MPI_Comm comm);
594#endif
595
596
644 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
645 create_mpi_data_type_n_bytes(const std::size_t n_bytes);
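    // Usage sketch (illustrative only): describe a large byte buffer as a
    // single element of a custom datatype so that its size is not limited
    // by the 'int count' argument of plain MPI calls. 'fill_buffer()' is a
    // hypothetical helper.
    //
    //   std::vector<char> buffer = fill_buffer();
    //   const auto data_type =
    //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
    //   const int ierr =
    //     MPI_Bcast(buffer.data(), 1, *data_type, /*root=*/0, comm);
    //   AssertThrowMPI(ierr);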
646
666 template <typename T>
667 T
668 sum(const T &t, const MPI_Comm mpi_communicator);
669
679 template <typename T, typename U>
680 void
681 sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
682
692 template <typename T>
693 void
694 sum(const ArrayView<const T> &values,
695 const MPI_Comm mpi_communicator,
696 const ArrayView<T> &sums);
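    // Usage sketch (illustrative only): reduce a scalar and, separately,
    // an array of values over all processes of a communicator.
    // 'compute_local_part()' is a hypothetical helper.
    //
    //   const double local_contribution = compute_local_part();
    //   const double global_value =
    //     Utilities::MPI::sum(local_contribution, mpi_communicator);
    //
    //   std::vector<double> local_counts(10, 1.0);
    //   std::vector<double> global_counts(10);
    //   Utilities::MPI::sum(local_counts, mpi_communicator, global_counts);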
697
703 template <int rank, int dim, typename Number>
704    SymmetricTensor<rank, dim, Number>
705    sum(const SymmetricTensor<rank, dim, Number> &local,
706        const MPI_Comm mpi_communicator);
707
713 template <int rank, int dim, typename Number>
714    Tensor<rank, dim, Number>
715    sum(const Tensor<rank, dim, Number> &local,
716        const MPI_Comm mpi_communicator);
717
726 template <typename Number>
727 void
728    sum(const SparseMatrix<Number> &local,
729        const MPI_Comm mpi_communicator,
730 SparseMatrix<Number> &global);
731
751 template <typename T>
752 T
753 max(const T &t, const MPI_Comm mpi_communicator);
754
764 template <typename T, typename U>
765 void
766 max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
767
777 template <typename T>
778 void
779 max(const ArrayView<const T> &values,
780 const MPI_Comm mpi_communicator,
781 const ArrayView<T> &maxima);
782
802 template <typename T>
803 T
804 min(const T &t, const MPI_Comm mpi_communicator);
805
815 template <typename T, typename U>
816 void
817 min(const T &values, const MPI_Comm mpi_communicator, U &minima);
818
828 template <typename T>
829 void
830 min(const ArrayView<const T> &values,
831 const MPI_Comm mpi_communicator,
832 const ArrayView<T> &minima);
833
857 template <typename T>
858 T
859 logical_or(const T &t, const MPI_Comm mpi_communicator);
860
875 template <typename T, typename U>
876 void
877 logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
878
888 template <typename T>
889 void
890    logical_or(const ArrayView<const T> &values,
891               const MPI_Comm mpi_communicator,
892 const ArrayView<T> &results);
893
908    struct MinMaxAvg
909    {
914 double sum;
915
920 double min;
921
926 double max;
927
936 unsigned int min_index;
937
946 unsigned int max_index;
947
952 double avg;
953 };
954
969    MinMaxAvg
970    min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
971
983 std::vector<MinMaxAvg>
984 min_max_avg(const std::vector<double> &my_value,
985 const MPI_Comm mpi_communicator);
986
987
1000 void
1001 min_max_avg(const ArrayView<const double> &my_values,
1002 const ArrayView<MinMaxAvg> &result,
1003 const MPI_Comm mpi_communicator);
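    // Usage sketch (illustrative only): collect statistics of a per-process
    // quantity, for example a wall-clock time measured locally.
    //
    //   const double local_time = ...; // measured on each process
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg(local_time, mpi_communicator);
    //   if (Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
    //     std::cout << "min=" << stats.min << " (rank " << stats.min_index
    //               << "), max=" << stats.max << " (rank " << stats.max_index
    //               << "), avg=" << stats.avg << std::endl;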
1004
1005
1049    class MPI_InitFinalize : public InitFinalize
1050    {
1051 public:
1097      MPI_InitFinalize(
1098        int               &argc,
1099 char **&argv,
1100 const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1101
1107 };
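    // Usage sketch (illustrative only): the object is typically created at
    // the top of main() so that MPI (and the other subsystems it manages)
    // is initialized before any other deal.II object and finalized last.
    //
    //   int main(int argc, char **argv)
    //   {
    //     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
    //     // ... run the parallel program ...
    //     return 0;
    //   }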
1108
1120 bool
1121    job_supports_mpi();
1122
1140 template <typename T>
1141 std::map<unsigned int, T>
1142    some_to_some(const MPI_Comm comm,
1143                 const std::map<unsigned int, T> &objects_to_send);
1144
1158 template <typename T>
1159 std::vector<T>
1160 all_gather(const MPI_Comm comm, const T &object_to_send);
1161
1177 template <typename T>
1178 std::vector<T>
1179    gather(const MPI_Comm     comm,
1180           const T           &object_to_send,
1181 const unsigned int root_process = 0);
1182
1197 template <typename T>
1198 T
1199    scatter(const MPI_Comm        comm,
1200            const std::vector<T> &objects_to_send,
1201 const unsigned int root_process = 0);
1202
1241 template <typename T>
1242 T
1243    broadcast(const MPI_Comm     comm,
1244              const T           &object_to_send,
1245 const unsigned int root_process = 0);
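    // Usage sketch (illustrative only): broadcast an object that is not a
    // native MPI type; it is serialized and de-serialized internally.
    // 'read_parameters()' is a hypothetical helper.
    //
    //   std::map<std::string, double> parameters;
    //   if (Utilities::MPI::this_mpi_process(comm) == 0)
    //     parameters = read_parameters();
    //   parameters = Utilities::MPI::broadcast(comm, parameters, 0);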
1246
1263 template <typename T>
1264 void
1265 broadcast(T *buffer,
1266 const std::size_t count,
1267 const unsigned int root,
1268 const MPI_Comm comm);
1269
1282 template <typename T>
1283 T
1284 reduce(const T &local_value,
1285 const MPI_Comm comm,
1286 const std::function<T(const T &, const T &)> &combiner,
1287 const unsigned int root_process = 0);
1288
1289
1301 template <typename T, typename = std::enable_if_t<is_mpi_type<T> == true>>
1302 std::pair<T, T>
1303 partial_and_total_sum(const T &value, const MPI_Comm comm);
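    // Usage sketch (illustrative only): compute the first global index of
    // the locally owned range of each process, plus the global size.
    //
    //   const types::global_dof_index n_locally_owned = ...;
    //   const auto [my_offset, n_global] =
    //     Utilities::MPI::partial_and_total_sum(n_locally_owned, comm);
    //   // this process owns indices [my_offset, my_offset + n_locally_owned)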
1304
1305
1315 template <typename T>
1316 T
1317 all_reduce(const T &local_value,
1318 const MPI_Comm comm,
1319 const std::function<T(const T &, const T &)> &combiner);
1320
1321
1342 template <typename T>
1343    Future<void>
1344    isend(const T           &object,
1345 MPI_Comm communicator,
1346 const unsigned int target_rank,
1347 const unsigned int mpi_tag = 0);
1348
1349
1366 template <typename T>
1367 Future<T>
1368 irecv(MPI_Comm communicator,
1369 const unsigned int source_rank,
1370 const unsigned int mpi_tag = 0);
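    // Usage sketch (illustrative only): a non-blocking exchange of a
    // serializable object between ranks 0 and 1, using the Future class
    // declared above. 'DataType' and 'my_data' are assumed to exist.
    //
    //   if (Utilities::MPI::this_mpi_process(comm) == 0)
    //     {
    //       Utilities::MPI::Future<void> f =
    //         Utilities::MPI::isend(my_data, comm, /*target_rank=*/1);
    //       // ... do unrelated work while the message is in flight ...
    //       f.wait(); // ensure the send has completed
    //     }
    //   else if (Utilities::MPI::this_mpi_process(comm) == 1)
    //     {
    //       Utilities::MPI::Future<DataType> f =
    //         Utilities::MPI::irecv<DataType>(comm, /*source_rank=*/0);
    //       const DataType received = f.get(); // waits, then unpacks
    //     }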
1371
1372
1416 std::vector<unsigned int>
1417    compute_index_owner(const IndexSet &owned_indices,
1418                        const IndexSet &indices_to_look_up,
1419                        const MPI_Comm  comm);
1420
1431 std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
1432    compute_index_owner_and_requesters(const IndexSet &owned_indices,
1433                                       const IndexSet &indices_to_look_up,
1434                                       const MPI_Comm &comm);
1435
1443 template <typename T>
1444 std::vector<T>
1445 compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
1446
1450 template <typename T>
1451 std::set<T>
1452 compute_set_union(const std::set<T> &set, const MPI_Comm comm);
1453
1454
1455
1456 /* --------------------------- inline functions ------------------------- */
1457
1458 namespace internal
1459 {
1465 namespace MPIDataTypes
1466 {
1467#ifdef DEAL_II_WITH_MPI
1468 inline MPI_Datatype
1469 mpi_type_id(const bool *)
1470 {
1471 return MPI_CXX_BOOL;
1472 }
1473
1474
1475
1476 inline MPI_Datatype
1477 mpi_type_id(const char *)
1478 {
1479 return MPI_CHAR;
1480 }
1481
1482
1483
1484 inline MPI_Datatype
1485 mpi_type_id(const signed char *)
1486 {
1487 return MPI_SIGNED_CHAR;
1488 }
1489
1490
1491
1492 inline MPI_Datatype
1493 mpi_type_id(const wchar_t *)
1494 {
1495 return MPI_WCHAR;
1496 }
1497
1498
1499
1500 inline MPI_Datatype
1501 mpi_type_id(const short *)
1502 {
1503 return MPI_SHORT;
1504 }
1505
1506
1507
1508 inline MPI_Datatype
1509 mpi_type_id(const int *)
1510 {
1511 return MPI_INT;
1512 }
1513
1514
1515
1516 inline MPI_Datatype
1517 mpi_type_id(const long int *)
1518 {
1519 return MPI_LONG;
1520 }
1521
1522
1523
1524 inline MPI_Datatype
1525 mpi_type_id(const long long int *)
1526 {
1527 return MPI_LONG_LONG;
1528 }
1529
1530
1531
1532 inline MPI_Datatype
1533 mpi_type_id(const unsigned char *)
1534 {
1535 return MPI_UNSIGNED_CHAR;
1536 }
1537
1538
1539
1540 inline MPI_Datatype
1541 mpi_type_id(const unsigned short *)
1542 {
1543 return MPI_UNSIGNED_SHORT;
1544 }
1545
1546
1547
1548 inline MPI_Datatype
1549 mpi_type_id(const unsigned int *)
1550 {
1551 return MPI_UNSIGNED;
1552 }
1553
1554
1555
1556 inline MPI_Datatype
1557 mpi_type_id(const unsigned long int *)
1558 {
1559 return MPI_UNSIGNED_LONG;
1560 }
1561
1562
1563
1564 inline MPI_Datatype
1565 mpi_type_id(const unsigned long long int *)
1566 {
1567 return MPI_UNSIGNED_LONG_LONG;
1568 }
1569
1570
1571
1572 inline MPI_Datatype
1573 mpi_type_id(const float *)
1574 {
1575 return MPI_FLOAT;
1576 }
1577
1578
1579
1580 inline MPI_Datatype
1581 mpi_type_id(const double *)
1582 {
1583 return MPI_DOUBLE;
1584 }
1585
1586
1587
1588 inline MPI_Datatype
1589 mpi_type_id(const long double *)
1590 {
1591 return MPI_LONG_DOUBLE;
1592 }
1593
1594
1595
1596 inline MPI_Datatype
1597 mpi_type_id(const std::complex<float> *)
1598 {
1599 return MPI_COMPLEX;
1600 }
1601
1602
1603
1604 inline MPI_Datatype
1605 mpi_type_id(const std::complex<double> *)
1606 {
1607 return MPI_DOUBLE_COMPLEX;
1608 }
1609#endif
1610 } // namespace MPIDataTypes
1611 } // namespace internal
1612
1613
1614
1615#ifdef DEAL_II_WITH_MPI
1633 template <typename T>
1634 inline const MPI_Datatype mpi_type_id_for_type =
1635    internal::MPIDataTypes::mpi_type_id(
1636      static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1637#endif
1638
1639#ifndef DOXYGEN
1640 namespace internal
1641 {
1642 // declaration for an internal function that lives in mpi.templates.h
1643 template <typename T>
1644 void
1645 all_reduce(const MPI_Op &mpi_op,
1646 const ArrayView<const T> &values,
1647 const MPI_Comm mpi_communicator,
1648 const ArrayView<T> &output);
1649 } // namespace internal
1650
1651
1652 template <typename T>
1653 template <typename W, typename G>
1654 Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1655 : wait_function(wait_operation)
1656 , get_and_cleanup_function(get_and_cleanup_operation)
1657 , is_done(false)
1658 , get_was_called(false)
1659 {}
1660
1661
1662
1663 template <typename T>
1664 Future<T>::~Future()
1665 {
1666 // If there is a clean-up function, and if it has not been
1667 // called yet, then do so. Note that we may not have a
1668 // clean-up function (not even an empty one) if the current
1669 // object has been moved from, into another object, and as
1670 // a consequence the std::function objects are now empty
1671 // even though they were initialized in the constructor.
1672    // (A std::function object whose object is an empty lambda
1673 // function, [](){}, is not an empty std::function object.)
1674 if ((get_was_called == false) && get_and_cleanup_function)
1675 get();
1676 }
1677
1678
1679
1680 template <typename T>
1681 void
1682 Future<T>::wait()
1683 {
1684 if (is_done == false)
1685 {
1686 wait_function();
1687
1688 is_done = true;
1689 }
1690 }
1691
1692
1693 template <typename T>
1694 T
1695 Future<T>::get()
1696 {
1697 Assert(get_was_called == false,
1698 ExcMessage(
1699 "You can't call get() more than once on a Future object."));
1700 get_was_called = true;
1701
1702 wait();
1703 return get_and_cleanup_function();
1704 }
1705
1706
1707
1708 template <typename T, unsigned int N>
1709 void
1710 sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
1711 {
1712 internal::all_reduce(MPI_SUM,
1713 ArrayView<const T>(values, N),
1714 mpi_communicator,
1715 ArrayView<T>(sums, N));
1716 }
1717
1718
1719
1720 template <typename T, unsigned int N>
1721 void
1722 max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
1723 {
1724 internal::all_reduce(MPI_MAX,
1725 ArrayView<const T>(values, N),
1726 mpi_communicator,
1727 ArrayView<T>(maxima, N));
1728 }
1729
1730
1731
1732 template <typename T, unsigned int N>
1733 void
1734 min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
1735 {
1736 internal::all_reduce(MPI_MIN,
1737 ArrayView<const T>(values, N),
1738 mpi_communicator,
1739 ArrayView<T>(minima, N));
1740 }
1741
1742
1743
1744 template <typename T, unsigned int N>
1745 void
1746 logical_or(const T (&values)[N],
1747 const MPI_Comm mpi_communicator,
1748 T (&results)[N])
1749 {
1750 static_assert(std::is_integral_v<T>,
1751 "The MPI_LOR operation only allows integral data types.");
1752
1753 internal::all_reduce(MPI_LOR,
1754 ArrayView<const T>(values, N),
1755 mpi_communicator,
1756 ArrayView<T>(results, N));
1757 }
1758
1759
1760
1761 template <typename T>
1762 std::map<unsigned int, T>
1763  some_to_some(const MPI_Comm                   comm,
1764               const std::map<unsigned int, T> &objects_to_send)
1765 {
1766# ifndef DEAL_II_WITH_MPI
1767 (void)comm;
1768 Assert(objects_to_send.size() < 2,
1769 ExcMessage("Cannot send to more than one processor."));
1770 Assert(objects_to_send.find(0) != objects_to_send.end() ||
1771 objects_to_send.empty(),
1772 ExcMessage("Can only send to myself or to nobody."));
1773 return objects_to_send;
1774# else
1775 const auto my_proc = this_mpi_process(comm);
1776
1777 std::map<unsigned int, T> received_objects;
1778
1779 std::vector<unsigned int> send_to;
1780 send_to.reserve(objects_to_send.size());
1781 for (const auto &m : objects_to_send)
1782 if (m.first == my_proc)
1783 received_objects[my_proc] = m.second;
1784 else
1785 send_to.emplace_back(m.first);
1786
1787 const unsigned int n_expected_incoming_messages =
1788      Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1789
1790 // Protect the following communication:
1791 static CollectiveMutex mutex;
1792 CollectiveMutex::ScopedLock lock(mutex, comm);
1793
1794 // If we have something to send, or we expect something from other
1795 // processors, we need to visit one of the two scopes below. Otherwise,
1796 // no other action is required by this mpi process, and we can safely
1797 // return.
1798 if (send_to.empty() && n_expected_incoming_messages == 0)
1799 return received_objects;
1800
1801 const int mpi_tag =
1802 internal::Tags::compute_point_to_point_communication_pattern;
1803
1804 // Sending buffers
1805 std::vector<std::vector<char>> buffers_to_send(send_to.size());
1806 std::vector<MPI_Request> buffer_send_requests(send_to.size());
1807 {
1808 unsigned int i = 0;
1809 for (const auto &rank_obj : objects_to_send)
1810 if (rank_obj.first != my_proc)
1811 {
1812 const auto &rank = rank_obj.first;
1813 buffers_to_send[i] = Utilities::pack(rank_obj.second,
1814 /*allow_compression=*/false);
1815 const int ierr = MPI_Isend(buffers_to_send[i].data(),
1816 buffers_to_send[i].size(),
1817 MPI_CHAR,
1818 rank,
1819 mpi_tag,
1820 comm,
1821 &buffer_send_requests[i]);
1822 AssertThrowMPI(ierr);
1823 ++i;
1824 }
1825 }
1826
1827 // Fill the output map
1828 {
1829 std::vector<char> buffer;
1830 // We do this on a first come/first served basis
1831 for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1832 {
1833 // Probe what's going on. Take data from the first available sender
1834 MPI_Status status;
1835 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1836 AssertThrowMPI(ierr);
1837
1838 // Length of the message
1839 int len;
1840 ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1841 AssertThrowMPI(ierr);
1842 buffer.resize(len);
1843
1844 // Source rank
1845 const unsigned int rank = status.MPI_SOURCE;
1846
1847 // Actually receive the message
1848 ierr = MPI_Recv(buffer.data(),
1849 len,
1850 MPI_CHAR,
1851 status.MPI_SOURCE,
1852 status.MPI_TAG,
1853 comm,
1854 MPI_STATUS_IGNORE);
1855 AssertThrowMPI(ierr);
1856 Assert(received_objects.find(rank) == received_objects.end(),
1857             ExcInternalError(
1858               "I should not receive again from this rank"));
1859 received_objects[rank] =
1860 Utilities::unpack<T>(buffer,
1861 /*allow_compression=*/false);
1862 }
1863 }
1864
1865 // Wait to have sent all objects.
1866 const int ierr = MPI_Waitall(send_to.size(),
1867 buffer_send_requests.data(),
1868 MPI_STATUSES_IGNORE);
1869 AssertThrowMPI(ierr);
1870
1871 return received_objects;
1872# endif // deal.II with MPI
1873 }
1874
1875
1876
1877 template <typename T, typename>
1878 std::pair<T, T>
1879 partial_and_total_sum(const T &value, const MPI_Comm comm)
1880 {
1881# ifndef DEAL_II_WITH_MPI
1882 (void)comm;
1883 return {0, value};
1884# else
1885      if (n_mpi_processes(comm) == 1)
1886        return {0, value};
1887 else
1888 {
1889 T prefix = {};
1890
1891 // First obtain every process's prefix sum:
1892 int ierr =
1893 MPI_Exscan(&value,
1894 &prefix,
1895 1,
1896 Utilities::MPI::mpi_type_id_for_type<decltype(value)>,
1897 MPI_SUM,
1898 comm);
1899 AssertThrowMPI(ierr);
1900
1901 // Then we also need the total sum. We could obtain it by
1902 // calling Utilities::MPI::sum(), but it is cheaper if we
1903 // broadcast it from the last process, which can compute it
1904 // from its own prefix sum plus its own value.
1905          const T sum = Utilities::MPI::broadcast(
1906            comm, prefix + value, Utilities::MPI::n_mpi_processes(comm) - 1);
1907
1908 return {prefix, sum};
1909 }
1910# endif
1911 }
1912
1913
1914
1915 template <typename T>
1916 std::vector<T>
1917 all_gather(const MPI_Comm comm, const T &object)
1918 {
1919 if (job_supports_mpi() == false)
1920 return {object};
1921
1922# ifndef DEAL_II_WITH_MPI
1923 (void)comm;
1924 std::vector<T> v(1, object);
1925 return v;
1926# else
1927    const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
1928
1929 std::vector<char> buffer = Utilities::pack(object);
1930
1931 int n_local_data = buffer.size();
1932
1933 // Vector to store the size of loc_data_array for every process
1934 std::vector<int> size_all_data(n_procs, 0);
1935
1936 // Exchanging the size of each buffer
1937 int ierr = MPI_Allgather(
1938 &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1939 AssertThrowMPI(ierr);
1940
1941 // Now computing the displacement, relative to recvbuf,
1942 // at which to store the incoming buffer
1943 std::vector<int> rdispls(n_procs);
1944 rdispls[0] = 0;
1945 for (unsigned int i = 1; i < n_procs; ++i)
1946 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1947
1948 // Step 3: exchange the buffer:
1949 std::vector<char> received_unrolled_buffer(rdispls.back() +
1950 size_all_data.back());
1951
1952 ierr = MPI_Allgatherv(buffer.data(),
1953 n_local_data,
1954 MPI_CHAR,
1955 received_unrolled_buffer.data(),
1956 size_all_data.data(),
1957 rdispls.data(),
1958 MPI_CHAR,
1959 comm);
1960 AssertThrowMPI(ierr);
1961
1962 std::vector<T> received_objects(n_procs);
1963 for (unsigned int i = 0; i < n_procs; ++i)
1964 {
1965 std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1966 rdispls[i],
1967 received_unrolled_buffer.begin() +
1968 rdispls[i] + size_all_data[i]);
1969 received_objects[i] = Utilities::unpack<T>(local_buffer);
1970 }
1971
1972 return received_objects;
1973# endif
1974 }
1975
1976
1977
1978 template <typename T>
1979 std::vector<T>
1980 gather(const MPI_Comm comm,
1981 const T &object_to_send,
1982 const unsigned int root_process)
1983 {
1984# ifndef DEAL_II_WITH_MPI
1985 (void)comm;
1986 (void)root_process;
1987 std::vector<T> v(1, object_to_send);
1988 return v;
1989# else
1990    const auto n_procs = Utilities::MPI::n_mpi_processes(comm);
1991    const auto my_rank = Utilities::MPI::this_mpi_process(comm);
1992
1993 AssertIndexRange(root_process, n_procs);
1994
1995 std::vector<char> buffer = Utilities::pack(object_to_send);
1996 int n_local_data = buffer.size();
1997
1998 // Vector to store the size of loc_data_array for every process
1999 // only the root process needs to allocate memory for that purpose
2000 std::vector<int> size_all_data;
2001 if (my_rank == root_process)
2002 size_all_data.resize(n_procs, 0);
2003
2004 // Exchanging the size of each buffer
2005 int ierr = MPI_Gather(&n_local_data,
2006 1,
2007 MPI_INT,
2008 size_all_data.data(),
2009 1,
2010 MPI_INT,
2011 root_process,
2012 comm);
2013 AssertThrowMPI(ierr);
2014
2015 // Now computing the displacement, relative to recvbuf,
2016 // at which to store the incoming buffer; only for root
2017 std::vector<int> rdispls;
2018 if (my_rank == root_process)
2019 {
2020 rdispls.resize(n_procs, 0);
2021 for (unsigned int i = 1; i < n_procs; ++i)
2022 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2023 }
2024 // exchange the buffer:
2025 std::vector<char> received_unrolled_buffer;
2026 if (my_rank == root_process)
2027 received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2028
2029 ierr = MPI_Gatherv(buffer.data(),
2030 n_local_data,
2031 MPI_CHAR,
2032 received_unrolled_buffer.data(),
2033 size_all_data.data(),
2034 rdispls.data(),
2035 MPI_CHAR,
2036 root_process,
2037 comm);
2038 AssertThrowMPI(ierr);
2039
2040 std::vector<T> received_objects;
2041
2042 if (my_rank == root_process)
2043 {
2044 received_objects.resize(n_procs);
2045
2046 for (unsigned int i = 0; i < n_procs; ++i)
2047 {
2048 const std::vector<char> local_buffer(
2049 received_unrolled_buffer.begin() + rdispls[i],
2050 received_unrolled_buffer.begin() + rdispls[i] +
2051 size_all_data[i]);
2052 received_objects[i] = Utilities::unpack<T>(local_buffer);
2053 }
2054 }
2055 return received_objects;
2056# endif
2057 }
2058
2059
2060
2061 template <typename T>
2062 T
2063 scatter(const MPI_Comm comm,
2064 const std::vector<T> &objects_to_send,
2065 const unsigned int root_process)
2066 {
2067# ifndef DEAL_II_WITH_MPI
2068 (void)comm;
2069 (void)root_process;
2070
2071 AssertDimension(objects_to_send.size(), 1);
2072
2073 return objects_to_send[0];
2074# else
2075    const auto n_procs = Utilities::MPI::n_mpi_processes(comm);
2076    const auto my_rank = Utilities::MPI::this_mpi_process(comm);
2077
2078 AssertIndexRange(root_process, n_procs);
2079    AssertThrow(
2080      (my_rank != root_process && objects_to_send.empty()) ||
2081 objects_to_send.size() == n_procs,
2082 ExcMessage(
2083        "The number of objects to be scattered must correspond to the number of processes."));
2084
2085 std::vector<char> send_buffer;
2086 std::vector<int> send_counts;
2087 std::vector<int> send_displacements;
2088
2089 if (my_rank == root_process)
2090 {
2091 send_counts.resize(n_procs, 0);
2092 send_displacements.resize(n_procs + 1, 0);
2093
2094 for (unsigned int i = 0; i < n_procs; ++i)
2095 {
2096 const auto packed_data = Utilities::pack(objects_to_send[i]);
2097 send_buffer.insert(send_buffer.end(),
2098 packed_data.begin(),
2099 packed_data.end());
2100 send_counts[i] = packed_data.size();
2101 }
2102
2103 for (unsigned int i = 0; i < n_procs; ++i)
2104 send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2105 }
2106
2107 int n_local_data;
2108 int ierr = MPI_Scatter(send_counts.data(),
2109 1,
2110 MPI_INT,
2111 &n_local_data,
2112 1,
2113 MPI_INT,
2114 root_process,
2115 comm);
2116 AssertThrowMPI(ierr);
2117
2118 std::vector<char> recv_buffer(n_local_data);
2119
2120 ierr = MPI_Scatterv(send_buffer.data(),
2121 send_counts.data(),
2122 send_displacements.data(),
2123 MPI_CHAR,
2124 recv_buffer.data(),
2125 n_local_data,
2126 MPI_CHAR,
2127 root_process,
2128 comm);
2129 AssertThrowMPI(ierr);
2130
2131 return Utilities::unpack<T>(recv_buffer);
2132# endif
2133 }
2134
2135
2136 template <typename T>
2137 void
2138 broadcast(T *buffer,
2139 const std::size_t count,
2140 const unsigned int root,
2141 const MPI_Comm comm)
2142 {
2143# ifndef DEAL_II_WITH_MPI
2144 (void)buffer;
2145 (void)count;
2146 (void)root;
2147 (void)comm;
2148# else
2149 Assert(root < n_mpi_processes(comm),
2150 ExcMessage("Invalid root rank specified."));
2151
2152 // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2153 // iteration:
2154 const size_t max_send_count = std::numeric_limits<signed int>::max();
2155
2156 size_t total_sent_count = 0;
2157 while (total_sent_count < count)
2158 {
2159 const size_t current_count =
2160 std::min(count - total_sent_count, max_send_count);
2161
2162 const int ierr = MPI_Bcast(buffer + total_sent_count,
2163 current_count,
2164 mpi_type_id_for_type<decltype(*buffer)>,
2165 root,
2166 comm);
2167 AssertThrowMPI(ierr);
2168 total_sent_count += current_count;
2169 }
2170# endif
2171 }
2172
2173
2174
2175 template <typename T>
2176 T
2177 broadcast(const MPI_Comm comm,
2178 const T &object_to_send,
2179 const unsigned int root_process)
2180 {
2181# ifndef DEAL_II_WITH_MPI
2182 (void)comm;
2183 (void)root_process;
2184 return object_to_send;
2185# else
2186      const unsigned int n_procs = n_mpi_processes(comm);
2187      AssertIndexRange(root_process, n_procs);
2188 (void)n_procs;
2189
2190 if constexpr (is_mpi_type<T>)
2191 {
2192 T object = object_to_send;
2193 int ierr =
2194 MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2195 AssertThrowMPI(ierr);
2196
2197 return object;
2198 }
2199 else
2200 {
2201 std::vector<char> buffer;
2202 std::size_t buffer_size = numbers::invalid_size_type;
2203
2204 // On the root process, pack the data and determine what the
2205 // buffer size needs to be.
2206 if (this_mpi_process(comm) == root_process)
2207 {
2208 buffer = Utilities::pack(object_to_send, false);
2209 buffer_size = buffer.size();
2210 }
2211
2212 // Exchange the size of buffer
2213 int ierr = MPI_Bcast(&buffer_size,
2214 1,
2215 mpi_type_id_for_type<decltype(buffer_size)>,
2216 root_process,
2217 comm);
2218 AssertThrowMPI(ierr);
2219
2220 // If not on the root process, correctly size the buffer to
2221 // receive the data, then do exactly that.
2222 if (this_mpi_process(comm) != root_process)
2223 buffer.resize(buffer_size);
2224
2225 broadcast(buffer.data(), buffer_size, root_process, comm);
2226
2227 if (Utilities::MPI::this_mpi_process(comm) == root_process)
2228 return object_to_send;
2229 else
2230 return Utilities::unpack<T>(buffer, false);
2231 }
2232# endif
2233 }
2234
2235
2236
2237 template <typename T>
2238 Future<void>
2239 isend(const T &object,
2240 MPI_Comm communicator,
2241 const unsigned int target_rank,
2242 const unsigned int mpi_tag)
2243 {
2244# ifndef DEAL_II_WITH_MPI
2245 Assert(false, ExcNeedsMPI());
2246 (void)object;
2247 (void)communicator;
2248 (void)target_rank;
2249 (void)mpi_tag;
2250 return Future<void>([]() {}, []() {});
2251# else
2252 // Create a pointer to a send buffer into which we pack the object
2253 // to be sent. The buffer will be released by the Future object once
2254 // the send has been verified to have succeeded.
2255 //
2256 // Conceptually, we would like this send buffer to be a
2257 // std::unique_ptr object whose ownership is later handed over
2258 // to the cleanup function. That has the disadvantage that the
2259 // cleanup object is a non-copyable lambda capture, leading to
2260 // awkward semantics. Instead, we use a std::shared_ptr; we move
2261 // this shared pointer into the cleanup function, which means
2262 // that there is exactly one shared pointer who owns the buffer
2263 // at any given time, though the latter is not an important
2264 // optimization.
2265 std::shared_ptr<std::vector<char>> send_buffer =
2266 std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2267
2268 // Now start the send, and store the result in a request object that
2269 // we can then wait for later:
2270 MPI_Request request;
2271 const int ierr =
2272 MPI_Isend(send_buffer->data(),
2273 send_buffer->size(),
2274 mpi_type_id_for_type<decltype(*send_buffer->data())>,
2275 target_rank,
2276 mpi_tag,
2277 communicator,
2278 &request);
2279 AssertThrowMPI(ierr);
2280
2281 // Then return a std::future-like object that has a wait()
2282 // function one can use to wait for the communication to finish,
2283 // and that has a cleanup function to be called at some point
2284 // after that makes sure the send buffer gets deallocated. This
2285 // cleanup function takes over ownership of the send buffer.
2286 //
2287 // Note that the body of the lambda function of the clean-up
2288 // function could be left empty. If that were so, once the
2289 // lambda function object goes out of scope, the 'send_buffer'
2290 // member of the closure object goes out of scope as well and so
2291 // the send_buffer is destroyed. But we may want to release the
2292 // buffer itself as early as possible, and so we clear the
2293 // buffer when the Future::get() function is called.
2294 auto wait = [request]() mutable {
2295 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2296 AssertThrowMPI(ierr);
2297 };
2298 auto cleanup = [send_buffer = std::move(send_buffer)]() {
2299 send_buffer->clear();
2300 };
2301 return Future<void>(wait, cleanup);
2302# endif
2303 }
2304
2305
2306
2307 template <typename T>
2308 Future<T>
2309 irecv(MPI_Comm communicator,
2310 const unsigned int source_rank,
2311 const unsigned int mpi_tag)
2312 {
2313# ifndef DEAL_II_WITH_MPI
2314 Assert(false, ExcNeedsMPI());
2315 (void)communicator;
2316 (void)source_rank;
2317 (void)mpi_tag;
2318      return Future<T>([]() {}, []() { return T{}; });
2319# else
2320 // Use a 'probe' operation for the 'wait' operation of the
2321 // Future this function returns. It will trigger whenever we get
2322 // the incoming message. Later, once we have received the message, we
2323 // can query its size and allocate a receiver buffer.
2324 //
2325 // Since we may be waiting for multiple messages from the same
2326 // incoming process (with possibly the same tag -- we can't
2327 // know), we must make sure that the 'probe' operation we have
2328 // here (and which we use to determine the buffer size) matches
2329 // the 'recv' operation with which we actually get the data
2330 // later on. This is exactly what the 'MPI_Mprobe' function and
2331      // its 'I'mmediate variant are there for, coupled with the
2332      // 'MPI_Mrecv' call that we put into the clean-up function
2333 // below.
2334 std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2335 std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2336
2337 auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2338 const int ierr = MPI_Mprobe(
2339 source_rank, mpi_tag, communicator, message.get(), status.get());
2340 AssertThrowMPI(ierr);
2341 };
2342
2343
2344 // Now also define the function that actually gets the data:
2345 auto get = [status, message]() {
2346 int number_amount;
2347 int ierr;
2348 ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2349 AssertThrowMPI(ierr);
2350
2351 std::vector<char> receive_buffer(number_amount);
2352
2353 // Then actually get the data, using the matching MPI_Mrecv to the above
2354 // MPI_Mprobe:
2355 ierr = MPI_Mrecv(receive_buffer.data(),
2356 number_amount,
2357 mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2358 message.get(),
2359 status.get());
2360 AssertThrowMPI(ierr);
2361
2362 // Return the unpacked object:
2363 return Utilities::unpack<T>(receive_buffer, false);
2364 };
2365
2366 return Future<T>(wait, get);
2367# endif
2368 }
2369
2370
2371
2372# ifdef DEAL_II_WITH_MPI
2373 template <class Iterator, typename Number>
2374 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2375 mean_and_standard_deviation(const Iterator begin,
2376 const Iterator end,
2377 const MPI_Comm comm)
2378 {
2379      // Below we use a simple and straightforward implementation. More elaborate
2380 // options are:
2381 // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2382 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2383 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2384 using Std = typename numbers::NumberTraits<Number>::real_type;
2385 const Number sum = std::accumulate(begin, end, Number(0.));
2386
2387 const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2388 Assert(size > 0, ExcDivideByZero());
2389 const Number mean =
2390 Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2391 Std sq_sum = 0.;
2392 std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
2393        sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
2394      });
2395 sq_sum = Utilities::MPI::sum(sq_sum, comm);
2396 return std::make_pair(mean,
2397 std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2398 }
2399# endif
2400
2401#endif
2402 } // end of namespace MPI
2403} // end of namespace Utilities
2404
2405
2406DEAL_II_NAMESPACE_CLOSE
2407
2408#endif