deal.II version GIT relicensing-2287-g6548a49e0a 2024-12-20 18:30:00+00:00
mpi.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_h
16#define dealii_mpi_h
17
18#include <deal.II/base/config.h>
19
27
28#include <complex>
29#include <limits>
30#include <map>
31#include <numeric>
32#include <vector>
33
34
35
49#ifdef DEAL_II_WITH_MPI
50# define DEAL_II_MPI_CONST_CAST(expr) (expr)
51#endif
52
53
54
55DEAL_II_NAMESPACE_OPEN
56
57
58// Forward type declarations to allow MPI sums over tensorial types
59#ifndef DOXYGEN
60template <int rank, int dim, typename Number>
61class Tensor;
62template <int rank, int dim, typename Number>
63class SymmetricTensor;
64template <typename Number>
65class SparseMatrix;
66class IndexSet;
67#endif
68
69namespace Utilities
70{
83 IndexSet
84 create_evenly_distributed_partitioning(
85 const unsigned int my_partition_id,
86 const unsigned int n_partitions,
87 const types::global_dof_index total_size);
88
96 namespace MPI
97 {
106 template <typename T>
107 constexpr bool is_mpi_type = is_same_as_any_of<T,
108 char,
109 signed short,
110 signed int,
111 signed long,
112 signed long long,
113 signed char,
114 unsigned char,
115 unsigned short,
116 unsigned int,
117 unsigned long int,
118 unsigned long long,
119 float,
120 double,
121 long double,
122 bool,
123 std::complex<float>,
124 std::complex<double>,
125 std::complex<long double>,
126 wchar_t>::value;
127
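// Usage sketch (illustrative, not part of this header): is_mpi_type<T>
// reports at compile time whether T maps directly onto a built-in MPI
// datatype, which is useful when deciding between a plain MPI call and
// pack()/unpack()-based serialization:
//
//   static_assert(Utilities::MPI::is_mpi_type<double>);
//   static_assert(Utilities::MPI::is_mpi_type<unsigned int>);
//   static_assert(!Utilities::MPI::is_mpi_type<std::string>);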
136 unsigned int
137 n_mpi_processes(const MPI_Comm mpi_communicator);
138
147 unsigned int
148 this_mpi_process(const MPI_Comm mpi_communicator);
149
154 std::vector<unsigned int>
155 mpi_processes_within_communicator(const MPI_Comm comm_large,
156 const MPI_Comm comm_small);
157
179 std::vector<unsigned int>
180 compute_point_to_point_communication_pattern(
181 const MPI_Comm mpi_comm,
182 const std::vector<unsigned int> &destinations);
183
203 unsigned int
204 compute_n_point_to_point_communications(
205 const MPI_Comm mpi_comm,
206 const std::vector<unsigned int> &destinations);
207
224 MPI_Comm
225 duplicate_communicator(const MPI_Comm mpi_communicator);
226
236 void
237 free_communicator(MPI_Comm mpi_communicator);
238
251 class DuplicatedCommunicator
252 {
253 public:
257 explicit DuplicatedCommunicator(const MPI_Comm communicator)
258 : comm(duplicate_communicator(communicator))
259 {}
260
264 DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
265
269 ~DuplicatedCommunicator()
270 {
271 free_communicator(comm);
272 }
273
277 MPI_Comm
278 operator*() const
279 {
280 return comm;
281 }
282
283
288 DuplicatedCommunicator &
289 operator=(const DuplicatedCommunicator &) = delete;
290 private:
294 MPI_Comm comm;
295 };
296
297
298
328 class CollectiveMutex
329 {
330 public:
336 class ScopedLock
337 {
338 public:
342 ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
343 : mutex(mutex)
344 , comm(comm)
345 {
346 mutex.lock(comm);
347 }
348
352 ~ScopedLock()
353 {
354 mutex.unlock(comm);
355 }
356
357 private:
361 CollectiveMutex &mutex;
365 const MPI_Comm comm;
366 };
367
371 CollectiveMutex();
372
376 ~CollectiveMutex();
377
384 void
385 lock(const MPI_Comm comm);
386
393 void
394 unlock(const MPI_Comm comm);
395
396 private:
400 bool locked;
401
405 MPI_Request request;
406 };
407
408
409
456 template <typename T>
457 class Future
458 {
459 public:
464 template <typename W, typename G>
465 Future(W &&wait_operation, G &&get_and_cleanup_operation);
466
472 Future(const Future &) = delete;
473
477 Future(Future &&) noexcept = default;
478
482 ~Future();
483
489 Future &
490 operator=(const Future &) = delete;
491
495 Future &
496 operator=(Future &&) noexcept = default;
497
505 void
506 wait();
507
519 T
520 get();
521
522 private:
526 std::function<void()> wait_function;
527 std::function<T()> get_and_cleanup_function;
528
532 bool is_done;
533
537 bool get_was_called;
538 };
539
548 std::vector<IndexSet>
549 create_ascending_partitioning(
550 const MPI_Comm comm,
551 const types::global_dof_index locally_owned_size);
552
560 IndexSet
561 create_evenly_distributed_partitioning(
562 const MPI_Comm comm,
563 const types::global_dof_index total_size);
564
565#ifdef DEAL_II_WITH_MPI
581 template <class Iterator, typename Number = long double>
582 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
583 mean_and_standard_deviation(const Iterator begin,
584 const Iterator end,
585 const MPI_Comm comm);
586#endif
587
588
636 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
637 create_mpi_data_type_n_bytes(const std::size_t n_bytes);
638
658 template <typename T>
659 T
660 sum(const T &t, const MPI_Comm mpi_communicator);
661
671 template <typename T, typename U>
672 void
673 sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
674
684 template <typename T>
685 void
686 sum(const ArrayView<const T> &values,
687 const MPI_Comm mpi_communicator,
688 const ArrayView<T> &sums);
689
695 template <int rank, int dim, typename Number>
696 Tensor<rank, dim, Number>
697 sum(const Tensor<rank, dim, Number> &local,
698 const MPI_Comm mpi_communicator);
699
705 template <int rank, int dim, typename Number>
706 SymmetricTensor<rank, dim, Number>
707 sum(const SymmetricTensor<rank, dim, Number> &local,
708 const MPI_Comm mpi_communicator);
709
718 template <typename Number>
719 void
720 sum(const SparseMatrix<Number> &local,
721 const MPI_Comm mpi_communicator,
722 SparseMatrix<Number> &global);
723
743 template <typename T>
744 T
745 max(const T &t, const MPI_Comm mpi_communicator);
746
756 template <typename T, typename U>
757 void
758 max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
759
769 template <typename T>
770 void
771 max(const ArrayView<const T> &values,
772 const MPI_Comm mpi_communicator,
773 const ArrayView<T> &maxima);
774
794 template <typename T>
795 T
796 min(const T &t, const MPI_Comm mpi_communicator);
797
807 template <typename T, typename U>
808 void
809 min(const T &values, const MPI_Comm mpi_communicator, U &minima);
810
820 template <typename T>
821 void
822 min(const ArrayView<const T> &values,
823 const MPI_Comm mpi_communicator,
824 const ArrayView<T> &minima);
825
849 template <typename T>
850 T
851 logical_or(const T &t, const MPI_Comm mpi_communicator);
852
867 template <typename T, typename U>
868 void
869 logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
870
880 template <typename T>
881 void
882 logical_or(const ArrayView<const T> &values,
883 const MPI_Comm mpi_communicator,
884 const ArrayView<T> &results);
885
900 struct MinMaxAvg
901 {
906 double sum;
907
912 double min;
913
918 double max;
919
928 unsigned int min_index;
929
938 unsigned int max_index;
939
944 double avg;
945 };
946
961 MinMaxAvg
962 min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
963
975 std::vector<MinMaxAvg>
976 min_max_avg(const std::vector<double> &my_value,
977 const MPI_Comm mpi_communicator);
978
979
992 void
993 min_max_avg(const ArrayView<const double> &my_values,
994 const ArrayView<MinMaxAvg> &result,
995 const MPI_Comm mpi_communicator);
996
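// Usage sketch (illustrative): collect statistics of a per-process value,
// e.g. a local wall-clock time, across all ranks. 'local_time' is a
// hypothetical value computed on each process.
//
//   const Utilities::MPI::MinMaxAvg stats =
//     Utilities::MPI::min_max_avg(local_time, mpi_communicator);
//   // stats.min, stats.max, stats.avg, and stats.sum are identical on all
//   // ranks; stats.min_index and stats.max_index name the owning ranks.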
997
1041 class MPI_InitFinalize
1042 {
1043 public:
1089 MPI_InitFinalize(
1090 int &argc,
1091 char **&argv,
1092 const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1093
1098 ~MPI_InitFinalize();
1099 };
1100
1112 bool
1113 job_supports_mpi();
1114
1132 template <typename T>
1133 std::map<unsigned int, T>
1134 some_to_some(const MPI_Comm comm,
1135 const std::map<unsigned int, T> &objects_to_send);
1136
1150 template <typename T>
1151 std::vector<T>
1152 all_gather(const MPI_Comm comm, const T &object_to_send);
1153
1169 template <typename T>
1170 std::vector<T>
1171 gather(const MPI_Comm comm,
1172 const T &object_to_send,
1173 const unsigned int root_process = 0);
1174
1189 template <typename T>
1190 T
1191 scatter(const MPI_Comm comm,
1192 const std::vector<T> &objects_to_send,
1193 const unsigned int root_process = 0);
1194
1233 template <typename T>
1234 T
1235 broadcast(const MPI_Comm comm,
1236 const T &object_to_send,
1237 const unsigned int root_process = 0);
1238
1255 template <typename T>
1256 void
1257 broadcast(T *buffer,
1258 const size_t count,
1259 const unsigned int root,
1260 const MPI_Comm comm);
1261
1274 template <typename T>
1275 T
1276 reduce(const T &local_value,
1277 const MPI_Comm comm,
1278 const std::function<T(const T &, const T &)> &combiner,
1279 const unsigned int root_process = 0);
1280
1281
1293 template <typename T, typename = std::enable_if_t<is_mpi_type<T> == true>>
1294 std::pair<T, T>
1295 partial_and_total_sum(const T &value, const MPI_Comm comm);
1296
1297
1307 template <typename T>
1308 T
1309 all_reduce(const T &local_value,
1310 const MPI_Comm comm,
1311 const std::function<T(const T &, const T &)> &combiner);
1312
1313
1334 template <typename T>
1335 Future<void>
1336 isend(const T &object,
1337 MPI_Comm communicator,
1338 const unsigned int target_rank,
1339 const unsigned int mpi_tag = 0);
1340
1341
1358 template <typename T>
1359 Future<T>
1360 irecv(MPI_Comm communicator,
1361 const unsigned int source_rank,
1362 const unsigned int mpi_tag = 0);
1363
1364
1408 std::vector<unsigned int>
1409 compute_index_owner(const IndexSet &owned_indices,
1410 const IndexSet &indices_to_look_up,
1411 const MPI_Comm comm);
1412
1423 std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>
1424 compute_index_owner_and_requesters(const IndexSet &owned_indices,
1425 const IndexSet &indices_to_look_up,
1426 const MPI_Comm &comm);
1427
1435 template <typename T>
1436 std::vector<T>
1437 compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
1438
1442 template <typename T>
1443 std::set<T>
1444 compute_set_union(const std::set<T> &set, const MPI_Comm comm);
1445
1446
1447
1448 /* --------------------------- inline functions ------------------------- */
1449
1450 namespace internal
1451 {
1457 namespace MPIDataTypes
1458 {
1459#ifdef DEAL_II_WITH_MPI
1460 inline MPI_Datatype
1461 mpi_type_id(const bool *)
1462 {
1463 return MPI_CXX_BOOL;
1464 }
1465
1466
1467
1468 inline MPI_Datatype
1469 mpi_type_id(const char *)
1470 {
1471 return MPI_CHAR;
1472 }
1473
1474
1475
1476 inline MPI_Datatype
1477 mpi_type_id(const signed char *)
1478 {
1479 return MPI_SIGNED_CHAR;
1480 }
1481
1482
1483
1484 inline MPI_Datatype
1485 mpi_type_id(const wchar_t *)
1486 {
1487 return MPI_WCHAR;
1488 }
1489
1490
1491
1492 inline MPI_Datatype
1493 mpi_type_id(const short *)
1494 {
1495 return MPI_SHORT;
1496 }
1497
1498
1499
1500 inline MPI_Datatype
1501 mpi_type_id(const int *)
1502 {
1503 return MPI_INT;
1504 }
1505
1506
1507
1508 inline MPI_Datatype
1509 mpi_type_id(const long int *)
1510 {
1511 return MPI_LONG;
1512 }
1513
1514
1515
1516 inline MPI_Datatype
1517 mpi_type_id(const long long int *)
1518 {
1519 return MPI_LONG_LONG;
1520 }
1521
1522
1523
1524 inline MPI_Datatype
1525 mpi_type_id(const unsigned char *)
1526 {
1527 return MPI_UNSIGNED_CHAR;
1528 }
1529
1530
1531
1532 inline MPI_Datatype
1533 mpi_type_id(const unsigned short *)
1534 {
1535 return MPI_UNSIGNED_SHORT;
1536 }
1537
1538
1539
1540 inline MPI_Datatype
1541 mpi_type_id(const unsigned int *)
1542 {
1543 return MPI_UNSIGNED;
1544 }
1545
1546
1547
1548 inline MPI_Datatype
1549 mpi_type_id(const unsigned long int *)
1550 {
1551 return MPI_UNSIGNED_LONG;
1552 }
1553
1554
1555
1556 inline MPI_Datatype
1557 mpi_type_id(const unsigned long long int *)
1558 {
1559 return MPI_UNSIGNED_LONG_LONG;
1560 }
1561
1562
1563
1564 inline MPI_Datatype
1565 mpi_type_id(const float *)
1566 {
1567 return MPI_FLOAT;
1568 }
1569
1570
1571
1572 inline MPI_Datatype
1573 mpi_type_id(const double *)
1574 {
1575 return MPI_DOUBLE;
1576 }
1577
1578
1579
1580 inline MPI_Datatype
1581 mpi_type_id(const long double *)
1582 {
1583 return MPI_LONG_DOUBLE;
1584 }
1585
1586
1587
1588 inline MPI_Datatype
1589 mpi_type_id(const std::complex<float> *)
1590 {
1591 return MPI_COMPLEX;
1592 }
1593
1594
1595
1596 inline MPI_Datatype
1597 mpi_type_id(const std::complex<double> *)
1598 {
1599 return MPI_DOUBLE_COMPLEX;
1600 }
1601#endif
1602 } // namespace MPIDataTypes
1603 } // namespace internal
1604
1605
1606
1607#ifdef DEAL_II_WITH_MPI
1625 template <typename T>
1626 inline const MPI_Datatype mpi_type_id_for_type =
1627 internal::MPIDataTypes::mpi_type_id(
1628 static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1629#endif
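// Usage sketch (illustrative): mpi_type_id_for_type<T> yields the MPI
// datatype matching a C++ type, so hand-written MPI calls need not
// hard-code MPI_DOUBLE, MPI_INT, and friends:
//
//   double buffer[32] = {};
//   MPI_Bcast(buffer, 32,
//             Utilities::MPI::mpi_type_id_for_type<double>,
//             /*root=*/0, MPI_COMM_WORLD);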
1630
1631#ifndef DOXYGEN
1632 namespace internal
1633 {
1634 // declaration for an internal function that lives in mpi.templates.h
1635 template <typename T>
1636 void
1637 all_reduce(const MPI_Op &mpi_op,
1638 const ArrayView<const T> &values,
1639 const MPI_Comm mpi_communicator,
1640 const ArrayView<T> &output);
1641 } // namespace internal
1642
1643
1644 template <typename T>
1645 template <typename W, typename G>
1646 Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1647 : wait_function(wait_operation)
1648 , get_and_cleanup_function(get_and_cleanup_operation)
1649 , is_done(false)
1650 , get_was_called(false)
1651 {}
1652
1653
1654
1655 template <typename T>
1656 Future<T>::~Future()
1657 {
1658 // If there is a clean-up function, and if it has not been
1659 // called yet, then do so. Note that we may not have a
1660 // clean-up function (not even an empty one) if the current
1661 // object has been moved from, into another object, and as
1662 // a consequence the std::function objects are now empty
1663 // even though they were initialized in the constructor.
1664 // (A std::function object whose object is a an empty lambda
1665 // function, [](){}, is not an empty std::function object.)
1666 if ((get_was_called == false) && get_and_cleanup_function)
1667 get();
1668 }
1669
1670
1671
1672 template <typename T>
1673 void
1674 Future<T>::wait()
1675 {
1676 if (is_done == false)
1677 {
1678 wait_function();
1679
1680 is_done = true;
1681 }
1682 }
1683
1684
1685 template <typename T>
1686 T
1687 Future<T>::get()
1688 {
1689 Assert(get_was_called == false,
1690 ExcMessage(
1691 "You can't call get() more than once on a Future object."));
1692 get_was_called = true;
1693
1694 wait();
1695 return get_and_cleanup_function();
1696 }
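// Usage sketch (illustrative): a Future as returned by, e.g., irecv()
// below can be waited on explicitly, or get() can be called directly;
// get() implies wait(), may be called only once, and releases the
// resources associated with the operation.
//
//   Utilities::MPI::Future<int> future = /* e.g. irecv<int>(...) */;
//   future.wait();                  // optional: block until completion
//   const int value = future.get(); // retrieve payload and clean up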
1697
1698
1699
1700 template <typename T, unsigned int N>
1701 void
1702 sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
1703 {
1704 internal::all_reduce(MPI_SUM,
1705 ArrayView<const T>(values, N),
1706 mpi_communicator,
1707 ArrayView<T>(sums, N));
1708 }
1709
1710
1711
1712 template <typename T, unsigned int N>
1713 void
1714 max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
1715 {
1716 internal::all_reduce(MPI_MAX,
1717 ArrayView<const T>(values, N),
1718 mpi_communicator,
1719 ArrayView<T>(maxima, N));
1720 }
1721
1722
1723
1724 template <typename T, unsigned int N>
1725 void
1726 min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
1727 {
1728 internal::all_reduce(MPI_MIN,
1729 ArrayView<const T>(values, N),
1730 mpi_communicator,
1731 ArrayView<T>(minima, N));
1732 }
1733
1734
1735
1736 template <typename T, unsigned int N>
1737 void
1738 logical_or(const T (&values)[N],
1739 const MPI_Comm mpi_communicator,
1740 T (&results)[N])
1741 {
1742 static_assert(std::is_integral_v<T>,
1743 "The MPI_LOR operation only allows integral data types.");
1744
1745 internal::all_reduce(MPI_LOR,
1746 ArrayView<const T>(values, N),
1747 mpi_communicator,
1748 ArrayView<T>(results, N));
1749 }
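// Usage sketch (illustrative): the fixed-size array overloads above reduce
// all entries element-wise with a single MPI call. 'cell_part' and
// 'face_part' are hypothetical local contributions.
//
//   double local_contributions[2]  = {cell_part, face_part};
//   double global_contributions[2] = {0., 0.};
//   Utilities::MPI::sum(local_contributions,
//                       mpi_communicator,
//                       global_contributions);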
1750
1751
1752
1753 template <typename T>
1754 std::map<unsigned int, T>
1755 some_to_some(const MPI_Comm comm,
1756 const std::map<unsigned int, T> &objects_to_send)
1757 {
1758# ifndef DEAL_II_WITH_MPI
1759 (void)comm;
1760 Assert(objects_to_send.size() < 2,
1761 ExcMessage("Cannot send to more than one processor."));
1762 Assert(objects_to_send.find(0) != objects_to_send.end() ||
1763 objects_to_send.empty(),
1764 ExcMessage("Can only send to myself or to nobody."));
1765 return objects_to_send;
1766# else
1767 const auto my_proc = this_mpi_process(comm);
1768
1769 std::map<unsigned int, T> received_objects;
1770
1771 std::vector<unsigned int> send_to;
1772 send_to.reserve(objects_to_send.size());
1773 for (const auto &m : objects_to_send)
1774 if (m.first == my_proc)
1775 received_objects[my_proc] = m.second;
1776 else
1777 send_to.emplace_back(m.first);
1778
1779 const unsigned int n_expected_incoming_messages =
1780 Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1781
1782 // Protect the following communication:
1783 static CollectiveMutex mutex;
1784 CollectiveMutex::ScopedLock lock(mutex, comm);
1785
1786 // If we have something to send, or we expect something from other
1787 // processors, we need to visit one of the two scopes below. Otherwise,
1788 // no other action is required by this mpi process, and we can safely
1789 // return.
1790 if (send_to.empty() && n_expected_incoming_messages == 0)
1791 return received_objects;
1792
1793 const int mpi_tag =
1794 internal::Tags::compute_point_to_point_communication_pattern;
1795
1796 // Sending buffers
1797 std::vector<std::vector<char>> buffers_to_send(send_to.size());
1798 std::vector<MPI_Request> buffer_send_requests(send_to.size());
1799 {
1800 unsigned int i = 0;
1801 for (const auto &rank_obj : objects_to_send)
1802 if (rank_obj.first != my_proc)
1803 {
1804 const auto &rank = rank_obj.first;
1805 buffers_to_send[i] = Utilities::pack(rank_obj.second,
1806 /*allow_compression=*/false);
1807 const int ierr = MPI_Isend(buffers_to_send[i].data(),
1808 buffers_to_send[i].size(),
1809 MPI_CHAR,
1810 rank,
1811 mpi_tag,
1812 comm,
1813 &buffer_send_requests[i]);
1814 AssertThrowMPI(ierr);
1815 ++i;
1816 }
1817 }
1818
1819 // Fill the output map
1820 {
1821 std::vector<char> buffer;
1822 // We do this on a first come/first served basis
1823 for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1824 {
1825 // Probe what's going on. Take data from the first available sender
1826 MPI_Status status;
1827 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1828 AssertThrowMPI(ierr);
1829
1830 // Length of the message
1831 int len;
1832 ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1833 AssertThrowMPI(ierr);
1834 buffer.resize(len);
1835
1836 // Source rank
1837 const unsigned int rank = status.MPI_SOURCE;
1838
1839 // Actually receive the message
1840 ierr = MPI_Recv(buffer.data(),
1841 len,
1842 MPI_CHAR,
1843 status.MPI_SOURCE,
1844 status.MPI_TAG,
1845 comm,
1846 MPI_STATUS_IGNORE);
1847 AssertThrowMPI(ierr);
1848 Assert(received_objects.find(rank) == received_objects.end(),
1850 "I should not receive again from this rank"));
1851 received_objects[rank] =
1852 Utilities::unpack<T>(buffer,
1853 /*allow_compression=*/false);
1854 }
1855 }
1856
1857 // Wait to have sent all objects.
1858 const int ierr = MPI_Waitall(send_to.size(),
1859 buffer_send_requests.data(),
1860 MPI_STATUSES_IGNORE);
1861 AssertThrowMPI(ierr);
1862
1863 return received_objects;
1864# endif // deal.II with MPI
1865 }
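// Usage sketch (illustrative): send a serializable object to a chosen set
// of ranks and receive whatever other ranks addressed to this process; the
// keys of both maps are rank numbers. 'neighbor_rank' and 'local_data' are
// hypothetical.
//
//   std::map<unsigned int, std::vector<double>> to_send;
//   to_send[neighbor_rank] = local_data;
//   const std::map<unsigned int, std::vector<double>> received =
//     Utilities::MPI::some_to_some(mpi_communicator, to_send);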
1866
1867
1868
1869 template <typename T, typename>
1870 std::pair<T, T>
1871 partial_and_total_sum(const T &value, const MPI_Comm comm)
1872 {
1873# ifndef DEAL_II_WITH_MPI
1874 (void)comm;
1875 return {0, value};
1876# else
1877 if (n_mpi_processes(comm) == 1)
1878 return {0, value};
1879 else
1880 {
1881 T prefix = {};
1882
1883 // First obtain every process's prefix sum:
1884 int ierr =
1885 MPI_Exscan(&value,
1886 &prefix,
1887 1,
1888 Utilities::MPI::mpi_type_id_for_type<decltype(value)>,
1889 MPI_SUM,
1890 comm);
1891 AssertThrowMPI(ierr);
1892
1893 // Then we also need the total sum. We could obtain it by
1894 // calling Utilities::MPI::sum(), but it is cheaper if we
1895 // broadcast it from the last process, which can compute it
1896 // from its own prefix sum plus its own value.
1897 const T sum = Utilities::MPI::broadcast(
1898 comm, prefix + value, Utilities::MPI::n_mpi_processes(comm) - 1);
1899
1900 return {prefix, sum};
1901 }
1902# endif
1903 }
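// Usage sketch (illustrative): determine where this process's locally
// owned range starts within a global enumeration, together with the global
// size. For local sizes {3, 5, 2} on ranks {0, 1, 2}, the results are
// {0, 10}, {3, 10}, and {8, 10}, respectively (exclusive prefix sum).
//
//   const std::pair<unsigned int, unsigned int> offset_and_total =
//     Utilities::MPI::partial_and_total_sum(n_locally_owned, comm);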
1904
1905
1906
1907 template <typename T>
1908 std::vector<T>
1909 all_gather(const MPI_Comm comm, const T &object)
1910 {
1911 if (job_supports_mpi() == false)
1912 return {object};
1913
1914# ifndef DEAL_II_WITH_MPI
1915 (void)comm;
1916 std::vector<T> v(1, object);
1917 return v;
1918# else
1919 const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
1920
1921 std::vector<char> buffer = Utilities::pack(object);
1922
1923 int n_local_data = buffer.size();
1924
1925 // Vector to store the size of loc_data_array for every process
1926 std::vector<int> size_all_data(n_procs, 0);
1927
1928 // Exchanging the size of each buffer
1929 int ierr = MPI_Allgather(
1930 &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
1931 AssertThrowMPI(ierr);
1932
1933 // Now computing the displacement, relative to recvbuf,
1934 // at which to store the incoming buffer
1935 std::vector<int> rdispls(n_procs);
1936 rdispls[0] = 0;
1937 for (unsigned int i = 1; i < n_procs; ++i)
1938 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
1939
1940 // Step 3: exchange the buffer:
1941 std::vector<char> received_unrolled_buffer(rdispls.back() +
1942 size_all_data.back());
1943
1944 ierr = MPI_Allgatherv(buffer.data(),
1945 n_local_data,
1946 MPI_CHAR,
1947 received_unrolled_buffer.data(),
1948 size_all_data.data(),
1949 rdispls.data(),
1950 MPI_CHAR,
1951 comm);
1952 AssertThrowMPI(ierr);
1953
1954 std::vector<T> received_objects(n_procs);
1955 for (unsigned int i = 0; i < n_procs; ++i)
1956 {
1957 std::vector<char> local_buffer(received_unrolled_buffer.begin() +
1958 rdispls[i],
1959 received_unrolled_buffer.begin() +
1960 rdispls[i] + size_all_data[i]);
1961 received_objects[i] = Utilities::unpack<T>(local_buffer);
1962 }
1963
1964 return received_objects;
1965# endif
1966 }
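// Usage sketch (illustrative): collect one serializable object from every
// rank on every rank; the resulting vector is indexed by rank.
//
//   const std::string my_name =
//     "rank " + std::to_string(Utilities::MPI::this_mpi_process(comm));
//   const std::vector<std::string> all_names =
//     Utilities::MPI::all_gather(comm, my_name);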
1967
1968
1969
1970 template <typename T>
1971 std::vector<T>
1972 gather(const MPI_Comm comm,
1973 const T &object_to_send,
1974 const unsigned int root_process)
1975 {
1976# ifndef DEAL_II_WITH_MPI
1977 (void)comm;
1978 (void)root_process;
1979 std::vector<T> v(1, object_to_send);
1980 return v;
1981# else
1982 const unsigned int n_procs = n_mpi_processes(comm);
1983 const unsigned int my_rank = this_mpi_process(comm);
1984
1985 AssertIndexRange(root_process, n_procs);
1986
1987 std::vector<char> buffer = Utilities::pack(object_to_send);
1988 int n_local_data = buffer.size();
1989
1990 // Vector to store the size of loc_data_array for every process
1991 // only the root process needs to allocate memory for that purpose
1992 std::vector<int> size_all_data;
1993 if (my_rank == root_process)
1994 size_all_data.resize(n_procs, 0);
1995
1996 // Exchanging the size of each buffer
1997 int ierr = MPI_Gather(&n_local_data,
1998 1,
1999 MPI_INT,
2000 size_all_data.data(),
2001 1,
2002 MPI_INT,
2003 root_process,
2004 comm);
2005 AssertThrowMPI(ierr);
2006
2007 // Now computing the displacement, relative to recvbuf,
2008 // at which to store the incoming buffer; only for root
2009 std::vector<int> rdispls;
2010 if (my_rank == root_process)
2011 {
2012 rdispls.resize(n_procs, 0);
2013 for (unsigned int i = 1; i < n_procs; ++i)
2014 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2015 }
2016 // exchange the buffer:
2017 std::vector<char> received_unrolled_buffer;
2018 if (my_rank == root_process)
2019 received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2020
2021 ierr = MPI_Gatherv(buffer.data(),
2022 n_local_data,
2023 MPI_CHAR,
2024 received_unrolled_buffer.data(),
2025 size_all_data.data(),
2026 rdispls.data(),
2027 MPI_CHAR,
2028 root_process,
2029 comm);
2030 AssertThrowMPI(ierr);
2031
2032 std::vector<T> received_objects;
2033
2034 if (my_rank == root_process)
2035 {
2036 received_objects.resize(n_procs);
2037
2038 for (unsigned int i = 0; i < n_procs; ++i)
2039 {
2040 const std::vector<char> local_buffer(
2041 received_unrolled_buffer.begin() + rdispls[i],
2042 received_unrolled_buffer.begin() + rdispls[i] +
2043 size_all_data[i]);
2044 received_objects[i] = Utilities::unpack<T>(local_buffer);
2045 }
2046 }
2047 return received_objects;
2048# endif
2049 }
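// Usage sketch (illustrative): like all_gather(), but only the root
// process receives the collected data; every other rank gets back an
// empty vector.
//
//   const std::vector<std::string> gathered =
//     Utilities::MPI::gather(comm, my_name, /*root_process=*/0);
//   if (Utilities::MPI::this_mpi_process(comm) == 0)
//     AssertDimension(gathered.size(), Utilities::MPI::n_mpi_processes(comm));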
2050
2051
2052
2053 template <typename T>
2054 T
2055 scatter(const MPI_Comm comm,
2056 const std::vector<T> &objects_to_send,
2057 const unsigned int root_process)
2058 {
2059# ifndef DEAL_II_WITH_MPI
2060 (void)comm;
2061 (void)root_process;
2062
2063 AssertDimension(objects_to_send.size(), 1);
2064
2065 return objects_to_send[0];
2066# else
2067 const unsigned int n_procs = n_mpi_processes(comm);
2068 const unsigned int my_rank = this_mpi_process(comm);
2069
2070 AssertIndexRange(root_process, n_procs);
2071 AssertThrow(
2072 (my_rank != root_process && objects_to_send.empty()) ||
2073 objects_to_send.size() == n_procs,
2074 ExcMessage(
2075 "The number of objects to be scattered must correspond to the number processes."));
2076
2077 std::vector<char> send_buffer;
2078 std::vector<int> send_counts;
2079 std::vector<int> send_displacements;
2080
2081 if (my_rank == root_process)
2082 {
2083 send_counts.resize(n_procs, 0);
2084 send_displacements.resize(n_procs + 1, 0);
2085
2086 for (unsigned int i = 0; i < n_procs; ++i)
2087 {
2088 const auto packed_data = Utilities::pack(objects_to_send[i]);
2089 send_buffer.insert(send_buffer.end(),
2090 packed_data.begin(),
2091 packed_data.end());
2092 send_counts[i] = packed_data.size();
2093 }
2094
2095 for (unsigned int i = 0; i < n_procs; ++i)
2096 send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2097 }
2098
2099 int n_local_data;
2100 int ierr = MPI_Scatter(send_counts.data(),
2101 1,
2102 MPI_INT,
2103 &n_local_data,
2104 1,
2105 MPI_INT,
2106 root_process,
2107 comm);
2108 AssertThrowMPI(ierr);
2109
2110 std::vector<char> recv_buffer(n_local_data);
2111
2112 ierr = MPI_Scatterv(send_buffer.data(),
2113 send_counts.data(),
2114 send_displacements.data(),
2115 MPI_CHAR,
2116 recv_buffer.data(),
2117 n_local_data,
2118 MPI_CHAR,
2119 root_process,
2120 comm);
2121 AssertThrowMPI(ierr);
2122
2123 return Utilities::unpack<T>(recv_buffer);
2124# endif
2125 }
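// Usage sketch (illustrative): the root process provides one object per
// rank; every rank, including the root, receives exactly its own entry.
//
//   std::vector<int> per_rank_data;                      // root only
//   if (Utilities::MPI::this_mpi_process(comm) == 0)
//     per_rank_data = {10, 11, 12 /* one entry per rank */};
//   const int my_value =
//     Utilities::MPI::scatter(comm, per_rank_data, /*root_process=*/0);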
2126
2127
2128 template <typename T>
2129 void
2130 broadcast(T *buffer,
2131 const size_t count,
2132 const unsigned int root,
2133 const MPI_Comm comm)
2134 {
2135# ifndef DEAL_II_WITH_MPI
2136 (void)buffer;
2137 (void)count;
2138 (void)root;
2139 (void)comm;
2140# else
2141 Assert(root < n_mpi_processes(comm),
2142 ExcMessage("Invalid root rank specified."));
2143
2144 // MPI_Bcast's count is a signed int, so send at most 2^31 in each
2145 // iteration:
2146 const size_t max_send_count = std::numeric_limits<signed int>::max();
2147
2148 size_t total_sent_count = 0;
2149 while (total_sent_count < count)
2150 {
2151 const size_t current_count =
2152 std::min(count - total_sent_count, max_send_count);
2153
2154 const int ierr = MPI_Bcast(buffer + total_sent_count,
2155 current_count,
2156 mpi_type_id_for_type<decltype(*buffer)>,
2157 root,
2158 comm);
2159 AssertThrowMPI(ierr);
2160 total_sent_count += current_count;
2161 }
2162# endif
2163 }
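// Usage sketch (illustrative): broadcast a large raw buffer of an MPI
// datatype; counts larger than 2^31-1 are split into several MPI_Bcast
// calls internally, so a std::size_t count is fine.
//
//   std::vector<double> data(n_entries);   // contents relevant on 'root'
//   Utilities::MPI::broadcast(data.data(), data.size(), root, comm);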
2164
2165
2166
2167 template <typename T>
2168 T
2169 broadcast(const MPI_Comm comm,
2170 const T &object_to_send,
2171 const unsigned int root_process)
2172 {
2173# ifndef DEAL_II_WITH_MPI
2174 (void)comm;
2175 (void)root_process;
2176 return object_to_send;
2177# else
2178 const unsigned int n_procs = n_mpi_processes(comm);
2179 AssertIndexRange(root_process, n_procs);
2180 (void)n_procs;
2181
2182 if constexpr (is_mpi_type<T>)
2183 {
2184 T object = object_to_send;
2185 int ierr =
2186 MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2187 AssertThrowMPI(ierr);
2188
2189 return object;
2190 }
2191 else
2192 {
2193 std::vector<char> buffer;
2194 std::size_t buffer_size = numbers::invalid_size_type;
2195
2196 // On the root process, pack the data and determine what the
2197 // buffer size needs to be.
2198 if (this_mpi_process(comm) == root_process)
2199 {
2200 buffer = Utilities::pack(object_to_send, false);
2201 buffer_size = buffer.size();
2202 }
2203
2204 // Exchange the size of buffer
2205 int ierr = MPI_Bcast(&buffer_size,
2206 1,
2207 mpi_type_id_for_type<decltype(buffer_size)>,
2208 root_process,
2209 comm);
2210 AssertThrowMPI(ierr);
2211
2212 // If not on the root process, correctly size the buffer to
2213 // receive the data, then do exactly that.
2214 if (this_mpi_process(comm) != root_process)
2215 buffer.resize(buffer_size);
2216
2217 broadcast(buffer.data(), buffer_size, root_process, comm);
2218
2219 if (Utilities::MPI::this_mpi_process(comm) == root_process)
2220 return object_to_send;
2221 else
2222 return Utilities::unpack<T>(buffer, false);
2223 }
2224# endif
2225 }
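// Usage sketch (illustrative): broadcasting an arbitrary (serializable)
// object uses Utilities::pack()/unpack() behind the scenes when T is not a
// native MPI type.
//
//   std::map<std::string, double> parameters;   // filled on rank 0 only
//   parameters = Utilities::MPI::broadcast(comm, parameters, /*root=*/0);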
2226
2227
2228
2229 template <typename T>
2230 Future<void>
2231 isend(const T &object,
2232 MPI_Comm communicator,
2233 const unsigned int target_rank,
2234 const unsigned int mpi_tag)
2235 {
2236# ifndef DEAL_II_WITH_MPI
2237 Assert(false, ExcNeedsMPI());
2238 (void)object;
2239 (void)communicator;
2240 (void)target_rank;
2241 (void)mpi_tag;
2242 return Future<void>([]() {}, []() {});
2243# else
2244 // Create a pointer to a send buffer into which we pack the object
2245 // to be sent. The buffer will be released by the Future object once
2246 // the send has been verified to have succeeded.
2247 //
2248 // Conceptually, we would like this send buffer to be a
2249 // std::unique_ptr object whose ownership is later handed over
2250 // to the cleanup function. That has the disadvantage that the
2251 // cleanup object is a non-copyable lambda capture, leading to
2252 // awkward semantics. Instead, we use a std::shared_ptr; we move
2253 // this shared pointer into the cleanup function, which means
2254 // that there is exactly one shared pointer who owns the buffer
2255 // at any given time, though the latter is not an important
2256 // optimization.
2257 std::shared_ptr<std::vector<char>> send_buffer =
2258 std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2259
2260 // Now start the send, and store the result in a request object that
2261 // we can then wait for later:
2262 MPI_Request request;
2263 const int ierr =
2264 MPI_Isend(send_buffer->data(),
2265 send_buffer->size(),
2266 mpi_type_id_for_type<decltype(*send_buffer->data())>,
2267 target_rank,
2268 mpi_tag,
2269 communicator,
2270 &request);
2271 AssertThrowMPI(ierr);
2272
2273 // Then return a std::future-like object that has a wait()
2274 // function one can use to wait for the communication to finish,
2275 // and that has a cleanup function to be called at some point
2276 // after that makes sure the send buffer gets deallocated. This
2277 // cleanup function takes over ownership of the send buffer.
2278 //
2279 // Note that the body of the lambda function of the clean-up
2280 // function could be left empty. If that were so, once the
2281 // lambda function object goes out of scope, the 'send_buffer'
2282 // member of the closure object goes out of scope as well and so
2283 // the send_buffer is destroyed. But we may want to release the
2284 // buffer itself as early as possible, and so we clear the
2285 // buffer when the Future::get() function is called.
2286 auto wait = [request]() mutable {
2287 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2288 AssertThrowMPI(ierr);
2289 };
2290 auto cleanup = [send_buffer = std::move(send_buffer)]() {
2291 send_buffer->clear();
2292 };
2293 return Future<void>(wait, cleanup);
2294# endif
2295 }
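// Usage sketch (illustrative): start a non-blocking send; the returned
// Future keeps the packed send buffer alive until the operation has
// completed, so the caller does not manage any buffer. 'my_object' is
// hypothetical.
//
//   Utilities::MPI::Future<void> send_future =
//     Utilities::MPI::isend(my_object, comm, /*target_rank=*/1, /*mpi_tag=*/42);
//   /* ... unrelated work ... */
//   send_future.get();   // wait for completion and release the buffer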
2296
2297
2298
2299 template <typename T>
2300 Future<T>
2301 irecv(MPI_Comm communicator,
2302 const unsigned int source_rank,
2303 const unsigned int mpi_tag)
2304 {
2305# ifndef DEAL_II_WITH_MPI
2306 Assert(false, ExcNeedsMPI());
2307 (void)communicator;
2308 (void)source_rank;
2309 (void)mpi_tag;
2310 return Future<T>([]() {}, []() { return T{}; });
2311# else
2312 // Use a 'probe' operation for the 'wait' operation of the
2313 // Future this function returns. It will trigger whenever we get
2314 // the incoming message. Later, once we have received the message, we
2315 // can query its size and allocate a receiver buffer.
2316 //
2317 // Since we may be waiting for multiple messages from the same
2318 // incoming process (with possibly the same tag -- we can't
2319 // know), we must make sure that the 'probe' operation we have
2320 // here (and which we use to determine the buffer size) matches
2321 // the 'recv' operation with which we actually get the data
2322 // later on. This is exactly what the 'MPI_Mprobe' function and
2323 // its 'I'mmediate variant is there for, coupled with the
2324 // 'MPI_Mrecv' call that would put into the clean-up function
2325 // below.
2326 std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2327 std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2328
2329 auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2330 const int ierr = MPI_Mprobe(
2331 source_rank, mpi_tag, communicator, message.get(), status.get());
2332 AssertThrowMPI(ierr);
2333 };
2334
2335
2336 // Now also define the function that actually gets the data:
2337 auto get = [status, message]() {
2338 int number_amount;
2339 int ierr;
2340 ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2341 AssertThrowMPI(ierr);
2342
2343 std::vector<char> receive_buffer(number_amount);
2344
2345 // Then actually get the data, using the matching MPI_Mrecv to the above
2346 // MPI_Mprobe:
2347 ierr = MPI_Mrecv(receive_buffer.data(),
2348 number_amount,
2349 mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2350 message.get(),
2351 status.get());
2352 AssertThrowMPI(ierr);
2353
2354 // Return the unpacked object:
2355 return Utilities::unpack<T>(receive_buffer, false);
2356 };
2357
2358 return Future<T>(wait, get);
2359# endif
2360 }
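// Usage sketch (illustrative): the receiving counterpart of the isend()
// sketch above; the message size need not be known in advance because the
// wait operation probes for it. 'MyType' is a hypothetical serializable type.
//
//   Utilities::MPI::Future<MyType> recv_future =
//     Utilities::MPI::irecv<MyType>(comm, /*source_rank=*/0, /*mpi_tag=*/42);
//   /* ... unrelated work ... */
//   const MyType received = recv_future.get();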
2361
2362
2363
2364# ifdef DEAL_II_WITH_MPI
2365 template <class Iterator, typename Number>
2366 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2367 mean_and_standard_deviation(const Iterator begin,
2368 const Iterator end,
2369 const MPI_Comm comm)
2370 {
2371 // below we do simple and straight-forward implementation. More elaborate
2372 // options are:
2373 // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2374 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2375 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2376 using Std = typename numbers::NumberTraits<Number>::real_type;
2377 const Number sum = std::accumulate(begin, end, Number(0.));
2378
2379 const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2380 Assert(size > 0, ExcDivideByZero());
2381 const Number mean =
2382 Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2383 Std sq_sum = 0.;
2384 std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
2385 sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
2386 });
2387 sq_sum = Utilities::MPI::sum(sq_sum, comm);
2388 return std::make_pair(mean,
2389 std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2390 }
2391# endif
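// Usage sketch (illustrative): mean and sample standard deviation of
// values distributed across all ranks, e.g. per-cell error indicators.
//
//   std::vector<double> local_errors = /* ... */;
//   const std::pair<double, double> stats =
//     Utilities::MPI::mean_and_standard_deviation(local_errors.begin(),
//                                                 local_errors.end(),
//                                                 mpi_communicator);
//   // stats.first is the mean, stats.second the standard deviation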
2392
2393#endif
2394 } // end of namespace MPI
2395} // end of namespace Utilities
2396
2397
2398DEAL_II_NAMESPACE_CLOSE
2399
2400#endif