Reference documentation for deal.II version GIT relicensing-249-g48dc7357c7 2024-03-29 12:30:02+00:00
mpi.h
1// ------------------------------------------------------------------------
2//
3// SPDX-License-Identifier: LGPL-2.1-or-later
4// Copyright (C) 2012 - 2024 by the deal.II authors
5//
6// This file is part of the deal.II library.
7//
8// Part of the source code is dual licensed under Apache-2.0 WITH
9// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
10// governing the source code and code contributions can be found in
11// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
12//
13// ------------------------------------------------------------------------
14
15#ifndef dealii_mpi_h
16#define dealii_mpi_h
17
18#include <deal.II/base/config.h>
19
20#include <deal.II/base/array_view.h>
21#include <deal.II/base/index_set.h>
22#include <deal.II/base/mpi_stub.h>
23#include <deal.II/base/mpi_tags.h>
24#include <deal.II/base/numbers.h>
25#include <deal.II/base/template_constraints.h>
26#include <deal.II/base/utilities.h>
27#include <boost/signals2.hpp>
28
29#include <complex>
30#include <limits>
31#include <map>
32#include <numeric>
33#include <set>
34#include <vector>
35
36
37
51#ifdef DEAL_II_WITH_MPI
52# define DEAL_II_MPI_CONST_CAST(expr) (expr)
53#endif
54
55
56
57DEAL_II_NAMESPACE_OPEN
58
59
60// Forward type declarations to allow MPI sums over tensorial types
61#ifndef DOXYGEN
62template <int rank, int dim, typename Number>
63class Tensor;
64template <int rank, int dim, typename Number>
65class SymmetricTensor;
66template <typename Number>
67class SparseMatrix;
68class IndexSet;
69#endif
70
71namespace Utilities
72{
85 IndexSet
86 create_evenly_distributed_partitioning(
87 const unsigned int my_partition_id,
88 const unsigned int n_partitions,
89 const types::global_dof_index total_size);
90
98 namespace MPI
99 {
108 template <typename T>
109 constexpr bool is_mpi_type = is_same_as_any_of<T,
110 char,
111 signed short,
112 signed int,
113 signed long,
114 signed long long,
115 signed char,
116 unsigned char,
117 unsigned short,
118 unsigned int,
119 unsigned long int,
120 unsigned long long,
121 float,
122 double,
123 long double,
124 bool,
125 std::complex<float>,
126 std::complex<double>,
127 std::complex<long double>,
128 wchar_t>::value;
129
138 unsigned int
139 n_mpi_processes(const MPI_Comm mpi_communicator);
140
149 unsigned int
150 this_mpi_process(const MPI_Comm mpi_communicator);
151
156 std::vector<unsigned int>
157 mpi_processes_within_communicator(const MPI_Comm comm_large,
158 const MPI_Comm comm_small);
159
181 std::vector<unsigned int>
182 compute_point_to_point_communication_pattern(
183 const MPI_Comm mpi_comm,
184 const std::vector<unsigned int> &destinations);
185
205 unsigned int
206 compute_n_point_to_point_communications(
207 const MPI_Comm mpi_comm,
208 const std::vector<unsigned int> &destinations);
209
226 MPI_Comm
227 duplicate_communicator(const MPI_Comm mpi_communicator);
228
238 void
239 free_communicator(MPI_Comm mpi_communicator);
240
253 class DuplicatedCommunicator
254 {
255 public:
259 explicit DuplicatedCommunicator(const MPI_Comm communicator)
260 : comm(duplicate_communicator(communicator))
261 {}
262
266 DuplicatedCommunicator(const DuplicatedCommunicator &) = delete;
267
271 ~DuplicatedCommunicator()
272 {
273 free_communicator(comm);
274 }
275
279 MPI_Comm
280 operator*() const
281 {
282 return comm;
283 }
284
285
289 DuplicatedCommunicator &
290 operator=(const DuplicatedCommunicator &) = delete;
291
292 private:
296 MPI_Comm comm;
297 };
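 // Illustrative usage sketch (not part of the original header; the
 // communicator name below is a placeholder): duplicate a user-provided
 // communicator once and use the duplicate for internal messages so that
 // tags cannot collide with the caller's own communication.
 //
 //   Utilities::MPI::DuplicatedCommunicator duplicated(mpi_communicator);
 //   const int ierr = MPI_Barrier(*duplicated); // operator*() yields the MPI_Comm
 //   AssertThrowMPI(ierr);
 //
 // The duplicate is freed automatically when 'duplicated' goes out of scope.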
298
299
300
330 class CollectiveMutex
331 {
332 public:
338 class ScopedLock
339 {
340 public:
344 ScopedLock(CollectiveMutex &mutex, const MPI_Comm comm)
345 : mutex(mutex)
346 , comm(comm)
347 {
348 mutex.lock(comm);
349 }
350
354 ~ScopedLock()
355 {
356 mutex.unlock(comm);
357 }
358
359 private:
363 CollectiveMutex &mutex;
364
367 const MPI_Comm comm;
368 };
369
373 CollectiveMutex();
374
378 ~CollectiveMutex();
379
386 void
387 lock(const MPI_Comm comm);
388
395 void
396 unlock(const MPI_Comm comm);
397
398 private:
402 bool locked;
403
407 MPI_Request request;
408 };
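 // Illustrative usage sketch (not part of the original header; assumes all
 // participating processes execute this code with the same 'comm'): guard a
 // communication phase with a ScopedLock so that concurrent uses of the same
 // tags from different code paths cannot interleave.
 //
 //   static Utilities::MPI::CollectiveMutex mutex;
 //   {
 //     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
 //     // ... point-to-point communication protected by the mutex ...
 //   } // the lock is released when 'lock' goes out of scope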
409
410
411
458 template <typename T>
459 class Future
460 {
461 public:
466 template <typename W, typename G>
467 Future(W &&wait_operation, G &&get_and_cleanup_operation);
468
474 Future(const Future &) = delete;
475
479 Future(Future &&) noexcept = default;
480
484 ~Future();
485
491 Future &
492 operator=(const Future &) = delete;
493
497 Future &
498 operator=(Future &&) noexcept = default;
499
507 void
508 wait();
509
521 T
522 get();
523
524 private:
528 std::function<void()> wait_function;
529 std::function<T()> get_and_cleanup_function;
530
534 bool is_done;
535
539 bool get_was_called;
540 };
541
542
543
573#ifdef DEAL_II_WITH_MPI
574 DEAL_II_DEPRECATED int
575 create_group(const MPI_Comm comm,
576 const MPI_Group &group,
577 const int tag,
578 MPI_Comm *new_comm);
579#endif
580
589 std::vector<IndexSet>
590 create_ascending_partitioning(
591 const MPI_Comm comm,
592 const types::global_dof_index locally_owned_size);
593
601 IndexSet
602 create_evenly_distributed_partitioning(
603 const MPI_Comm comm,
604 const types::global_dof_index total_size);
605
606#ifdef DEAL_II_WITH_MPI
622 template <class Iterator, typename Number = long double>
623 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
624 mean_and_standard_deviation(const Iterator begin,
625 const Iterator end,
626 const MPI_Comm comm);
627#endif
628
629
677 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
678 create_mpi_data_type_n_bytes(const std::size_t n_bytes);
679
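 // Illustrative usage sketch (not part of the original header; 'buffer',
 // 'dest_rank', 'tag', and 'comm' are placeholders): send a buffer whose size
 // may exceed the 2^31-1 limit of an 'int' count by wrapping all bytes in a
 // single custom datatype and sending one element of that type.
 //
 //   std::vector<char> buffer = ...;
 //   const auto bigtype =
 //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
 //   const int ierr =
 //     MPI_Send(buffer.data(), 1, *bigtype, dest_rank, tag, comm);
 //   AssertThrowMPI(ierr);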
699 template <typename T>
700 T
701 sum(const T &t, const MPI_Comm mpi_communicator);
702
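 // Illustrative usage sketch (not part of the original header; the variable
 // names are placeholders): accumulate a process-local value into a global
 // one. All processes of the communicator must call this function.
 //
 //   const double local_error  = compute_local_error();
 //   const double global_error =
 //     Utilities::MPI::sum(local_error, mpi_communicator);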
712 template <typename T, typename U>
713 void
714 sum(const T &values, const MPI_Comm mpi_communicator, U &sums);
715
725 template <typename T>
726 void
727 sum(const ArrayView<const T> &values,
728 const MPI_Comm mpi_communicator,
729 const ArrayView<T> &sums);
730
736 template <int rank, int dim, typename Number>
737 SymmetricTensor<rank, dim, Number>
738 sum(const SymmetricTensor<rank, dim, Number> &local,
739 const MPI_Comm mpi_communicator);
740
746 template <int rank, int dim, typename Number>
747 Tensor<rank, dim, Number>
748 sum(const Tensor<rank, dim, Number> &local,
749 const MPI_Comm mpi_communicator);
750
759 template <typename Number>
760 void
761 sum(const SparseMatrix<Number> &local,
762 const MPI_Comm mpi_communicator,
763 SparseMatrix<Number> &global);
764
784 template <typename T>
785 T
786 max(const T &t, const MPI_Comm mpi_communicator);
787
797 template <typename T, typename U>
798 void
799 max(const T &values, const MPI_Comm mpi_communicator, U &maxima);
800
810 template <typename T>
811 void
812 max(const ArrayView<const T> &values,
813 const MPI_Comm mpi_communicator,
814 const ArrayView<T> &maxima);
815
835 template <typename T>
836 T
837 min(const T &t, const MPI_Comm mpi_communicator);
838
848 template <typename T, typename U>
849 void
850 min(const T &values, const MPI_Comm mpi_communicator, U &minima);
851
861 template <typename T>
862 void
863 min(const ArrayView<const T> &values,
864 const MPI_Comm mpi_communicator,
865 const ArrayView<T> &minima);
866
890 template <typename T>
891 T
892 logical_or(const T &t, const MPI_Comm mpi_communicator);
893
908 template <typename T, typename U>
909 void
910 logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);
911
921 template <typename T>
922 void
923 logical_or(const ArrayView<const T> &values,
924 const MPI_Comm mpi_communicator,
925 const ArrayView<T> &results);
926
941 struct MinMaxAvg
942 {
947 double sum;
948
953 double min;
954
959 double max;
960
969 unsigned int min_index;
970
979 unsigned int max_index;
980
985 double avg;
986 };
987
1002 MinMaxAvg
1003 min_max_avg(const double my_value, const MPI_Comm mpi_communicator);
1004
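 // Illustrative usage sketch (not part of the original header;
 // 'local_wall_time' and 'mpi_communicator' are placeholders): gather simple
 // statistics of one number per process in a single reduction.
 //
 //   const Utilities::MPI::MinMaxAvg stats =
 //     Utilities::MPI::min_max_avg(local_wall_time, mpi_communicator);
 //   // stats.min, stats.max, stats.avg, stats.sum are the obvious
 //   // quantities; stats.min_index and stats.max_index are the ranks
 //   // on which the extrema were attained.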
1016 std::vector<MinMaxAvg>
1017 min_max_avg(const std::vector<double> &my_value,
1018 const MPI_Comm mpi_communicator);
1019
1020
1033 void
1034 min_max_avg(const ArrayView<const double> &my_values,
1035 const ArrayView<MinMaxAvg> &result,
1036 const MPI_Comm mpi_communicator);
1037
1038
1082 class MPI_InitFinalize
1083 {
1084 public:
1130 MPI_InitFinalize(
1131 int &argc,
1132 char **&argv,
1133 const unsigned int max_num_threads = numbers::invalid_unsigned_int);
1134
1139 ~MPI_InitFinalize();
1140
1167 static void
1168 register_request(MPI_Request &request);
1169
1173 static void
1174 unregister_request(MPI_Request &request);
1175
1183 struct Signals
1184 {
1189 boost::signals2::signal<void()> at_mpi_init;
1190
1197 boost::signals2::signal<void()> at_mpi_finalize;
1198 };
1199
1201
1202 private:
1206 static std::set<MPI_Request *> requests;
1207
1208#ifdef DEAL_II_WITH_PETSC
1210#endif
1211 };
1212
1224 bool
1225 job_supports_mpi();
1226
1244 template <typename T>
1245 std::map<unsigned int, T>
1246 some_to_some(const MPI_Comm comm,
1247 const std::map<unsigned int, T> &objects_to_send);
1248
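 // Illustrative usage sketch (not part of the original header;
 // 'neighbor_rank' and 'local_data' are placeholders): every process sends
 // one object to each rank that appears as a key of its map and receives
 // whatever the other processes addressed to it.
 //
 //   std::map<unsigned int, std::vector<double>> to_send;
 //   to_send[neighbor_rank] = local_data;
 //   const std::map<unsigned int, std::vector<double>> received =
 //     Utilities::MPI::some_to_some(comm, to_send);
 //   // received[p] now holds the data process p sent to this process.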
1262 template <typename T>
1263 std::vector<T>
1264 all_gather(const MPI_Comm comm, const T &object_to_send);
1265
1281 template <typename T>
1282 std::vector<T>
1283 gather(const MPI_Comm comm,
1284 const T &object_to_send,
1285 const unsigned int root_process = 0);
1286
1301 template <typename T>
1302 T
1303 scatter(const MPI_Comm comm,
1304 const std::vector<T> &objects_to_send,
1305 const unsigned int root_process = 0);
1306
1342 template <typename T>
1343 std::enable_if_t<is_mpi_type<T> == false, T>
1344 broadcast(const MPI_Comm comm,
1345 const T &object_to_send,
1346 const unsigned int root_process = 0);
1347
1370 template <typename T>
1371 std::enable_if_t<is_mpi_type<T> == true, T>
1372 broadcast(const MPI_Comm comm,
1373 const T &object_to_send,
1374 const unsigned int root_process = 0);
1375
1392 template <typename T>
1393 void
1394 broadcast(T *buffer,
1395 const size_t count,
1396 const unsigned int root,
1397 const MPI_Comm comm);
1398
1411 template <typename T>
1412 T
1413 reduce(const T &local_value,
1414 const MPI_Comm comm,
1415 const std::function<T(const T &, const T &)> &combiner,
1416 const unsigned int root_process = 0);
1417
1418
1430 template <typename T, typename = std::enable_if_t<is_mpi_type<T> == true>>
1431 std::pair<T, T>
1432 partial_and_total_sum(const T &value, const MPI_Comm comm);
1433
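 // Illustrative usage sketch (not part of the original header; 'n_local' and
 // 'comm' are placeholders): compute the exclusive prefix sum and the total
 // at the same time, e.g. to assign contiguous global index ranges. With
 // values 3, 1, 2 on ranks 0, 1, 2 the results are {0,6}, {3,6}, {4,6}.
 //
 //   const auto [my_offset, total] =
 //     Utilities::MPI::partial_and_total_sum(n_local, comm);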
1434
1444 template <typename T>
1445 T
1446 all_reduce(const T &local_value,
1447 const MPI_Comm comm,
1448 const std::function<T(const T &, const T &)> &combiner);
1449
1450
1471 template <typename T>
1472 Future<void>
1473 isend(const T &object,
1474 MPI_Comm communicator,
1475 const unsigned int target_rank,
1476 const unsigned int mpi_tag = 0);
1477
1478
1495 template <typename T>
1496 Future<T>
1497 irecv(MPI_Comm communicator,
1498 const unsigned int source_rank,
1499 const unsigned int mpi_tag = 0);
1500
1501
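 // Illustrative usage sketch (not part of the original header; 'DataType',
 // 'my_data', and 'comm' are placeholders): post a non-blocking send on rank
 // 0 and the matching receive on rank 1. Future<T>::get() blocks until the
 // message has arrived and returns the unpacked object.
 //
 //   if (Utilities::MPI::this_mpi_process(comm) == 0)
 //     {
 //       auto future = Utilities::MPI::isend(my_data, comm, /*target_rank=*/1);
 //       future.wait(); // or simply let the Future's destructor finish the send
 //     }
 //   else if (Utilities::MPI::this_mpi_process(comm) == 1)
 //     {
 //       auto future       = Utilities::MPI::irecv<DataType>(comm, /*source_rank=*/0);
 //       const DataType d  = future.get();
 //     }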
1544 std::vector<unsigned int>
1545 compute_index_owner(const IndexSet &owned_indices,
1546 const IndexSet &indices_to_look_up,
1547 const MPI_Comm comm);
1548
1556 template <typename T>
1557 std::vector<T>
1558 compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);
1559
1563 template <typename T>
1564 std::set<T>
1565 compute_set_union(const std::set<T> &set, const MPI_Comm comm);
1566
1567
1568
1569 /* --------------------------- inline functions ------------------------- */
1570
1571 namespace internal
1572 {
1578 namespace MPIDataTypes
1579 {
1580#ifdef DEAL_II_WITH_MPI
1581 inline MPI_Datatype
1582 mpi_type_id(const bool *)
1583 {
1584 return MPI_CXX_BOOL;
1585 }
1586
1587
1588
1589 inline MPI_Datatype
1590 mpi_type_id(const char *)
1591 {
1592 return MPI_CHAR;
1593 }
1594
1595
1596
1597 inline MPI_Datatype
1598 mpi_type_id(const signed char *)
1599 {
1600 return MPI_SIGNED_CHAR;
1601 }
1602
1603
1604
1605 inline MPI_Datatype
1606 mpi_type_id(const wchar_t *)
1607 {
1608 return MPI_WCHAR;
1609 }
1610
1611
1612
1613 inline MPI_Datatype
1614 mpi_type_id(const short *)
1615 {
1616 return MPI_SHORT;
1617 }
1618
1619
1620
1621 inline MPI_Datatype
1622 mpi_type_id(const int *)
1623 {
1624 return MPI_INT;
1625 }
1626
1627
1628
1629 inline MPI_Datatype
1630 mpi_type_id(const long int *)
1631 {
1632 return MPI_LONG;
1633 }
1634
1635
1636
1637 inline MPI_Datatype
1638 mpi_type_id(const long long int *)
1639 {
1640 return MPI_LONG_LONG;
1641 }
1642
1643
1644
1645 inline MPI_Datatype
1646 mpi_type_id(const unsigned char *)
1647 {
1648 return MPI_UNSIGNED_CHAR;
1649 }
1650
1651
1652
1653 inline MPI_Datatype
1654 mpi_type_id(const unsigned short *)
1655 {
1656 return MPI_UNSIGNED_SHORT;
1657 }
1658
1659
1660
1661 inline MPI_Datatype
1662 mpi_type_id(const unsigned int *)
1663 {
1664 return MPI_UNSIGNED;
1665 }
1666
1667
1668
1669 inline MPI_Datatype
1670 mpi_type_id(const unsigned long int *)
1671 {
1672 return MPI_UNSIGNED_LONG;
1673 }
1674
1675
1676
1677 inline MPI_Datatype
1678 mpi_type_id(const unsigned long long int *)
1679 {
1680 return MPI_UNSIGNED_LONG_LONG;
1681 }
1682
1683
1684
1685 inline MPI_Datatype
1686 mpi_type_id(const float *)
1687 {
1688 return MPI_FLOAT;
1689 }
1690
1691
1692
1693 inline MPI_Datatype
1694 mpi_type_id(const double *)
1695 {
1696 return MPI_DOUBLE;
1697 }
1698
1699
1700
1701 inline MPI_Datatype
1702 mpi_type_id(const long double *)
1703 {
1704 return MPI_LONG_DOUBLE;
1705 }
1706
1707
1708
1709 inline MPI_Datatype
1710 mpi_type_id(const std::complex<float> *)
1711 {
1712 return MPI_COMPLEX;
1713 }
1714
1715
1716
1717 inline MPI_Datatype
1718 mpi_type_id(const std::complex<double> *)
1719 {
1720 return MPI_DOUBLE_COMPLEX;
1721 }
1722#endif
1723 } // namespace MPIDataTypes
1724 } // namespace internal
1725
1726
1727
1728#ifdef DEAL_II_WITH_MPI
1746 template <typename T>
1747 inline const MPI_Datatype mpi_type_id_for_type =
1748 internal::MPIDataTypes::mpi_type_id(
1749 static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
1750#endif
1751
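 // Illustrative usage sketch (not part of the original header; 'comm' is a
 // placeholder): the variable template supplies the correct MPI_Datatype for
 // a C++ type when calling raw MPI functions directly.
 //
 //   int local = 42, global = 0;
 //   const int ierr = MPI_Allreduce(&local,
 //                                  &global,
 //                                  1,
 //                                  Utilities::MPI::mpi_type_id_for_type<int>,
 //                                  MPI_SUM,
 //                                  comm);
 //   AssertThrowMPI(ierr);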
1752#ifndef DOXYGEN
1753 namespace internal
1754 {
1755 // declaration for an internal function that lives in mpi.templates.h
1756 template <typename T>
1757 void
1758 all_reduce(const MPI_Op &mpi_op,
1759 const ArrayView<const T> &values,
1760 const MPI_Comm mpi_communicator,
1761 const ArrayView<T> &output);
1762 } // namespace internal
1763
1764
1765 template <typename T>
1766 template <typename W, typename G>
1767 Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
1768 : wait_function(wait_operation)
1769 , get_and_cleanup_function(get_and_cleanup_operation)
1770 , is_done(false)
1771 , get_was_called(false)
1772 {}
1773
1774
1775
1776 template <typename T>
1777 Future<T>::~Future()
1778 {
1779 // If there is a clean-up function, and if it has not been
1780 // called yet, then do so. Note that we may not have a
1781 // clean-up function (not even an empty one) if the current
1782 // object has been moved from, into another object, and as
1783 // a consequence the std::function objects are now empty
1784 // even though they were initialized in the constructor.
1785 // (A std::function object that wraps an empty lambda
1786 // function, [](){}, is not an empty std::function object.)
1787 if ((get_was_called == false) && get_and_cleanup_function)
1788 get();
1789 }
1790
1791
1792
1793 template <typename T>
1794 void
1795 Future<T>::wait()
1796 {
1797 if (is_done == false)
1798 {
1799 wait_function();
1800
1801 is_done = true;
1802 }
1803 }
1804
1805
1806 template <typename T>
1807 T
1808 Future<T>::get()
1809 {
1810 Assert(get_was_called == false,
1811 ExcMessage(
1812 "You can't call get() more than once on a Future object."));
1813 get_was_called = true;
1814
1815 wait();
1816 return get_and_cleanup_function();
1817 }
1818
1819
1820
1821 template <typename T, unsigned int N>
1822 void
1823 sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
1824 {
1825 internal::all_reduce(MPI_SUM,
1826 ArrayView<const T>(values, N),
1827 mpi_communicator,
1828 ArrayView<T>(sums, N));
1829 }
1830
1831
1832
1833 template <typename T, unsigned int N>
1834 void
1835 max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
1836 {
1837 internal::all_reduce(MPI_MAX,
1838 ArrayView<const T>(values, N),
1839 mpi_communicator,
1840 ArrayView<T>(maxima, N));
1841 }
1842
1843
1844
1845 template <typename T, unsigned int N>
1846 void
1847 min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
1848 {
1849 internal::all_reduce(MPI_MIN,
1850 ArrayView<const T>(values, N),
1851 mpi_communicator,
1852 ArrayView<T>(minima, N));
1853 }
1854
1855
1856
1857 template <typename T, unsigned int N>
1858 void
1859 logical_or(const T (&values)[N],
1860 const MPI_Comm mpi_communicator,
1861 T (&results)[N])
1862 {
1863 static_assert(std::is_integral_v<T>,
1864 "The MPI_LOR operation only allows integral data types.");
1865
1866 internal::all_reduce(MPI_LOR,
1867 ArrayView<const T>(values, N),
1868 mpi_communicator,
1869 ArrayView<T>(results, N));
1870 }
1871
1872
1873
1874 template <typename T>
1875 std::map<unsigned int, T>
1876 some_to_some(const MPI_Comm comm,
1877 const std::map<unsigned int, T> &objects_to_send)
1878 {
1879# ifndef DEAL_II_WITH_MPI
1880 (void)comm;
1881 Assert(objects_to_send.size() < 2,
1882 ExcMessage("Cannot send to more than one processor."));
1883 Assert(objects_to_send.find(0) != objects_to_send.end() ||
1884 objects_to_send.empty(),
1885 ExcMessage("Can only send to myself or to nobody."));
1886 return objects_to_send;
1887# else
1888 const auto my_proc = this_mpi_process(comm);
1889
1890 std::map<unsigned int, T> received_objects;
1891
1892 std::vector<unsigned int> send_to;
1893 send_to.reserve(objects_to_send.size());
1894 for (const auto &m : objects_to_send)
1895 if (m.first == my_proc)
1896 received_objects[my_proc] = m.second;
1897 else
1898 send_to.emplace_back(m.first);
1899
1900 const unsigned int n_expected_incoming_messages =
1901 Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);
1902
1903 // Protect the following communication:
1904 static CollectiveMutex mutex;
1905 CollectiveMutex::ScopedLock lock(mutex, comm);
1906
1907 // If we have something to send, or we expect something from other
1908 // processors, we need to visit one of the two scopes below. Otherwise,
1909 // no other action is required by this mpi process, and we can safely
1910 // return.
1911 if (send_to.empty() && n_expected_incoming_messages == 0)
1912 return received_objects;
1913
1914 const int mpi_tag =
1915 internal::Tags::compute_point_to_point_communication_pattern;
1916
1917 // Sending buffers
1918 std::vector<std::vector<char>> buffers_to_send(send_to.size());
1919 std::vector<MPI_Request> buffer_send_requests(send_to.size());
1920 {
1921 unsigned int i = 0;
1922 for (const auto &rank_obj : objects_to_send)
1923 if (rank_obj.first != my_proc)
1924 {
1925 const auto &rank = rank_obj.first;
1926 buffers_to_send[i] = Utilities::pack(rank_obj.second,
1927 /*allow_compression=*/false);
1928 const int ierr = MPI_Isend(buffers_to_send[i].data(),
1929 buffers_to_send[i].size(),
1930 MPI_CHAR,
1931 rank,
1932 mpi_tag,
1933 comm,
1934 &buffer_send_requests[i]);
1935 AssertThrowMPI(ierr);
1936 ++i;
1937 }
1938 }
1939
1940 // Fill the output map
1941 {
1942 std::vector<char> buffer;
1943 // We do this on a first come/first served basis
1944 for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
1945 {
1946 // Probe what's going on. Take data from the first available sender
1947 MPI_Status status;
1948 int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
1949 AssertThrowMPI(ierr);
1950
1951 // Length of the message
1952 int len;
1953 ierr = MPI_Get_count(&status, MPI_CHAR, &len);
1954 AssertThrowMPI(ierr);
1955 buffer.resize(len);
1956
1957 // Source rank
1958 const unsigned int rank = status.MPI_SOURCE;
1959
1960 // Actually receive the message
1961 ierr = MPI_Recv(buffer.data(),
1962 len,
1963 MPI_CHAR,
1964 status.MPI_SOURCE,
1965 status.MPI_TAG,
1966 comm,
1967 MPI_STATUS_IGNORE);
1968 AssertThrowMPI(ierr);
1969 Assert(received_objects.find(rank) == received_objects.end(),
1971 "I should not receive again from this rank"));
1972 received_objects[rank] =
1973 Utilities::unpack<T>(buffer,
1974 /*allow_compression=*/false);
1975 }
1976 }
1977
1978 // Wait to have sent all objects.
1979 const int ierr = MPI_Waitall(send_to.size(),
1980 buffer_send_requests.data(),
1981 MPI_STATUSES_IGNORE);
1982 AssertThrowMPI(ierr);
1983
1984 return received_objects;
1985# endif // deal.II with MPI
1986 }
1987
1988
1989
1990 template <typename T, typename>
1991 std::pair<T, T>
1992 partial_and_total_sum(const T &value, const MPI_Comm comm)
1993 {
1994# ifndef DEAL_II_WITH_MPI
1995 (void)comm;
1996 return {0, value};
1997# else
1998 if (n_mpi_processes(comm) == 1)
1999 return {0, value};
2000 else
2001 {
2002 T prefix = {};
2003
2004 // First obtain every process's prefix sum:
2005 int ierr =
2006 MPI_Exscan(&value,
2007 &prefix,
2008 1,
2009 Utilities::MPI::mpi_type_id_for_type<decltype(value)>,
2010 MPI_SUM,
2011 comm);
2012 AssertThrowMPI(ierr);
2013
2014 // Then we also need the total sum. We could obtain it by
2015 // calling Utilities::MPI::sum(), but it is cheaper if we
2016 // broadcast it from the last process, which can compute it
2017 // from its own prefix sum plus its own value.
2018 const T sum = Utilities::MPI::broadcast(
2019 comm, prefix + value, Utilities::MPI::n_mpi_processes(comm) - 1);
2020
2021 return {prefix, sum};
2022 }
2023# endif
2024 }
2025
2026
2027
2028 template <typename T>
2029 std::vector<T>
2030 all_gather(const MPI_Comm comm, const T &object)
2031 {
2032 if (job_supports_mpi() == false)
2033 return {object};
2034
2035# ifndef DEAL_II_WITH_MPI
2036 (void)comm;
2037 std::vector<T> v(1, object);
2038 return v;
2039# else
2040 const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
2041
2042 std::vector<char> buffer = Utilities::pack(object);
2043
2044 int n_local_data = buffer.size();
2045
2046 // Vector to store the size of the local send buffer of every process
2047 std::vector<int> size_all_data(n_procs, 0);
2048
2049 // Exchanging the size of each buffer
2050 int ierr = MPI_Allgather(
2051 &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
2052 AssertThrowMPI(ierr);
2053
2054 // Now computing the displacement, relative to recvbuf,
2055 // at which to store the incoming buffer
2056 std::vector<int> rdispls(n_procs);
2057 rdispls[0] = 0;
2058 for (unsigned int i = 1; i < n_procs; ++i)
2059 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2060
2061 // Step 3: exchange the buffer:
2062 std::vector<char> received_unrolled_buffer(rdispls.back() +
2063 size_all_data.back());
2064
2065 ierr = MPI_Allgatherv(buffer.data(),
2066 n_local_data,
2067 MPI_CHAR,
2068 received_unrolled_buffer.data(),
2069 size_all_data.data(),
2070 rdispls.data(),
2071 MPI_CHAR,
2072 comm);
2073 AssertThrowMPI(ierr);
2074
2075 std::vector<T> received_objects(n_procs);
2076 for (unsigned int i = 0; i < n_procs; ++i)
2077 {
2078 std::vector<char> local_buffer(received_unrolled_buffer.begin() +
2079 rdispls[i],
2080 received_unrolled_buffer.begin() +
2081 rdispls[i] + size_all_data[i]);
2082 received_objects[i] = Utilities::unpack<T>(local_buffer);
2083 }
2084
2085 return received_objects;
2086# endif
2087 }
2088
2089
2090
2091 template <typename T>
2092 std::vector<T>
2093 gather(const MPI_Comm comm,
2094 const T &object_to_send,
2095 const unsigned int root_process)
2096 {
2097# ifndef DEAL_II_WITH_MPI
2098 (void)comm;
2099 (void)root_process;
2100 std::vector<T> v(1, object_to_send);
2101 return v;
2102# else
2103 const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
2104 const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
2105
2106 AssertIndexRange(root_process, n_procs);
2107
2108 std::vector<char> buffer = Utilities::pack(object_to_send);
2109 int n_local_data = buffer.size();
2110
2111 // Vector to store the size of the local send buffer of every process;
2112 // only the root process needs to allocate memory for that purpose
2113 std::vector<int> size_all_data;
2114 if (my_rank == root_process)
2115 size_all_data.resize(n_procs, 0);
2116
2117 // Exchanging the size of each buffer
2118 int ierr = MPI_Gather(&n_local_data,
2119 1,
2120 MPI_INT,
2121 size_all_data.data(),
2122 1,
2123 MPI_INT,
2124 root_process,
2125 comm);
2126 AssertThrowMPI(ierr);
2127
2128 // Now computing the displacement, relative to recvbuf,
2129 // at which to store the incoming buffer; only for root
2130 std::vector<int> rdispls;
2131 if (my_rank == root_process)
2132 {
2133 rdispls.resize(n_procs, 0);
2134 for (unsigned int i = 1; i < n_procs; ++i)
2135 rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
2136 }
2137 // exchange the buffer:
2138 std::vector<char> received_unrolled_buffer;
2139 if (my_rank == root_process)
2140 received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
2141
2142 ierr = MPI_Gatherv(buffer.data(),
2143 n_local_data,
2144 MPI_CHAR,
2145 received_unrolled_buffer.data(),
2146 size_all_data.data(),
2147 rdispls.data(),
2148 MPI_CHAR,
2149 root_process,
2150 comm);
2151 AssertThrowMPI(ierr);
2152
2153 std::vector<T> received_objects;
2154
2155 if (my_rank == root_process)
2156 {
2157 received_objects.resize(n_procs);
2158
2159 for (unsigned int i = 0; i < n_procs; ++i)
2160 {
2161 const std::vector<char> local_buffer(
2162 received_unrolled_buffer.begin() + rdispls[i],
2163 received_unrolled_buffer.begin() + rdispls[i] +
2164 size_all_data[i]);
2165 received_objects[i] = Utilities::unpack<T>(local_buffer);
2166 }
2167 }
2168 return received_objects;
2169# endif
2170 }
2171
2172
2173
2174 template <typename T>
2175 T
2176 scatter(const MPI_Comm comm,
2177 const std::vector<T> &objects_to_send,
2178 const unsigned int root_process)
2179 {
2180# ifndef DEAL_II_WITH_MPI
2181 (void)comm;
2182 (void)root_process;
2183
2184 AssertDimension(objects_to_send.size(), 1);
2185
2186 return objects_to_send[0];
2187# else
2188 const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
2189 const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
2190
2191 AssertIndexRange(root_process, n_procs);
2192 AssertThrow(
2193 (my_rank != root_process && objects_to_send.empty()) ||
2194 objects_to_send.size() == n_procs,
2195 ExcMessage(
2196 "The number of objects to be scattered must correspond to the number processes."));
2197
2198 std::vector<char> send_buffer;
2199 std::vector<int> send_counts;
2200 std::vector<int> send_displacements;
2201
2202 if (my_rank == root_process)
2203 {
2204 send_counts.resize(n_procs, 0);
2205 send_displacements.resize(n_procs + 1, 0);
2206
2207 for (unsigned int i = 0; i < n_procs; ++i)
2208 {
2209 const auto packed_data = Utilities::pack(objects_to_send[i]);
2210 send_buffer.insert(send_buffer.end(),
2211 packed_data.begin(),
2212 packed_data.end());
2213 send_counts[i] = packed_data.size();
2214 }
2215
2216 for (unsigned int i = 0; i < n_procs; ++i)
2217 send_displacements[i + 1] = send_displacements[i] + send_counts[i];
2218 }
2219
2220 int n_local_data;
2221 int ierr = MPI_Scatter(send_counts.data(),
2222 1,
2223 MPI_INT,
2224 &n_local_data,
2225 1,
2226 MPI_INT,
2227 root_process,
2228 comm);
2229 AssertThrowMPI(ierr);
2230
2231 std::vector<char> recv_buffer(n_local_data);
2232
2233 ierr = MPI_Scatterv(send_buffer.data(),
2234 send_counts.data(),
2235 send_displacements.data(),
2236 MPI_CHAR,
2237 recv_buffer.data(),
2238 n_local_data,
2239 MPI_CHAR,
2240 root_process,
2241 comm);
2242 AssertThrowMPI(ierr);
2243
2244 return Utilities::unpack<T>(recv_buffer);
2245# endif
2246 }
2247
2248
2249 template <typename T>
2250 void
2251 broadcast(T *buffer,
2252 const size_t count,
2253 const unsigned int root,
2254 const MPI_Comm comm)
2255 {
2256# ifndef DEAL_II_WITH_MPI
2257 (void)buffer;
2258 (void)count;
2259 (void)root;
2260 (void)comm;
2261# else
2262 Assert(root < n_mpi_processes(comm),
2263 ExcMessage("Invalid root rank specified."));
2264
2265 // MPI_Bcast's count is a signed int, so send at most 2^31-1 elements in
2266 // each iteration:
2267 const size_t max_send_count = std::numeric_limits<signed int>::max();
2268
2269 size_t total_sent_count = 0;
2270 while (total_sent_count < count)
2271 {
2272 const size_t current_count =
2273 std::min(count - total_sent_count, max_send_count);
2274
2275 const int ierr = MPI_Bcast(buffer + total_sent_count,
2276 current_count,
2277 mpi_type_id_for_type<decltype(*buffer)>,
2278 root,
2279 comm);
2280 AssertThrowMPI(ierr);
2281 total_sent_count += current_count;
2282 }
2283# endif
2284 }
2285
2286
2287
2288 template <typename T>
2289 std::enable_if_t<is_mpi_type<T> == false, T>
2290 broadcast(const MPI_Comm comm,
2291 const T &object_to_send,
2292 const unsigned int root_process)
2293 {
2294# ifndef DEAL_II_WITH_MPI
2295 (void)comm;
2296 (void)root_process;
2297 return object_to_send;
2298# else
2299 const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
2300 AssertIndexRange(root_process, n_procs);
2301 (void)n_procs;
2302
2303 std::vector<char> buffer;
2304 std::size_t buffer_size = numbers::invalid_size_type;
2305
2306 // On the root process, pack the data and determine what the
2307 // buffer size needs to be.
2308 if (this_mpi_process(comm) == root_process)
2309 {
2310 buffer = Utilities::pack(object_to_send, false);
2311 buffer_size = buffer.size();
2312 }
2313
2314 // Exchange the size of buffer
2315 int ierr = MPI_Bcast(&buffer_size,
2316 1,
2317 mpi_type_id_for_type<decltype(buffer_size)>,
2318 root_process,
2319 comm);
2320 AssertThrowMPI(ierr);
2321
2322 // If not on the root process, correctly size the buffer to
2323 // receive the data, then do exactly that.
2324 if (this_mpi_process(comm) != root_process)
2325 buffer.resize(buffer_size);
2326
2327 broadcast(buffer.data(), buffer_size, root_process, comm);
2328
2329 if (Utilities::MPI::this_mpi_process(comm) == root_process)
2330 return object_to_send;
2331 else
2332 return Utilities::unpack<T>(buffer, false);
2333# endif
2334 }
2335
2336
2337
2338 template <typename T>
2339 std::enable_if_t<is_mpi_type<T> == true, T>
2340 broadcast(const MPI_Comm comm,
2341 const T &object_to_send,
2342 const unsigned int root_process)
2343 {
2344# ifndef DEAL_II_WITH_MPI
2345 (void)comm;
2346 (void)root_process;
2347 return object_to_send;
2348# else
2349
2350 T object = object_to_send;
2351 int ierr =
2352 MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
2353 AssertThrowMPI(ierr);
2354
2355 return object;
2356# endif
2357 }
2358
2359
2360 template <typename T>
2361 Future<void>
2362 isend(const T &object,
2363 MPI_Comm communicator,
2364 const unsigned int target_rank,
2365 const unsigned int mpi_tag)
2366 {
2367# ifndef DEAL_II_WITH_MPI
2368 Assert(false, ExcNeedsMPI());
2369 (void)object;
2370 (void)communicator;
2371 (void)target_rank;
2372 (void)mpi_tag;
2373 return Future<void>([]() {}, []() {});
2374# else
2375 // Create a pointer to a send buffer into which we pack the object
2376 // to be sent. The buffer will be released by the Future object once
2377 // the send has been verified to have succeeded.
2378 //
2379 // Conceptually, we would like this send buffer to be a
2380 // std::unique_ptr object whose ownership is later handed over
2381 // to the cleanup function. That has the disadvantage that the
2382 // cleanup object is a non-copyable lambda capture, leading to
2383 // awkward semantics. Instead, we use a std::shared_ptr; we move
2384 // this shared pointer into the cleanup function, which means
2385 // that there is exactly one shared pointer who owns the buffer
2386 // at any given time, though the latter is not an important
2387 // optimization.
2388 std::shared_ptr<std::vector<char>> send_buffer =
2389 std::make_unique<std::vector<char>>(Utilities::pack(object, false));
2390
2391 // Now start the send, and store the result in a request object that
2392 // we can then wait for later:
2393 MPI_Request request;
2394 const int ierr =
2395 MPI_Isend(send_buffer->data(),
2396 send_buffer->size(),
2397 mpi_type_id_for_type<decltype(*send_buffer->data())>,
2398 target_rank,
2399 mpi_tag,
2400 communicator,
2401 &request);
2402 AssertThrowMPI(ierr);
2403
2404 // Then return a std::future-like object that has a wait()
2405 // function one can use to wait for the communication to finish,
2406 // and that has a cleanup function to be called at some point
2407 // after that makes sure the send buffer gets deallocated. This
2408 // cleanup function takes over ownership of the send buffer.
2409 //
2410 // Note that the body of the lambda function of the clean-up
2411 // function could be left empty. If that were so, once the
2412 // lambda function object goes out of scope, the 'send_buffer'
2413 // member of the closure object goes out of scope as well and so
2414 // the send_buffer is destroyed. But we may want to release the
2415 // buffer itself as early as possible, and so we clear the
2416 // buffer when the Future::get() function is called.
2417 auto wait = [request]() mutable {
2418 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
2419 AssertThrowMPI(ierr);
2420 };
2421 auto cleanup = [send_buffer = std::move(send_buffer)]() {
2422 send_buffer->clear();
2423 };
2424 return Future<void>(wait, cleanup);
2425# endif
2426 }
2427
2428
2429
2430 template <typename T>
2431 Future<T>
2432 irecv(MPI_Comm communicator,
2433 const unsigned int source_rank,
2434 const unsigned int mpi_tag)
2435 {
2436# ifndef DEAL_II_WITH_MPI
2437 Assert(false, ExcNeedsMPI());
2438 (void)communicator;
2439 (void)source_rank;
2440 (void)mpi_tag;
2441 return Future<T>([]() {}, []() { return T{}; });
2442# else
2443 // Use a 'probe' operation for the 'wait' operation of the
2444 // Future this function returns. It will trigger whenever we get
2445 // the incoming message. Later, once we have received the message, we
2446 // can query its size and allocate a receiver buffer.
2447 //
2448 // Since we may be waiting for multiple messages from the same
2449 // incoming process (with possibly the same tag -- we can't
2450 // know), we must make sure that the 'probe' operation we have
2451 // here (and which we use to determine the buffer size) matches
2452 // the 'recv' operation with which we actually get the data
2453 // later on. This is exactly what the 'MPI_Mprobe' function and
2454 // its 'I'mmediate variant are there for, coupled with the
2455 // 'MPI_Mrecv' call that we put into the clean-up function
2456 // below.
2457 std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
2458 std::shared_ptr<MPI_Status> status = std::make_shared<MPI_Status>();
2459
2460 auto wait = [source_rank, mpi_tag, communicator, message, status]() {
2461 const int ierr = MPI_Mprobe(
2462 source_rank, mpi_tag, communicator, message.get(), status.get());
2463 AssertThrowMPI(ierr);
2464 };
2465
2466
2467 // Now also define the function that actually gets the data:
2468 auto get = [status, message]() {
2469 int number_amount;
2470 int ierr;
2471 ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
2472 AssertThrowMPI(ierr);
2473
2474 std::vector<char> receive_buffer(number_amount);
2475
2476 // Then actually get the data, using the matching MPI_Mrecv to the above
2477 // MPI_Mprobe:
2478 ierr = MPI_Mrecv(receive_buffer.data(),
2479 number_amount,
2480 mpi_type_id_for_type<decltype(*receive_buffer.data())>,
2481 message.get(),
2482 status.get());
2483 AssertThrowMPI(ierr);
2484
2485 // Return the unpacked object:
2486 return Utilities::unpack<T>(receive_buffer, false);
2487 };
2488
2489 return Future<T>(wait, get);
2490# endif
2491 }
2492
2493
2494
2495# ifdef DEAL_II_WITH_MPI
2496 template <class Iterator, typename Number>
2497 std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
2498 mean_and_standard_deviation(const Iterator begin,
2499 const Iterator end,
2500 const MPI_Comm comm)
2501 {
2502 // Below we use a simple and straightforward implementation. More elaborate
2503 // options are:
2504 // http://dx.doi.org/10.1145/2807591.2807644 section 3.1.2
2505 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Welford's_online_algorithm
2506 // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online
2507 using Std = typename numbers::NumberTraits<Number>::real_type;
2508 const Number sum = std::accumulate(begin, end, Number(0.));
2509
2510 const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
2511 Assert(size > 0, ExcDivideByZero());
2512 const Number mean =
2513 Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);
2514 Std sq_sum = 0.;
2515 std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
2516 sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
2517 });
2518 sq_sum = Utilities::MPI::sum(sq_sum, comm);
2519 return std::make_pair(mean,
2520 std::sqrt(sq_sum / static_cast<Std>(size - 1)));
2521 }
2522# endif
2523
2524#endif
2525 } // end of namespace MPI
2526} // end of namespace Utilities
2527
2528
2529DEAL_II_NAMESPACE_CLOSE
2530
2531#endif