// Excerpt from the deal.II header <deal.II/base/mpi.h>, which provides the
// utilities in namespace Utilities::MPI. Elided passages are marked "// ...".

#include <boost/signals2.hpp>

// ...

#ifdef DEAL_II_WITH_MPI
#  define DEAL_II_MPI_CONST_CAST(expr) (expr)
// ...
#endif

// ...
// Forward declarations:
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <int rank, int dim, typename Number>
class Tensor;
template <typename Number>
class SparseMatrix;

// ...
IndexSet
create_evenly_distributed_partitioning(
  const unsigned int            my_partition_id,
  const unsigned int            n_partitions,
  const types::global_dof_index total_size);

// ...

// is_mpi_type<T> is true if T maps directly to an MPI datatype (the
// built-in arithmetic types, bool, and the std::complex variants):
template <typename T>
constexpr bool is_mpi_type = /* ... the built-in types ...,
                                std::complex<double>,
                                std::complex<long double>, ... */;

// ...

const std::vector<unsigned int>
mpi_processes_within_communicator(const MPI_Comm comm_large,
                                  const MPI_Comm comm_small);

// ...

std::vector<unsigned int>
compute_point_to_point_communication_pattern(
  const MPI_Comm                   mpi_comm,
  const std::vector<unsigned int> &destinations);

unsigned int
compute_n_point_to_point_communications(
  const MPI_Comm                   mpi_comm,
  const std::vector<unsigned int> &destinations);
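/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): given the ranks a process wants to send to, the two functions
 * above let it discover who will send to it, or just how many messages to
 * expect. It assumes MPI has been initialized, e.g. via
 * Utilities::MPI::MPI_InitFinalize; the wrapper function name is
 * illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * void exchange_pattern_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   // Ranks this process will send a message to:
 *   const std::vector<unsigned int> destinations = {0, 2};
 *
 *   // Ranks that will send a message to this process:
 *   const std::vector<unsigned int> sources =
 *     Utilities::MPI::compute_point_to_point_communication_pattern(
 *       comm, destinations);
 *
 *   // Or, if only the number of incoming messages is needed:
 *   const unsigned int n_incoming =
 *     Utilities::MPI::compute_n_point_to_point_communications(comm,
 *                                                             destinations);
 * }
 * @endcode
 */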
// A class akin to std::future, representing the result of a possibly
// still-running (MPI) operation:
template <typename T>
class Future
{
public:
  template <typename W, typename G>
  Future(W &&wait_operation, G &&get_and_cleanup_operation);

  // ...
};

// ...
#ifdef DEAL_II_WITH_MPI
int
create_group(const MPI_Comm   comm,
             const MPI_Group &group,
             const int        tag,
             MPI_Comm        *new_comm);
#endif

std::vector<IndexSet>
create_ascending_partitioning(
  const MPI_Comm                comm,
  const types::global_dof_index locally_owned_size);

IndexSet
create_evenly_distributed_partitioning(
  const MPI_Comm                comm,
  const types::global_dof_index total_size);

#ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number = long double>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator begin,
                            const Iterator end,
                            const MPI_Comm comm);
#endif

// ...

std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
create_mpi_data_type_n_bytes(const std::size_t n_bytes);
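/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header) for the partitioning helpers above. The wrapper function name is
 * illustrative only.
 *
 * @code
 * #include <deal.II/base/index_set.h>
 * #include <deal.II/base/mpi.h>
 *
 * void partitioning_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   // Every process states how many indices it wants to own; the result
 *   // is one contiguous IndexSet per process, in ascending rank order:
 *   const std::vector<IndexSet> partition =
 *     Utilities::MPI::create_ascending_partitioning(comm, 25);
 *
 *   // An even split of a fixed global size, by contrast, returns only
 *   // the IndexSet owned by the calling process:
 *   const IndexSet my_part =
 *     Utilities::MPI::create_evenly_distributed_partitioning(comm, 100);
 * }
 * @endcode
 */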
template <typename T>
T
sum(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
sum(const T &values, const MPI_Comm mpi_communicator, U &sums);

template <typename T>
void
sum(const ArrayView<const T> &values,
    const MPI_Comm            mpi_communicator,
    const ArrayView<T>       &sums);

template <int rank, int dim, typename Number>
SymmetricTensor<rank, dim, Number>
sum(const SymmetricTensor<rank, dim, Number> &local,
    const MPI_Comm                             mpi_communicator);

template <int rank, int dim, typename Number>
Tensor<rank, dim, Number>
sum(const Tensor<rank, dim, Number> &local,
    const MPI_Comm                   mpi_communicator);

template <typename Number>
void
sum(const SparseMatrix<Number> &local,
    const MPI_Comm              mpi_communicator,
    SparseMatrix<Number>       &global);

template <typename T>
T
max(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
max(const T &values, const MPI_Comm mpi_communicator, U &maxima);

template <typename T>
void
max(const ArrayView<const T> &values,
    const MPI_Comm            mpi_communicator,
    const ArrayView<T>       &maxima);

template <typename T>
T
min(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
min(const T &values, const MPI_Comm mpi_communicator, U &minima);

template <typename T>
void
min(const ArrayView<const T> &values,
    const MPI_Comm            mpi_communicator,
    const ArrayView<T>       &minima);

// logical_or requires an integral data type (see the static_assert in the
// implementation below):
template <typename T>
T
logical_or(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);

template <typename T>
void
logical_or(const ArrayView<const T> &values,
           const MPI_Comm            mpi_communicator,
           const ArrayView<T>       &results);

// ...

MinMaxAvg
min_max_avg(const double my_value, const MPI_Comm mpi_communicator);

std::vector<MinMaxAvg>
min_max_avg(const std::vector<double> &my_values,
            const MPI_Comm             mpi_communicator);
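/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header) for the scalar reduction functions above. The wrapper function
 * name is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * void reduction_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   // Some per-rank number:
 *   const double local_value =
 *     1.0 + Utilities::MPI::this_mpi_process(comm);
 *
 *   const double total = Utilities::MPI::sum(local_value, comm);
 *   const double worst = Utilities::MPI::max(local_value, comm);
 *
 *   // Minimum, maximum, average, and the ranks attaining the extrema,
 *   // all in one collective operation:
 *   const Utilities::MPI::MinMaxAvg stats =
 *     Utilities::MPI::min_max_avg(local_value, comm);
 * }
 * @endcode
 */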
// (static members of class MPI_InitFinalize, which track MPI_Request
// objects that must be cleaned up before MPI_Finalize is called:)
static void
register_request(MPI_Request &request);

static void
unregister_request(MPI_Request &request);

// ...

#ifdef DEAL_II_WITH_PETSC
// ...
#endif

// ...
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm                   comm,
             const std::map<unsigned int, T> &objects_to_send);
template <typename T>
std::vector<T>
all_gather(const MPI_Comm comm, const T &object_to_send);
template <typename T>
std::vector<T>
gather(const MPI_Comm     comm,
       const T           &object_to_send,
       const unsigned int root_process = 0);
template <typename T>
T
scatter(const MPI_Comm        comm,
        const std::vector<T> &objects_to_send,
        const unsigned int    root_process = 0);
template <typename T>
std::enable_if_t<is_mpi_type<T> == false, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process = 0);

template <typename T>
std::enable_if_t<is_mpi_type<T> == true, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process = 0);
template <typename T>
void
broadcast(T                 *buffer,
          const std::size_t  count,
          const unsigned int root,
          const MPI_Comm     comm);
template <typename T>
T
reduce(const T                                      &local_value,
       const MPI_Comm                                comm,
       const std::function<T(const T &, const T &)> &combiner,
       const unsigned int                            root_process = 0);

template <typename T>
T
all_reduce(const T                                      &local_value,
           const MPI_Comm                                comm,
           const std::function<T(const T &, const T &)> &combiner);
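/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): reduce() and all_reduce() take an arbitrary user-provided
 * combiner, which is useful for types and operations without a predefined
 * MPI_Op. The component-wise maximum below is an illustrative combiner,
 * not a library-provided one.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * #include <algorithm>
 * #include <vector>
 *
 * void combiner_example(const MPI_Comm comm,
 *                       const std::vector<double> &local)
 * {
 *   using namespace dealii;
 *
 *   const auto componentwise_max =
 *     [](const std::vector<double> &a, const std::vector<double> &b) {
 *       std::vector<double> result(a.size());
 *       for (unsigned int i = 0; i < a.size(); ++i)
 *         result[i] = std::max(a[i], b[i]);
 *       return result;
 *     };
 *
 *   // Every process obtains the component-wise maximum over all ranks:
 *   const std::vector<double> global =
 *     Utilities::MPI::all_reduce<std::vector<double>>(local,
 *                                                     comm,
 *                                                     componentwise_max);
 * }
 * @endcode
 */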
template <typename T>
Future<void>
isend(const T           &object,
      MPI_Comm           communicator,
      const unsigned int target_rank,
      const unsigned int mpi_tag = 0);

template <typename T>
Future<T>
irecv(MPI_Comm           communicator,
      const unsigned int source_rank,
      const unsigned int mpi_tag = 0);
std::vector<unsigned int>
compute_index_owner(const IndexSet &owned_indices,
                    const IndexSet &indices_to_look_up,
                    const MPI_Comm  comm);

template <typename T>
std::vector<T>
compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);

template <typename T>
std::set<T>
compute_set_union(const std::set<T> &set, const MPI_Comm comm);
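/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): compute_set_union() merges locally known values across all
 * processes, and compute_index_owner() answers "which rank owns these
 * indices?" for a distributed index space. The wrapper function name is
 * illustrative only.
 *
 * @code
 * #include <deal.II/base/index_set.h>
 * #include <deal.II/base/mpi.h>
 *
 * void union_and_owner_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 *
 *   // Union of the one-element per-rank vectors {my_rank}; every process
 *   // receives {0, 1, ..., n_ranks-1}:
 *   const std::vector<unsigned int> all_ranks =
 *     Utilities::MPI::compute_set_union(
 *       std::vector<unsigned int>{my_rank}, comm);
 *
 *   // Who owns index 42 of a 100-element index space?
 *   const IndexSet owned =
 *     Utilities::MPI::create_evenly_distributed_partitioning(comm, 100);
 *   IndexSet to_look_up(100);
 *   to_look_up.add_index(42);
 *   const std::vector<unsigned int> owners =
 *     Utilities::MPI::compute_index_owner(owned, to_look_up, comm);
 * }
 * @endcode
 */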
namespace internal
{
  // Given a pointer to an object of a built-in type, return the matching
  // MPI_Datatype. There is one overload per supported type; only a few
  // are reproduced here:
  namespace MPIDataTypes
  {
#ifdef DEAL_II_WITH_MPI
    inline MPI_Datatype
    mpi_type_id(const bool *)
    {
      return MPI_CXX_BOOL;
    }

    inline MPI_Datatype
    mpi_type_id(const signed char *)
    {
      return MPI_SIGNED_CHAR;
    }

    // ... analogous overloads returning MPI_CHAR, MPI_SHORT, MPI_INT,
    // MPI_LONG, MPI_LONG_LONG, MPI_UNSIGNED_CHAR, MPI_UNSIGNED_SHORT,
    // MPI_UNSIGNED, MPI_UNSIGNED_LONG, MPI_UNSIGNED_LONG_LONG, MPI_FLOAT,
    // MPI_DOUBLE, MPI_LONG_DOUBLE, MPI_COMPLEX, and MPI_DOUBLE_COMPLEX ...
#endif
  } // namespace MPIDataTypes
} // namespace internal
#ifdef DEAL_II_WITH_MPI
// A variable template that maps a C++ type to its MPI_Datatype at compile
// time, e.g. mpi_type_id_for_type<double> == MPI_DOUBLE:
template <typename T>
const MPI_Datatype mpi_type_id_for_type = internal::MPIDataTypes::mpi_type_id(
  static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
#endif
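/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): mpi_type_id_for_type replaces the manual mapping from C++ types
 * to MPI datatype tags when calling raw MPI functions. It assumes deal.II
 * was configured with MPI; the wrapper function name is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * void raw_mpi_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   double value = 3.141;
 *   // Instead of spelling out MPI_DOUBLE:
 *   const int ierr =
 *     MPI_Bcast(&value,
 *               1,
 *               Utilities::MPI::mpi_type_id_for_type<double>,
 *               0,
 *               comm);
 *   AssertThrowMPI(ierr);
 * }
 * @endcode
 */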
// ------------------- inline and template functions ----------------------

namespace internal
{
  // An internal function that performs the actual reduction; it is
  // defined in mpi.templates.h:
  template <typename T>
  void
  all_reduce(const MPI_Op             &mpi_op,
             const ArrayView<const T> &values,
             const MPI_Comm            mpi_communicator,
             const ArrayView<T>       &output);
} // namespace internal

template <typename T>
template <typename W, typename G>
Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
  : wait_function(wait_operation)
  , get_and_cleanup_function(get_and_cleanup_operation)
  , is_done(false)
  , get_was_called(false)
{}

template <typename T>
Future<T>::~Future()
{
  // If get() was never called and a clean-up function exists (i.e., the
  // object was not moved from), collect and discard the result so that
  // the pending operation still completes:
  if ((get_was_called == false) && get_and_cleanup_function)
    get();
}

template <typename T>
void
Future<T>::wait()
{
  if (is_done == false)
    {
      wait_function();
      is_done = true;
    }
}

template <typename T>
T
Future<T>::get()
{
  Assert(get_was_called == false,
         ExcMessage(
           "You can't call get() more than once on a Future object."));
  get_was_called = true;

  wait();
  return get_and_cleanup_function();
}
template <typename T, unsigned int N>
void
sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
{
  internal::all_reduce(MPI_SUM,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(sums, N));
}

template <typename T, unsigned int N>
void
max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
{
  internal::all_reduce(MPI_MAX,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(maxima, N));
}

template <typename T, unsigned int N>
void
min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
{
  internal::all_reduce(MPI_MIN,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(minima, N));
}

template <typename T, unsigned int N>
void
logical_or(const T (&values)[N],
           const MPI_Comm mpi_communicator,
           T (&results)[N])
{
  static_assert(std::is_integral<T>::value,
                "The MPI_LOR operation only allows integral data types.");

  internal::all_reduce(MPI_LOR,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(results, N));
}
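/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): the fixed-size array overloads reduce several values in a
 * single collective call. The wrapper function name is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * void array_sum_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const double local[2] = {1.0, 2.0};
 *   double       global[2];
 *   Utilities::MPI::sum(local, comm, global);
 *   // global[i] now holds the sum of local[i] over all ranks.
 * }
 * @endcode
 */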
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm                   comm,
             const std::map<unsigned int, T> &objects_to_send)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  Assert(objects_to_send.size() < 2,
         ExcMessage("Cannot send to more than one processor."));
  Assert(objects_to_send.find(0) != objects_to_send.end() ||
           objects_to_send.size() == 0,
         ExcMessage("Can only send to myself or to nobody."));
  return objects_to_send;
#else
  const auto my_proc = this_mpi_process(comm);

  std::map<unsigned int, T> received_objects;

  // Keep the message addressed to ourselves, if any, and record the
  // other destination ranks:
  std::vector<unsigned int> send_to;
  send_to.reserve(objects_to_send.size());
  for (const auto &m : objects_to_send)
    if (m.first == my_proc)
      received_objects[my_proc] = m.second;
    else
      send_to.emplace_back(m.first);

  const unsigned int n_expected_incoming_messages =
    compute_n_point_to_point_communications(comm, send_to);

  // Protect the following communication from concurrent uses of the same
  // tag on this communicator:
  static CollectiveMutex      mutex;
  CollectiveMutex::ScopedLock lock(mutex, comm);

  // If we neither send nor expect to receive anything, we are done:
  if (send_to.size() == 0 && n_expected_incoming_messages == 0)
    return received_objects;

  const int mpi_tag =
    internal::Tags::compute_point_to_point_communication_pattern;

  // Send the packed objects to all destinations:
  std::vector<std::vector<char>> buffers_to_send(send_to.size());
  std::vector<MPI_Request>       buffer_send_requests(send_to.size());
  {
    unsigned int i = 0;
    for (const auto &rank_obj : objects_to_send)
      if (rank_obj.first != my_proc)
        {
          const auto &rank   = rank_obj.first;
          buffers_to_send[i] = Utilities::pack(rank_obj.second, false);
          const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                     buffers_to_send[i].size(),
                                     MPI_CHAR,
                                     rank,
                                     mpi_tag,
                                     comm,
                                     &buffer_send_requests[i]);
          AssertThrowMPI(ierr);
          ++i;
        }
  }

  // Receive the expected number of messages, from whichever rank is
  // ready first:
  {
    std::vector<char> buffer;

    for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
      {
        MPI_Status status;
        int        ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
        AssertThrowMPI(ierr);

        int len;
        ierr = MPI_Get_count(&status, MPI_CHAR, &len);
        AssertThrowMPI(ierr);
        buffer.resize(len);

        const unsigned int rank = status.MPI_SOURCE;

        ierr = MPI_Recv(buffer.data(),
                        len,
                        MPI_CHAR,
                        status.MPI_SOURCE,
                        status.MPI_TAG,
                        comm,
                        MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        Assert(received_objects.find(rank) == received_objects.end(),
               ExcMessage("I should not receive again from this rank"));
        received_objects[rank] = Utilities::unpack<T>(buffer, false);
      }
  }

  // Wait until all sends have completed before the buffers go out of
  // scope:
  const int ierr = MPI_Waitall(send_to.size(),
                               buffer_send_requests.data(),
                               MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);

  return received_objects;
#endif
}
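/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): each process sends a serializable object to a dynamically
 * chosen set of destinations and receives whatever others addressed to
 * it. Here every rank sends a string to rank 0. The wrapper function name
 * is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * #include <map>
 * #include <string>
 *
 * void some_to_some_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 *
 *   std::map<unsigned int, std::string> to_send;
 *   to_send[0] = "greetings from rank " + std::to_string(my_rank);
 *
 *   const std::map<unsigned int, std::string> received =
 *     Utilities::MPI::some_to_some(comm, to_send);
 *   // On rank 0, 'received' holds one entry per rank (including its own
 *   // self-message); on all other ranks it is empty.
 * }
 * @endcode
 */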
template <typename T>
std::vector<T>
all_gather(const MPI_Comm comm, const T &object)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  std::vector<T> v(1, object);
  return v;
#else
  const auto n_procs = n_mpi_processes(comm);

  std::vector<char> buffer = Utilities::pack(object);

  int n_local_data = buffer.size();

  // Exchange the size of each process's buffer:
  std::vector<int> size_all_data(n_procs, 0);
  int              ierr = MPI_Allgather(
    &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
  AssertThrowMPI(ierr);

  // Compute the displacement, relative to recvbuf, at which to store
  // each incoming buffer:
  std::vector<int> rdispls(n_procs);
  rdispls[0] = 0;
  for (unsigned int i = 1; i < n_procs; ++i)
    rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

  // Exchange the actual buffers:
  std::vector<char> received_unrolled_buffer(rdispls.back() +
                                             size_all_data.back());
  ierr = MPI_Allgatherv(buffer.data(),
                        n_local_data,
                        MPI_CHAR,
                        received_unrolled_buffer.data(),
                        size_all_data.data(),
                        rdispls.data(),
                        MPI_CHAR,
                        comm);
  AssertThrowMPI(ierr);

  // Unpack every process's contribution:
  std::vector<T> received_objects(n_procs);
  for (unsigned int i = 0; i < n_procs; ++i)
    {
      std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                       rdispls[i],
                                     received_unrolled_buffer.begin() +
                                       rdispls[i] + size_all_data[i]);
      received_objects[i] = Utilities::unpack<T>(local_buffer);
    }

  return received_objects;
#endif
}
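/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): every process contributes one object, and every process
 * receives the vector of all contributions, indexed by rank. The wrapper
 * function name is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * #include <string>
 * #include <vector>
 *
 * void all_gather_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const std::string name =
 *     "rank " + std::to_string(Utilities::MPI::this_mpi_process(comm));
 *
 *   const std::vector<std::string> all_names =
 *     Utilities::MPI::all_gather(comm, name);
 *   // On every process, all_names[i] is the string contributed by rank i.
 * }
 * @endcode
 */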
template <typename T>
std::vector<T>
gather(const MPI_Comm     comm,
       const T           &object_to_send,
       const unsigned int root_process)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  std::vector<T> v(1, object_to_send);
  return v;
#else
  const auto n_procs = n_mpi_processes(comm);
  const auto my_rank = this_mpi_process(comm);

  AssertIndexRange(root_process, n_procs);

  std::vector<char> buffer       = Utilities::pack(object_to_send);
  int               n_local_data = buffer.size();

  // Only the root process needs to know how large everybody's buffer is:
  std::vector<int> size_all_data;
  if (my_rank == root_process)
    size_all_data.resize(n_procs, 0);

  int ierr = MPI_Gather(&n_local_data, 1, MPI_INT,
                        size_all_data.data(), 1, MPI_INT,
                        root_process, comm);
  AssertThrowMPI(ierr);

  // Compute the displacement at which to store each incoming buffer,
  // again only on the root:
  std::vector<int> rdispls;
  if (my_rank == root_process)
    {
      rdispls.resize(n_procs, 0);
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
    }

  std::vector<char> received_unrolled_buffer;
  if (my_rank == root_process)
    received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

  ierr = MPI_Gatherv(buffer.data(), n_local_data, MPI_CHAR,
                     received_unrolled_buffer.data(), size_all_data.data(),
                     rdispls.data(), MPI_CHAR,
                     root_process, comm);
  AssertThrowMPI(ierr);

  std::vector<T> received_objects;
  if (my_rank == root_process)
    {
      received_objects.resize(n_procs);
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          const std::vector<char> local_buffer(
            received_unrolled_buffer.begin() + rdispls[i],
            received_unrolled_buffer.begin() + rdispls[i] +
              size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }
    }
  return received_objects;
#endif
}
template <typename T>
T
scatter(const MPI_Comm        comm,
        const std::vector<T> &objects_to_send,
        const unsigned int    root_process)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  AssertDimension(objects_to_send.size(), 1);
  return objects_to_send[0];
#else
  const auto n_procs = n_mpi_processes(comm);
  const auto my_rank = this_mpi_process(comm);

  AssertIndexRange(root_process, n_procs);
  AssertThrow(
    (my_rank != root_process && objects_to_send.size() == 0) ||
      objects_to_send.size() == n_procs,
    ExcMessage(
      "The number of objects to be scattered must correspond to the number of processes."));

  // On the root, pack all objects into one buffer and record each
  // object's size and offset:
  std::vector<char> send_buffer;
  std::vector<int>  send_counts;
  std::vector<int>  send_displacements;

  if (my_rank == root_process)
    {
      send_counts.resize(n_procs, 0);
      send_displacements.resize(n_procs + 1, 0);

      for (unsigned int i = 0; i < n_procs; ++i)
        {
          const auto packed_data = Utilities::pack(objects_to_send[i]);
          send_buffer.insert(send_buffer.end(),
                             packed_data.begin(),
                             packed_data.end());
          send_counts[i] = packed_data.size();
        }

      for (unsigned int i = 0; i < n_procs; ++i)
        send_displacements[i + 1] = send_displacements[i] + send_counts[i];
    }

  // Tell each process how many bytes it will receive, then scatter:
  int n_local_data;
  int ierr = MPI_Scatter(send_counts.data(), 1, MPI_INT,
                         &n_local_data, 1, MPI_INT,
                         root_process, comm);
  AssertThrowMPI(ierr);

  std::vector<char> recv_buffer(n_local_data);

  ierr = MPI_Scatterv(send_buffer.data(), send_counts.data(),
                      send_displacements.data(), MPI_CHAR,
                      recv_buffer.data(), n_local_data, MPI_CHAR,
                      root_process, comm);
  AssertThrowMPI(ierr);

  return Utilities::unpack<T>(recv_buffer);
#endif
}
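/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): gather() collects one object per rank on the root, and
 * scatter() is the inverse, handing each rank one element of the root's
 * vector. The wrapper function name is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * #include <vector>
 *
 * void gather_scatter_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 *   const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
 *
 *   // Rank 0 ends up with {0, 10, 20, ...}; everyone else receives an
 *   // empty vector:
 *   const std::vector<unsigned int> gathered =
 *     Utilities::MPI::gather(comm, 10 * my_rank, 0);
 *
 *   // The inverse: only the root provides data, one entry per rank:
 *   std::vector<unsigned int> to_scatter;
 *   if (my_rank == 0)
 *     for (unsigned int r = 0; r < n_procs; ++r)
 *       to_scatter.push_back(r * r);
 *
 *   const unsigned int my_value =
 *     Utilities::MPI::scatter(comm, to_scatter, 0);
 * }
 * @endcode
 */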
template <typename T>
void
broadcast(T                 *buffer,
          const std::size_t  count,
          const unsigned int root,
          const MPI_Comm     comm)
{
#ifndef DEAL_II_WITH_MPI
  (void)buffer;
  (void)count;
  (void)root;
  (void)comm;
#else
  // MPI_Bcast's count argument is a signed int: broadcast at most 2^31-1
  // elements at a time and loop until everything has been sent.
  const size_t max_send_count = std::numeric_limits<signed int>::max();

  size_t total_sent_count = 0;
  while (total_sent_count < count)
    {
      const size_t current_count =
        std::min(count - total_sent_count, max_send_count);

      const int ierr = MPI_Bcast(buffer + total_sent_count,
                                 current_count,
                                 mpi_type_id_for_type<decltype(*buffer)>,
                                 root,
                                 comm);
      AssertThrowMPI(ierr);
      total_sent_count += current_count;
    }
#endif
}
template <typename T>
std::enable_if_t<is_mpi_type<T> == false, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  return object_to_send;
#else
  // Pack the object on the root, broadcast the buffer size, then the
  // buffer itself (in chunks, via the pointer overload above):
  std::vector<char> buffer;
  std::size_t       buffer_size = numbers::invalid_size_type;
  if (this_mpi_process(comm) == root_process)
    {
      buffer      = Utilities::pack(object_to_send, false);
      buffer_size = buffer.size();
    }

  int ierr = MPI_Bcast(&buffer_size,
                       1,
                       mpi_type_id_for_type<decltype(buffer_size)>,
                       root_process,
                       comm);
  AssertThrowMPI(ierr);

  if (this_mpi_process(comm) != root_process)
    buffer.resize(buffer_size);
  broadcast(buffer.data(), buffer_size, root_process, comm);

  if (this_mpi_process(comm) == root_process)
    return object_to_send;
  else
    return Utilities::unpack<T>(buffer, false);
#endif
}
template <typename T>
std::enable_if_t<is_mpi_type<T> == true, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  return object_to_send;
#else
  // Types with a native MPI datatype need neither packing nor a size
  // exchange; a single MPI_Bcast suffices:
  T object = object_to_send;

  const int ierr =
    MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
  AssertThrowMPI(ierr);

  return object;
#endif
}
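/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): broadcast() picks the serialization-based overload for general
 * types and the plain MPI_Bcast overload for types with a native MPI
 * datatype; the call site looks the same either way. The wrapper function
 * name is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * #include <string>
 *
 * void broadcast_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 *
 *   // is_mpi_type<int> == true: forwarded to a single MPI_Bcast.
 *   const int n =
 *     Utilities::MPI::broadcast(comm, my_rank == 0 ? 42 : 0, 0);
 *
 *   // is_mpi_type<std::string> == false: packed, sized, and broadcast.
 *   const std::string message = Utilities::MPI::broadcast(
 *     comm, my_rank == 0 ? std::string("hello") : std::string(), 0);
 * }
 * @endcode
 */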
template <typename T>
Future<void>
isend(const T           &object,
      MPI_Comm           communicator,
      const unsigned int target_rank,
      const unsigned int mpi_tag)
{
#ifndef DEAL_II_WITH_MPI
  Assert(false, ExcNeedsMPI());
  (void)object;
  (void)communicator;
  (void)target_rank;
  (void)mpi_tag;
  return Future<void>([]() {}, []() {});
#else
  // Pack the object into a buffer whose lifetime extends until the send
  // has completed; a shared_ptr keeps it alive inside the lambdas below:
  std::shared_ptr<std::vector<char>> send_buffer =
    std::make_unique<std::vector<char>>(Utilities::pack(object, false));

  // Start the send and store the resulting request so that we can wait
  // for it later:
  MPI_Request request;
  const int   ierr =
    MPI_Isend(send_buffer->data(),
              send_buffer->size(),
              mpi_type_id_for_type<decltype(*send_buffer->data())>,
              target_rank,
              mpi_tag,
              communicator,
              &request);
  AssertThrowMPI(ierr);

  // Return a std::future-like object whose wait() blocks until the
  // communication has finished and whose clean-up releases the buffer:
  auto wait = [request]() mutable {
    const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
    AssertThrowMPI(ierr);
  };
  auto cleanup = [send_buffer = std::move(send_buffer)]() {
    send_buffer->clear();
  };
  return Future<void>(wait, cleanup);
#endif
}
template <typename T>
Future<T>
irecv(MPI_Comm           communicator,
      const unsigned int source_rank,
      const unsigned int mpi_tag)
{
#ifndef DEAL_II_WITH_MPI
  Assert(false, ExcNeedsMPI());
  (void)communicator;
  (void)source_rank;
  (void)mpi_tag;
  return Future<T>([]() {}, []() { return T{}; });
#else
  // Objects to hold the matched message and its status; shared so that
  // both lambdas below can refer to them:
  std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
  std::shared_ptr<MPI_Status>  status  = std::make_shared<MPI_Status>();

  // The wait function matches the message without receiving it yet:
  auto wait = [source_rank, mpi_tag, communicator, message, status]() {
    const int ierr = MPI_Mprobe(
      source_rank, mpi_tag, communicator, message.get(), status.get());
    AssertThrowMPI(ierr);
  };

  // The get function receives and unpacks the matched message:
  auto get = [status, message]() {
    int number_amount;
    int ierr;
    ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
    AssertThrowMPI(ierr);

    std::vector<char> receive_buffer(number_amount);

    ierr = MPI_Mrecv(receive_buffer.data(),
                     number_amount,
                     mpi_type_id_for_type<decltype(*receive_buffer.data())>,
                     message.get(),
                     status.get());
    AssertThrowMPI(ierr);

    return Utilities::unpack<T>(receive_buffer, false);
  };

  return Future<T>(wait, get);
#endif
}
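/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): a non-blocking exchange in which rank 0 sends a string to
 * rank 1, so the example assumes at least two ranks. The Future objects
 * allow overlapping the communication with other work before
 * wait()/get(). The wrapper function name is illustrative only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * #include <string>
 *
 * void isend_irecv_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
 *
 *   if (my_rank == 0)
 *     {
 *       Utilities::MPI::Future<void> f =
 *         Utilities::MPI::isend(std::string("payload"), comm, 1);
 *       // ... unrelated work ...
 *       f.wait(); // ensure the send buffer may be released
 *     }
 *   else if (my_rank == 1)
 *     {
 *       Utilities::MPI::Future<std::string> f =
 *         Utilities::MPI::irecv<std::string>(comm, 0);
 *       // ... unrelated work ...
 *       const std::string received = f.get();
 *     }
 * }
 * @endcode
 */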
#ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator begin,
                            const Iterator end,
                            const MPI_Comm comm)
{
  // A simple two-pass implementation: first compute the global mean,
  // then the sum of squared deviations from it.
  using Std = typename numbers::NumberTraits<Number>::real_type;

  const Number sum = std::accumulate(begin, end, Number(0.));

  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
  Assert(size > 0, ExcDivideByZero());
  const Number mean =
    Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);

  Std sq_sum = 0.;
  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
    sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
  });
  sq_sum = Utilities::MPI::sum(sq_sum, comm);

  // Sample standard deviation, i.e., with denominator size - 1:
  return std::make_pair(mean,
                        std::sqrt(sq_sum / static_cast<Std>(size - 1)));
}
#endif
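/**
 * A minimal usage sketch (added for this excerpt; not part of the original
 * header): mean and sample standard deviation over the locally stored
 * samples of all processes. The wrapper function name is illustrative
 * only.
 *
 * @code
 * #include <deal.II/base/mpi.h>
 *
 * #include <vector>
 *
 * void statistics_example(const MPI_Comm comm)
 * {
 *   using namespace dealii;
 *
 *   const std::vector<double> samples = {0.9, 1.1, 1.0};
 *
 *   const auto mean_and_sigma =
 *     Utilities::MPI::mean_and_standard_deviation(samples.begin(),
 *                                                 samples.end(),
 *                                                 comm);
 *   // mean_and_sigma.first is the mean, .second the standard deviation.
 * }
 * @endcode
 */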