#include <boost/signals2.hpp>
#ifdef DEAL_II_WITH_MPI
#  define DEAL_II_MPI_CONST_CAST(expr) (expr)
#endif
// Forward declarations of the types for which reduction operations are
// declared further down:
template <int rank, int dim, typename Number>
class Tensor;
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <typename Number>
class SparseMatrix;
class IndexSet;
IndexSet
create_evenly_distributed_partitioning(
  const unsigned int            my_partition_id,
  const unsigned int            n_partitions,
  const types::global_dof_index total_size);
/**
 * A template variable that is `true` if the template argument T is a data
 * type natively supported by MPI, and `false` otherwise. (The full list of
 * supported types is abbreviated in this excerpt.)
 */
template <typename T>
constexpr bool is_mpi_type = is_same_as_any_of<T,
                                               // ... built-in arithmetic
                                               // types elided ...
                                               std::complex<double>,
                                               std::complex<long double>
                                               // ...
                                               >::value;
std::vector<unsigned int>
mpi_processes_within_communicator(const MPI_Comm comm_large,
                                  const MPI_Comm comm_small);

std::vector<unsigned int>
compute_point_to_point_communication_pattern(
  const MPI_Comm                   mpi_comm,
  const std::vector<unsigned int> &destinations);

unsigned int
compute_n_point_to_point_communications(
  const MPI_Comm                   mpi_comm,
  const std::vector<unsigned int> &destinations);
/**
 * A class that wraps a non-blocking operation: wait() blocks until the
 * operation has completed, and get() returns its result (and may be
 * called at most once).
 */
template <typename T>
class Future
{
public:
  template <typename W, typename G>
  Future(W &&wait_operation, G &&get_and_cleanup_operation);

  Future(const Future &) = delete;
  Future(Future &&) noexcept = default;

  ~Future();

  void
  wait();

  T
  get();

private:
  std::function<void()> wait_function;
  std::function<T()>    get_and_cleanup_function;

  bool is_done;
  bool get_was_called;
};
#ifdef DEAL_II_WITH_MPI
int
create_group(const MPI_Comm   comm,
             const MPI_Group &group,
             const int        tag,
             MPI_Comm        *new_comm);
#endif

std::vector<IndexSet>
create_ascending_partitioning(
  const MPI_Comm                comm,
  const types::global_dof_index locally_owned_size);
#ifdef DEAL_II_WITH_MPI
/**
 * Calculate mean and standard deviation across the MPI communicator @p comm
 * for the range [begin, end).
 */
template <class Iterator, typename Number = long double>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator begin,
                            const Iterator end,
                            const MPI_Comm comm);
#endif
std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
create_mpi_data_type_n_bytes(const std::size_t n_bytes);
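/**
 * Usage sketch (illustrative, not part of the original header): the returned
 * data type describes a contiguous block of n_bytes bytes, which allows
 * single messages whose byte count exceeds the range of MPI's `int` counts.
 * The destination rank and tag below are hypothetical.
 * @code
 * std::vector<char> buffer(5'000'000'000ULL); // too large for an int count
 * const auto bigtype =
 *   Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
 * const int ierr = MPI_Send(buffer.data(), 1, *bigtype,
 *                           /*dest=*/1, /*tag=*/0, MPI_COMM_WORLD);
 * @endcode
 */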
template <typename T>
T
sum(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
sum(const T &values, const MPI_Comm mpi_communicator, U &sums);

template <typename T>
void
sum(const ArrayView<const T> &values,
    const MPI_Comm            mpi_communicator,
    const ArrayView<T>       &sums);

template <int rank, int dim, typename Number>
SymmetricTensor<rank, dim, Number>
sum(const SymmetricTensor<rank, dim, Number> &local,
    const MPI_Comm                             mpi_communicator);

template <int rank, int dim, typename Number>
Tensor<rank, dim, Number>
sum(const Tensor<rank, dim, Number> &local,
    const MPI_Comm                   mpi_communicator);

template <typename Number>
void
sum(const SparseMatrix<Number> &local,
    const MPI_Comm              mpi_communicator,
    SparseMatrix<Number>       &global);

template <typename T>
T
max(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
max(const T &values, const MPI_Comm mpi_communicator, U &maxima);

template <typename T>
void
max(const ArrayView<const T> &values,
    const MPI_Comm            mpi_communicator,
    const ArrayView<T>       &maxima);

template <typename T>
T
min(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
min(const T &values, const MPI_Comm mpi_communicator, U &minima);

template <typename T>
void
min(const ArrayView<const T> &values,
    const MPI_Comm            mpi_communicator,
    const ArrayView<T>       &minima);

template <typename T>
T
logical_or(const T &t, const MPI_Comm mpi_communicator);

template <typename T, typename U>
void
logical_or(const T &values, const MPI_Comm mpi_communicator, U &results);

template <typename T>
void
logical_or(const ArrayView<const T> &values,
           const MPI_Comm            mpi_communicator,
           const ArrayView<T>       &results);
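/**
 * Usage sketch (illustrative, not part of the original header): the scalar
 * reduction functions above replace hand-written MPI_Allreduce calls; the
 * local quantity below is hypothetical.
 * @code
 * const double local_error  = compute_local_error(); // hypothetical helper
 * const double global_error =
 *   Utilities::MPI::sum(local_error, MPI_COMM_WORLD);
 * const double worst_error =
 *   Utilities::MPI::max(local_error, MPI_COMM_WORLD);
 * @endcode
 */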
std::vector<MinMaxAvg>
min_max_avg(const std::vector<double> &my_values,
            const MPI_Comm             mpi_communicator);
// Static member functions (the enclosing class is elided in this excerpt)
// that add and remove a request from the set of requests to be waited on
// before MPI is finalized:
static void
register_request(MPI_Request &request);

static void
unregister_request(MPI_Request &request);
#ifdef DEAL_II_WITH_PETSC
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm                   comm,
             const std::map<unsigned int, T> &objects_to_send);
template <typename T>
std::vector<T>
all_gather(const MPI_Comm comm, const T &object_to_send);
template <typename T>
std::vector<T>
gather(const MPI_Comm     comm,
       const T           &object_to_send,
       const unsigned int root_process = 0);
template <typename T>
T
scatter(const MPI_Comm        comm,
        const std::vector<T> &objects_to_send,
        const unsigned int    root_process = 0);
template <typename T>
std::enable_if_t<is_mpi_type<T> == false, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process = 0);
template <typename T>
std::enable_if_t<is_mpi_type<T> == true, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process = 0);
template <typename T>
void
broadcast(T                 *buffer,
          const std::size_t  count,
          const unsigned int root,
          const MPI_Comm     comm);
template <typename T>
T
reduce(const T                                      &local_value,
       const MPI_Comm                                comm,
       const std::function<T(const T &, const T &)> &combiner,
       const unsigned int                            root_process = 0);
template <typename T, typename = std::enable_if_t<is_mpi_type<T> == true>>
std::pair<T, T>
partial_and_total_sum(const T &value, const MPI_Comm comm);
template <typename T>
T
all_reduce(const T                                      &local_value,
           const MPI_Comm                                comm,
           const std::function<T(const T &, const T &)> &combiner);
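/**
 * Usage sketch (illustrative, not part of the original header): all_reduce()
 * with a user-provided, associative combiner. Here every rank contributes a
 * string and all ranks receive the concatenation; the lambda is a
 * hypothetical example.
 * @code
 * const std::string result = Utilities::MPI::all_reduce<std::string>(
 *   "rank-" + std::to_string(Utilities::MPI::this_mpi_process(comm)),
 *   comm,
 *   [](const std::string &a, const std::string &b) { return a + b; });
 * @endcode
 */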
template <typename T>
Future<void>
isend(const T           &object,
      MPI_Comm           communicator,
      const unsigned int target_rank,
      const unsigned int mpi_tag = 0);
template <typename T>
Future<T>
irecv(MPI_Comm           communicator,
      const unsigned int source_rank,
      const unsigned int mpi_tag = 0);
std::vector<unsigned int>
compute_index_owner(const IndexSet &owned_indices,
                    const IndexSet &indices_to_look_up,
                    const MPI_Comm  comm);
template <typename T>
std::vector<T>
compute_set_union(const std::vector<T> &vec, const MPI_Comm comm);

template <typename T>
std::set<T>
compute_set_union(const std::set<T> &set, const MPI_Comm comm);
namespace MPIDataTypes
{
#ifdef DEAL_II_WITH_MPI
  // Overloads that map C++ types to the corresponding MPI_Datatype (the
  // overloads for types not shown in this excerpt are elided):
  inline MPI_Datatype mpi_type_id(const bool *)           { return MPI_CXX_BOOL; }
  inline MPI_Datatype mpi_type_id(const signed char *)    { return MPI_SIGNED_CHAR; }
  inline MPI_Datatype mpi_type_id(const long long *)      { return MPI_LONG_LONG; }
  inline MPI_Datatype mpi_type_id(const unsigned char *)  { return MPI_UNSIGNED_CHAR; }
  inline MPI_Datatype mpi_type_id(const unsigned short *) { return MPI_UNSIGNED_SHORT; }
  inline MPI_Datatype mpi_type_id(const unsigned int *)   { return MPI_UNSIGNED; }
  inline MPI_Datatype mpi_type_id(const unsigned long *)  { return MPI_UNSIGNED_LONG; }
  inline MPI_Datatype mpi_type_id(const unsigned long long *)
  { return MPI_UNSIGNED_LONG_LONG; }
  inline MPI_Datatype mpi_type_id(const long double *)    { return MPI_LONG_DOUBLE; }
  inline MPI_Datatype mpi_type_id(const std::complex<double> *)
  { return MPI_DOUBLE_COMPLEX; }
#endif
} // namespace MPIDataTypes
#ifdef DEAL_II_WITH_MPI
/**
 * A template variable that translates from the data type given as template
 * argument to the corresponding MPI_Datatype, by dispatching to the
 * mpi_type_id() overloads above.
 */
template <typename T>
const MPI_Datatype mpi_type_id_for_type = MPIDataTypes::mpi_type_id(
  static_cast<std::remove_cv_t<std::remove_reference_t<T>> *>(nullptr));
#endif
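/**
 * Usage sketch (illustrative, not part of the original header):
 * mpi_type_id_for_type<T> lets raw MPI calls pick the matching MPI_Datatype
 * at compile time instead of hard-coding MPI_DOUBLE and friends.
 * @code
 * std::vector<double> data(100, 3.14);
 * const int ierr = MPI_Send(data.data(), data.size(),
 *                           Utilities::MPI::mpi_type_id_for_type<double>,
 *                           /*dest=*/0, /*tag=*/0, MPI_COMM_WORLD);
 * @endcode
 */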
namespace internal
{
  // Declaration of an internal helper whose definition lives elsewhere; the
  // array reductions below forward to it:
  template <typename T>
  void
  all_reduce(const MPI_Op             &mpi_op,
             const ArrayView<const T> &values,
             const MPI_Comm            mpi_communicator,
             const ArrayView<T>       &output);
} // namespace internal
template <typename T>
template <typename W, typename G>
Future<T>::Future(W &&wait_operation, G &&get_and_cleanup_operation)
  : wait_function(std::forward<W>(wait_operation))
  , get_and_cleanup_function(std::forward<G>(get_and_cleanup_operation))
  , is_done(false)
  , get_was_called(false)
{}
template <typename T>
Future<T>::~Future()
{
  // If get() was never called, run the cleanup function now by calling
  // get() and discarding the returned object:
  if ((get_was_called == false) && get_and_cleanup_function)
    get();
}
template <typename T>
void
Future<T>::wait()
{
  if (is_done == false)
    {
      wait_function();
      is_done = true;
    }
}
template <typename T>
T
Future<T>::get()
{
  Assert(get_was_called == false,
         ExcMessage(
           "You can't call get() more than once on a Future object."));
  get_was_called = true;

  wait();
  return get_and_cleanup_function();
}
template <typename T, unsigned int N>
void
sum(const T (&values)[N], const MPI_Comm mpi_communicator, T (&sums)[N])
{
  internal::all_reduce(MPI_SUM,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(sums, N));
}
template <typename T, unsigned int N>
void
max(const T (&values)[N], const MPI_Comm mpi_communicator, T (&maxima)[N])
{
  internal::all_reduce(MPI_MAX,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(maxima, N));
}
template <typename T, unsigned int N>
void
min(const T (&values)[N], const MPI_Comm mpi_communicator, T (&minima)[N])
{
  internal::all_reduce(MPI_MIN,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(minima, N));
}
template <typename T, unsigned int N>
void
logical_or(const T (&values)[N],
           const MPI_Comm mpi_communicator,
           T (&results)[N])
{
  static_assert(std::is_integral_v<T>,
                "The MPI_LOR operation only allows integral data types.");

  internal::all_reduce(MPI_LOR,
                       ArrayView<const T>(values, N),
                       mpi_communicator,
                       ArrayView<T>(results, N));
}
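/**
 * Usage sketch (illustrative, not part of the original header): the
 * fixed-size array overloads reduce several values in a single MPI call;
 * the local values below are hypothetical.
 * @code
 * double local_integrals[2] = {0.5, 1.25}; // hypothetical per-rank values
 * double global_integrals[2];
 * Utilities::MPI::sum(local_integrals, MPI_COMM_WORLD, global_integrals);
 * @endcode
 */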
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm                   comm,
             const std::map<unsigned int, T> &objects_to_send)
{
# ifndef DEAL_II_WITH_MPI
  (void)comm;
  Assert(objects_to_send.size() < 2,
         ExcMessage("Cannot send to more than one processor."));
  Assert(objects_to_send.find(0) != objects_to_send.end() ||
           objects_to_send.empty(),
         ExcMessage("Can only send to myself or to nobody."));
  return objects_to_send;
# else
  const auto my_proc = this_mpi_process(comm);

  std::map<unsigned int, T> received_objects;

  std::vector<unsigned int> send_to;
  send_to.reserve(objects_to_send.size());
  for (const auto &m : objects_to_send)
    if (m.first == my_proc)
      received_objects[my_proc] = m.second;
    else
      send_to.emplace_back(m.first);

  const unsigned int n_expected_incoming_messages =
    Utilities::MPI::compute_n_point_to_point_communications(comm, send_to);

  // Protect the following communication from concurrent use of the same
  // communicator:
  static CollectiveMutex      mutex;
  CollectiveMutex::ScopedLock lock(mutex, comm);

  // If we neither have something to send nor expect anything from other
  // processes, we can return early:
  if (send_to.empty() && n_expected_incoming_messages == 0)
    return received_objects;

  const int mpi_tag =
    internal::Tags::compute_point_to_point_communication_pattern;

  // Sending buffers
  std::vector<std::vector<char>> buffers_to_send(send_to.size());
  std::vector<MPI_Request>       buffer_send_requests(send_to.size());
  {
    unsigned int i = 0;
    for (const auto &rank_obj : objects_to_send)
      if (rank_obj.first != my_proc)
        {
          const auto &rank   = rank_obj.first;
          buffers_to_send[i] = Utilities::pack(rank_obj.second,
                                               /*allow_compression=*/false);
          const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                         buffers_to_send[i].size(),
                                         MPI_CHAR,
                                         rank,
                                         mpi_tag,
                                         comm,
                                         &buffer_send_requests[i]);
          AssertThrowMPI(ierr);
          ++i;
        }
  }

  // Receiving buffers
  {
    std::vector<char> buffer;

    for (unsigned int i = 0; i < n_expected_incoming_messages; ++i)
      {
        // Probe for the next message, whoever the sender is:
        MPI_Status status;
        int        ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
        AssertThrowMPI(ierr);

        // Length of the message:
        int len;
        ierr = MPI_Get_count(&status, MPI_CHAR, &len);
        AssertThrowMPI(ierr);
        buffer.resize(len);

        const unsigned int rank = status.MPI_SOURCE;

        // Actually receive the message:
        ierr = MPI_Recv(buffer.data(),
                        len,
                        MPI_CHAR,
                        status.MPI_SOURCE,
                        status.MPI_TAG,
                        comm,
                        MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);
        Assert(received_objects.find(rank) == received_objects.end(),
               ExcMessage("I should not receive again from this rank"));
        received_objects[rank] =
          Utilities::unpack<T>(buffer, /*allow_compression=*/false);
      }
  }

  // Wait for all sends to complete before the buffers go out of scope:
  const int ierr = MPI_Waitall(send_to.size(),
                               buffer_send_requests.data(),
                               MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);

  return received_objects;
# endif
}
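/**
 * Usage sketch (illustrative, not part of the original header): send a
 * packable object to a dynamically determined set of destination ranks,
 * without knowing in advance who will send to us; `my_rank` and `n_ranks`
 * are assumed to be defined by the caller.
 * @code
 * std::map<unsigned int, std::vector<double>> to_send;
 * to_send[(my_rank + 1) % n_ranks] = {1.0, 2.0, 3.0};
 * const auto received = Utilities::MPI::some_to_some(comm, to_send);
 * // received[r] holds the data that rank r sent to us
 * @endcode
 */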
template <typename T, typename>
std::pair<T, T>
partial_and_total_sum(const T &value, const MPI_Comm comm)
{
# ifndef DEAL_II_WITH_MPI
  (void)comm;
  return {0, value};
# else
  T prefix = {};

  // Exclusive prefix sum over all earlier ranks:
  int ierr = MPI_Exscan(
    &value, &prefix, 1, mpi_type_id_for_type<T>, MPI_SUM, comm);
  AssertThrowMPI(ierr);

  // The last rank knows the grand total (its prefix plus its own value);
  // broadcast it to everyone:
  T sum = prefix + value;
  ierr  = MPI_Bcast(
    &sum, 1, mpi_type_id_for_type<T>, n_mpi_processes(comm) - 1, comm);
  AssertThrowMPI(ierr);

  return {prefix, sum};
# endif
}
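/**
 * Usage sketch (illustrative, not part of the original header): an exclusive
 * prefix sum is the standard way to compute the first globally consecutive
 * index owned by each process. With local sizes 3, 5, 2 on ranks 0, 1, 2,
 * the returned prefixes are 0, 3, 8 and the total is 10 on every rank.
 * @code
 * const unsigned int n_locally_owned = 42; // hypothetical local size
 * const auto [my_first_index, n_global] =
 *   Utilities::MPI::partial_and_total_sum(n_locally_owned, comm);
 * @endcode
 */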
template <typename T>
std::vector<T>
all_gather(const MPI_Comm comm, const T &object)
{
# ifndef DEAL_II_WITH_MPI
  (void)comm;
  std::vector<T> v(1, object);
  return v;
# else
  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);

  std::vector<char> buffer = Utilities::pack(object);

  int n_local_data = buffer.size();

  // Vector to store the size of the local buffer on every process:
  std::vector<int> size_all_data(n_procs, 0);

  // Exchange the size of each buffer:
  int ierr = MPI_Allgather(
    &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
  AssertThrowMPI(ierr);

  // Now compute the displacement, relative to recvbuf,
  // at which to store each incoming buffer:
  std::vector<int> rdispls(n_procs);
  for (unsigned int i = 1; i < n_procs; ++i)
    rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

  // Exchange the buffers themselves:
  std::vector<char> received_unrolled_buffer(rdispls.back() +
                                             size_all_data.back());

  ierr = MPI_Allgatherv(buffer.data(),
                        n_local_data,
                        MPI_CHAR,
                        received_unrolled_buffer.data(),
                        size_all_data.data(),
                        rdispls.data(),
                        MPI_CHAR,
                        comm);
  AssertThrowMPI(ierr);

  // Unpack each process's contribution:
  std::vector<T> received_objects(n_procs);
  for (unsigned int i = 0; i < n_procs; ++i)
    {
      std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                       rdispls[i],
                                     received_unrolled_buffer.begin() +
                                       rdispls[i] + size_all_data[i]);
      received_objects[i] = Utilities::unpack<T>(local_buffer);
    }

  return received_objects;
# endif
}
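/**
 * Usage sketch (illustrative, not part of the original header): all_gather()
 * works for any object that Utilities::pack() can serialize, for example
 * strings of different lengths; `my_rank` is assumed to be defined.
 * @code
 * const std::string name = "rank " + std::to_string(my_rank);
 * const std::vector<std::string> all_names =
 *   Utilities::MPI::all_gather(comm, name);
 * // all_names[r] is the string contributed by rank r, on every rank
 * @endcode
 */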
template <typename T>
std::vector<T>
gather(const MPI_Comm     comm,
       const T           &object_to_send,
       const unsigned int root_process)
{
# ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  std::vector<T> v(1, object_to_send);
  return v;
# else
  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
  const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);

  AssertIndexRange(root_process, n_procs);

  std::vector<char> buffer       = Utilities::pack(object_to_send);
  int               n_local_data = buffer.size();

  // Vector to store the buffer size of every process; only the root
  // process needs to allocate memory for that purpose:
  std::vector<int> size_all_data;
  if (my_rank == root_process)
    size_all_data.resize(n_procs, 0);

  // Exchange the size of each buffer:
  int ierr = MPI_Gather(&n_local_data,
                        1,
                        MPI_INT,
                        size_all_data.data(),
                        1,
                        MPI_INT,
                        root_process,
                        comm);
  AssertThrowMPI(ierr);

  // Now compute the displacement, relative to recvbuf, at which to store
  // each incoming buffer; only the root needs this:
  std::vector<int> rdispls;
  if (my_rank == root_process)
    {
      rdispls.resize(n_procs, 0);
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
    }

  // Exchange the buffers themselves:
  std::vector<char> received_unrolled_buffer;
  if (my_rank == root_process)
    received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

  ierr = MPI_Gatherv(buffer.data(),
                     n_local_data,
                     MPI_CHAR,
                     received_unrolled_buffer.data(),
                     size_all_data.data(),
                     rdispls.data(),
                     MPI_CHAR,
                     root_process,
                     comm);
  AssertThrowMPI(ierr);

  std::vector<T> received_objects;
  if (my_rank == root_process)
    {
      received_objects.resize(n_procs);

      for (unsigned int i = 0; i < n_procs; ++i)
        {
          const std::vector<char> local_buffer(
            received_unrolled_buffer.begin() + rdispls[i],
            received_unrolled_buffer.begin() + rdispls[i] +
              size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }
    }
  return received_objects;
# endif
}
template <typename T>
T
scatter(const MPI_Comm        comm,
        const std::vector<T> &objects_to_send,
        const unsigned int    root_process)
{
# ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  AssertDimension(objects_to_send.size(), 1);
  return objects_to_send[0];
# else
  const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);
  const auto my_rank = dealii::Utilities::MPI::this_mpi_process(comm);

  AssertIndexRange(root_process, n_procs);
  AssertThrow(
    (my_rank != root_process && objects_to_send.empty()) ||
      objects_to_send.size() == n_procs,
    ExcMessage(
      "The number of objects to be scattered must correspond to the number of processes."));

  std::vector<char> send_buffer;
  std::vector<int>  send_counts;
  std::vector<int>  send_displacements;

  if (my_rank == root_process)
    {
      send_counts.resize(n_procs, 0);
      send_displacements.resize(n_procs + 1, 0);

      // Pack each object and record its size:
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          const auto packed_data = Utilities::pack(objects_to_send[i]);
          send_buffer.insert(send_buffer.end(),
                             packed_data.begin(),
                             packed_data.end());
          send_counts[i] = packed_data.size();
        }

      for (unsigned int i = 0; i < n_procs; ++i)
        send_displacements[i + 1] = send_displacements[i] + send_counts[i];
    }

  // Tell every process how many bytes it will receive:
  int n_local_data;
  int ierr = MPI_Scatter(send_counts.data(), 1, MPI_INT,
                         &n_local_data, 1, MPI_INT,
                         root_process, comm);
  AssertThrowMPI(ierr);

  std::vector<char> recv_buffer(n_local_data);

  ierr = MPI_Scatterv(send_buffer.data(), send_counts.data(),
                      send_displacements.data(), MPI_CHAR,
                      recv_buffer.data(), n_local_data, MPI_CHAR,
                      root_process, comm);
  AssertThrowMPI(ierr);

  return Utilities::unpack<T>(recv_buffer);
# endif
}
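/**
 * Usage sketch (illustrative, not part of the original header): the root
 * provides one object per rank; every rank gets back exactly its own entry.
 * The partition_work() helper below is hypothetical.
 * @code
 * std::vector<std::vector<int>> work_items; // non-empty only on root
 * if (my_rank == 0)
 *   work_items = partition_work(n_ranks);   // hypothetical helper
 * const std::vector<int> my_work =
 *   Utilities::MPI::scatter(comm, work_items, 0);
 * @endcode
 */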
template <typename T>
void
broadcast(T                 *buffer,
          const std::size_t  count,
          const unsigned int root,
          const MPI_Comm     comm)
{
# ifndef DEAL_II_WITH_MPI
  (void)buffer;
  (void)count;
  (void)root;
  (void)comm;
# else
  // MPI_Bcast's count is a signed integer, so send at most 2^31-1 elements
  // per call and loop if necessary:
  const size_t max_send_count = std::numeric_limits<signed int>::max();

  size_t total_sent_count = 0;
  while (total_sent_count < count)
    {
      const size_t current_count =
        std::min(count - total_sent_count, max_send_count);

      const int ierr = MPI_Bcast(buffer + total_sent_count,
                                 current_count,
                                 mpi_type_id_for_type<decltype(*buffer)>,
                                 root,
                                 comm);
      AssertThrowMPI(ierr);
      total_sent_count += current_count;
    }
# endif
}
template <typename T>
std::enable_if_t<is_mpi_type<T> == false, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process)
{
# ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  return object_to_send;
# else
  const auto my_rank = this_mpi_process(comm);

  std::vector<char> buffer;
  std::size_t       buffer_size = numbers::invalid_size_type;

  // On the root process, pack the data and determine what the buffer size
  // needs to be:
  if (my_rank == root_process)
    {
      buffer      = Utilities::pack(object_to_send, false);
      buffer_size = buffer.size();
    }

  // Exchange the size of the buffer:
  int ierr = MPI_Bcast(&buffer_size,
                       1,
                       mpi_type_id_for_type<decltype(buffer_size)>,
                       root_process,
                       comm);
  AssertThrowMPI(ierr);

  // If not on the root process, size the buffer to receive the data, then
  // broadcast the data itself:
  if (my_rank != root_process)
    buffer.resize(buffer_size);

  broadcast(buffer.data(), buffer_size, root_process, comm);

  if (my_rank == root_process)
    return object_to_send;
  else
    return Utilities::unpack<T>(buffer, false);
# endif
}
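/**
 * Usage sketch (illustrative, not part of the original header): because
 * std::string is not a built-in MPI type, this overload serializes it on the
 * root and unpacks it everywhere else; callers do not need to size any
 * buffers. The read_file() helper below is hypothetical.
 * @code
 * std::string parameter_file_contents;
 * if (my_rank == 0)
 *   parameter_file_contents = read_file("input.prm"); // hypothetical
 * parameter_file_contents =
 *   Utilities::MPI::broadcast(comm, parameter_file_contents, 0);
 * @endcode
 */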
template <typename T>
std::enable_if_t<is_mpi_type<T> == true, T>
broadcast(const MPI_Comm     comm,
          const T           &object_to_send,
          const unsigned int root_process)
{
# ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  return object_to_send;
# else
  T object = object_to_send;
  const int ierr =
    MPI_Bcast(&object, 1, mpi_type_id_for_type<T>, root_process, comm);
  AssertThrowMPI(ierr);

  return object;
# endif
}
template <typename T>
Future<void>
isend(const T           &object,
      MPI_Comm           communicator,
      const unsigned int target_rank,
      const unsigned int mpi_tag)
{
# ifndef DEAL_II_WITH_MPI
  Assert(false, ExcNeedsMPI());
  (void)object;
  (void)communicator;
  (void)target_rank;
  (void)mpi_tag;
  return Future<void>([]() {}, []() {});
# else
  // Create a shared pointer to a send buffer into which we pack the object
  // to be sent. The buffer stays alive until the Future's cleanup function
  // runs after the send has completed.
  std::shared_ptr<std::vector<char>> send_buffer =
    std::make_shared<std::vector<char>>(
      Utilities::pack(object, /*allow_compression=*/false));

  // Start the send, and store the result in a request object we can wait
  // for later:
  MPI_Request request;
  const int   ierr =
    MPI_Isend(send_buffer->data(),
              send_buffer->size(),
              mpi_type_id_for_type<decltype(*send_buffer->data())>,
              target_rank,
              mpi_tag,
              communicator,
              &request);
  AssertThrowMPI(ierr);

  // Return a std::future-like object whose wait() waits for the
  // communication to finish and whose cleanup function releases the send
  // buffer afterwards:
  auto wait = [request]() mutable {
    const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
    AssertThrowMPI(ierr);
  };
  auto cleanup = [send_buffer = std::move(send_buffer)]() {
    send_buffer->clear();
  };
  return Future<void>(wait, cleanup);
# endif
}
template <typename T>
Future<T>
irecv(MPI_Comm           communicator,
      const unsigned int source_rank,
      const unsigned int mpi_tag)
{
# ifndef DEAL_II_WITH_MPI
  Assert(false, ExcNeedsMPI());
  (void)communicator;
  (void)source_rank;
  (void)mpi_tag;
  return Future<T>([]() {}, []() { return T{}; });
# else
  // Use a 'probe' operation as the 'wait' operation of the Future this
  // function returns. It triggers once the incoming message is available;
  // only then do we know the message size, create the receive buffer, and
  // actually receive via MPI_Mrecv.
  std::shared_ptr<MPI_Message> message = std::make_shared<MPI_Message>();
  std::shared_ptr<MPI_Status>  status  = std::make_shared<MPI_Status>();

  auto wait = [source_rank, mpi_tag, communicator, message, status]() {
    const int ierr = MPI_Mprobe(
      source_rank, mpi_tag, communicator, message.get(), status.get());
    AssertThrowMPI(ierr);
  };

  auto get = [status, message]() {
    int number_amount;
    int ierr = MPI_Get_count(status.get(), MPI_CHAR, &number_amount);
    AssertThrowMPI(ierr);

    std::vector<char> receive_buffer(number_amount);

    ierr = MPI_Mrecv(receive_buffer.data(),
                     number_amount,
                     mpi_type_id_for_type<decltype(*receive_buffer.data())>,
                     message.get(),
                     status.get());
    AssertThrowMPI(ierr);

    return Utilities::unpack<T>(receive_buffer,
                                /*allow_compression=*/false);
  };

  return Future<T>(wait, get);
# endif
}
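/**
 * Usage sketch (illustrative, not part of the original header):
 * isend()/irecv() return Future objects; communication completes when one
 * calls wait() or get(), and keeping the Future alive keeps the underlying
 * buffers alive. The pairing of ranks below is a hypothetical example.
 * @code
 * const unsigned int partner = my_rank ^ 1; // hypothetical pairing
 * auto send_future =
 *   Utilities::MPI::isend(std::vector<int>{1, 2, 3}, comm, partner);
 * auto recv_future = Utilities::MPI::irecv<std::vector<int>>(comm, partner);
 * const std::vector<int> received = recv_future.get();
 * send_future.wait();
 * @endcode
 */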
# ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator begin,
                            const Iterator end,
                            const MPI_Comm comm)
{
  using Std = typename numbers::NumberTraits<Number>::real_type;

  const Number sum = std::accumulate(begin, end, Number(0.));

  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
  Assert(size > 0, ExcDivideByZero());

  const Number mean =
    Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);

  Std sq_sum = 0.;
  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
    sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
  });
  sq_sum = Utilities::MPI::sum(sq_sum, comm);

  // Use the unbiased (size - 1) normalization for the standard deviation:
  return std::make_pair(mean,
                        std::sqrt(sq_sum / static_cast<Std>(size - 1)));
}
# endif
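/**
 * Usage sketch (illustrative, not part of the original header): compute mean
 * and standard deviation of a distributed data set, with each rank
 * contributing its local range. The generate_local_samples() helper is
 * hypothetical.
 * @code
 * std::vector<double> samples = generate_local_samples(); // hypothetical
 * const auto [mean, std_dev] =
 *   Utilities::MPI::mean_and_standard_deviation(samples.begin(),
 *                                               samples.end(),
 *                                               comm);
 * @endcode
 */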