#include <boost/signals2.hpp>
#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// Without MPI (and without PETSc providing it), declare the few MPI types and
// constants that the interface below refers to:
using MPI_Comm     = int;
using MPI_Request  = int;
using MPI_Datatype = int;
#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#  ifndef MPI_REQUEST_NULL
#    define MPI_REQUEST_NULL 0
#  endif
#endif
#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
#    define DEAL_II_MPI_CONST_CAST(expr) (expr)
#  else
#    include <type_traits>
#    define DEAL_II_MPI_CONST_CAST(expr)       \
       const_cast<typename std::remove_const<  \
         typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
#  endif
#endif
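// Usage sketch (illustrative only): with an MPI implementation older than
// 3.0, routines such as MPI_Send take a non-const 'void *', so passing a
// pointer to const data needs the cast that this macro provides; with MPI
// >= 3.0 the macro expands to the expression unchanged. For example,
//
//   const std::vector<double> send_data(10, 1.0);
//   MPI_Send(DEAL_II_MPI_CONST_CAST(send_data.data()), send_data.size(),
//            MPI_DOUBLE, /*dest=*/0, /*tag=*/0, mpi_communicator);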
// Forward declarations of the class templates for which reduction overloads
// are declared below:
template <int rank, int dim, typename Number>
class Tensor;
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <typename Number>
class SparseMatrix;
IndexSet create_evenly_distributed_partitioning(
  const unsigned int my_partition_id,
  const unsigned int n_partitions,
  const IndexSet::size_type total_size);
const std::vector<unsigned int> mpi_processes_within_communicator(
  const MPI_Comm &comm_large, const MPI_Comm &comm_small);
std::vector<unsigned int> compute_point_to_point_communication_pattern(
  const MPI_Comm &                 mpi_comm,
  const std::vector<unsigned int> &destinations);

unsigned int compute_n_point_to_point_communications(
  const MPI_Comm &mpi_comm, const std::vector<unsigned int> &destinations);
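// Usage sketch (illustrative; the function name is hypothetical and a valid,
// initialized communicator is assumed): every process states which ranks it
// wants to send to; the two functions above then answer "who will send to
// me?" and "how many messages should I expect?".
inline void example_point_to_point_pattern(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  // Every process wants to send one message to its right neighbor:
  const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};

  // Ranks that will send to this process (here: the left neighbor) and the
  // number of messages to expect (here: 1):
  const std::vector<unsigned int> senders =
    Utilities::MPI::compute_point_to_point_communication_pattern(comm,
                                                                 destinations);
  const unsigned int n_incoming =
    Utilities::MPI::compute_n_point_to_point_communications(comm,
                                                            destinations);
  (void)senders;
  (void)n_incoming;
}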
#ifdef DEAL_II_WITH_MPI
int create_group(const MPI_Comm &comm,
                 const MPI_Group &group,
                 const int tag,
                 MPI_Comm *new_comm);
#endif

std::vector<IndexSet> create_ascending_partitioning(
  const MPI_Comm &comm, const IndexSet::size_type locally_owned_size);
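// Usage sketch (illustrative; the function name is hypothetical and a
// complete IndexSet declaration is assumed to be available): build one
// contiguous, ascending IndexSet per process from the locally owned sizes.
// If, e.g., rank 0 owns 3 and rank 1 owns 2 elements, the result is
// {[0,3), [3,5)} on both ranks.
inline void example_create_ascending_partitioning(const MPI_Comm &comm)
{
  const IndexSet::size_type n_locally_owned = 3; // hypothetical, may differ per rank

  const std::vector<IndexSet> owned_per_rank =
    Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);

  const IndexSet &my_owned =
    owned_per_rank[Utilities::MPI::this_mpi_process(comm)];
  (void)my_owned;
}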
#ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number = long double>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator  begin,
                            const Iterator  end,
                            const MPI_Comm &comm);
template <typename T>
T sum(const T &t, const MPI_Comm &mpi_communicator);

template <typename T, typename U>
void sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);

template <typename T>
void sum(const ArrayView<const T> &values,
         const MPI_Comm &          mpi_communicator,
         const ArrayView<T> &      sums);
template <int rank, int dim, typename Number>
SymmetricTensor<rank, dim, Number>
sum(const SymmetricTensor<rank, dim, Number> &local,
    const MPI_Comm &                           mpi_communicator);

template <int rank, int dim, typename Number>
Tensor<rank, dim, Number>
sum(const Tensor<rank, dim, Number> &local, const MPI_Comm &mpi_communicator);

template <typename Number>
void sum(const SparseMatrix<Number> &local,
         const MPI_Comm &            mpi_communicator,
         SparseMatrix<Number> &      global);
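// Usage sketch (illustrative; the function name is hypothetical and
// <deal.II/base/tensor.h> is assumed to be included as well): each entry of
// a Tensor (or SymmetricTensor, or SparseMatrix) is summed across all
// processes.
inline void example_tensor_sum(const MPI_Comm &comm)
{
  Tensor<1, 3> local_force;
  local_force[0] = 1.0; // hypothetical local contribution

  const Tensor<1, 3> global_force = Utilities::MPI::sum(local_force, comm);
  (void)global_force;
}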
template <typename T>
T max(const T &t, const MPI_Comm &mpi_communicator);

template <typename T, typename U>
void max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);

template <typename T>
void max(const ArrayView<const T> &values,
         const MPI_Comm &          mpi_communicator,
         const ArrayView<T> &      maxima);
template <typename T>
T min(const T &t, const MPI_Comm &mpi_communicator);

template <typename T, typename U>
void min(const T &values, const MPI_Comm &mpi_communicator, U &minima);

template <typename T>
void min(const ArrayView<const T> &values,
         const MPI_Comm &          mpi_communicator,
         const ArrayView<T> &      minima);
template <typename T>
T logical_or(const T &t, const MPI_Comm &mpi_communicator);

template <typename T, typename U>
void logical_or(const T &values, const MPI_Comm &mpi_communicator, U &results);

template <typename T>
void logical_or(const ArrayView<const T> &values,
                const MPI_Comm &           mpi_communicator,
                const ArrayView<T> &       results);
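// Usage sketch (illustrative; the function name and the local values are
// hypothetical, a valid communicator is assumed): scalar reductions,
// element-wise reductions over a container, and a logical "or".
inline void example_scalar_reductions(const MPI_Comm &comm)
{
  const double local_error = 0.1; // hypothetical per-rank value
  const double max_error   = Utilities::MPI::max(local_error, comm);
  const double min_error   = Utilities::MPI::min(local_error, comm);
  const double sum_error   = Utilities::MPI::sum(local_error, comm);

  // Element-wise reduction of a whole container:
  std::vector<unsigned int> local_counts(4, 1);
  std::vector<unsigned int> global_counts(4);
  Utilities::MPI::sum(local_counts, comm, global_counts);

  // logical_or() requires an integral type:
  const int locally_flagged = 1; // hypothetical
  const int any_flagged     = Utilities::MPI::logical_or(locally_flagged, comm);

  (void)max_error;
  (void)min_error;
  (void)sum_error;
  (void)any_flagged;
}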
std::vector<MinMaxAvg> min_max_avg(const std::vector<double> &my_values,
                                   const MPI_Comm &            mpi_communicator);
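// Usage sketch (illustrative; the function name and the timing value are
// hypothetical): the scalar overload of min_max_avg() listed further below
// combines several reductions in one call and also reports which ranks
// attained the extrema.
inline void example_min_max_avg(const MPI_Comm &comm)
{
  const double local_wall_time = 1.5; // hypothetical per-rank measurement

  const auto stats = Utilities::MPI::min_max_avg(local_wall_time, comm);
  // stats.min, stats.max, stats.sum, stats.avg, stats.min_index, stats.max_index
  (void)stats;
}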
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm &                 comm,
             const std::map<unsigned int, T> &objects_to_send);
template <typename T>
std::vector<T> all_gather(const MPI_Comm &comm, const T &object_to_send);
template <typename T>
std::vector<T> gather(const MPI_Comm &   comm,
                      const T &          object_to_send,
                      const unsigned int root_process = 0);
template <typename T>
T broadcast(const MPI_Comm &   comm,
            const T &          object_to_send,
            const unsigned int root_process = 0);
template <typename T>
T all_reduce(const T &                                     local_value,
             const MPI_Comm &                              comm,
             const std::function<T(const T &, const T &)> &combiner);
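// Usage sketch (illustrative; the function name and the local value are
// hypothetical): all_reduce() combines one value per process with a
// user-provided binary operation, here a global product. The explicit
// template argument is given so that the lambda can be converted to the
// std::function parameter.
inline void example_all_reduce(const MPI_Comm &comm)
{
  const double local_factor = 2.0; // hypothetical

  const double global_product = Utilities::MPI::all_reduce<double>(
    local_factor, comm, [](const double &a, const double &b) { return a * b; });
  (void)global_product;
}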
std::vector<unsigned int> compute_index_owner(const IndexSet &owned_indices,
                                              const IndexSet &indices_to_look_up,
                                              const MPI_Comm &comm);
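// Usage sketch (illustrative; the function name and the partition layout are
// hypothetical): given the locally owned indices and a set of indices this
// process is interested in, compute_index_owner() returns, for each
// requested index, the rank that owns it.
inline void example_compute_index_owner(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

  // Hypothetical setup: a global index space of size 100, of which each of
  // (assumed) 10 ranks owns a contiguous block of 10 indices.
  IndexSet owned(100);
  owned.add_range(10 * my_rank, 10 * (my_rank + 1));

  IndexSet requested(100);
  requested.add_index(42);

  // owners[0] is the rank that owns global index 42:
  const std::vector<unsigned int> owners =
    Utilities::MPI::compute_index_owner(owned, requested, comm);
  (void)owners;
}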
template <typename T>
std::vector<T>
compute_set_union(const std::vector<T> &vec, const MPI_Comm &comm);

template <typename T>
std::set<T>
compute_set_union(const std::set<T> &set, const MPI_Comm &comm);

// Reductions over fixed-size C-style arrays (the array length N is part of
// the type); each forwards to an internal, ArrayView-based reduction helper
// defined elsewhere in the library.
template <typename T, unsigned int N>
void
sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N]);

template <typename T, unsigned int N>
void
max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N]);

template <typename T, unsigned int N>
void
min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N]);

template <typename T, unsigned int N>
void
logical_or(const T (&values)[N],
           const MPI_Comm &mpi_communicator,
           T (&results)[N])
{
  static_assert(std::is_integral<T>::value,
                "The MPI_LOR operation only allows integral data types.");
  // the body then performs the element-wise MPI_LOR reduction across all
  // processes
}
template <typename T>
std::map<unsigned int, T>
some_to_some(const MPI_Comm &                 comm,
             const std::map<unsigned int, T> &objects_to_send)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  Assert(objects_to_send.size() < 2,
         ExcMessage("Cannot send to more than one processor."));
  Assert(objects_to_send.find(0) != objects_to_send.end() ||
           objects_to_send.size() == 0,
         ExcMessage("Can only send to myself or to nobody."));
  return objects_to_send;
#else
  const unsigned int my_proc = this_mpi_process(comm);

  std::map<unsigned int, T> received_objects;

  // Objects addressed to this process can be copied directly; every other
  // destination requires a point-to-point message.
  std::vector<unsigned int> send_to;
  send_to.reserve(objects_to_send.size());
  for (const auto &m : objects_to_send)
    if (m.first == my_proc)
      received_objects[my_proc] = m.second;
    else
      send_to.emplace_back(m.first);

  const unsigned int n_point_point_communications =
    compute_n_point_to_point_communications(comm, send_to);

  // Protect the exchange below from other concurrent communication on the
  // same communicator:
  static CollectiveMutex      mutex;
  CollectiveMutex::ScopedLock lock(mutex, comm);

  // If we neither send nor expect anything, we are done:
  if (send_to.size() == 0 && n_point_point_communications == 0)
    return received_objects;

  // Post one non-blocking send per destination rank; mpi_tag denotes a
  // message tag the library reserves for this particular exchange.
  std::vector<std::vector<char>> buffers_to_send(send_to.size());
  std::vector<MPI_Request>       buffer_send_requests(send_to.size());
  {
    unsigned int i = 0;
    for (const auto &rank_obj : objects_to_send)
      if (rank_obj.first != my_proc)
        {
          const auto &rank   = rank_obj.first;
          buffers_to_send[i] = Utilities::pack(rank_obj.second,
                                               /*allow_compression=*/false);
          const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                         buffers_to_send[i].size(),
                                         MPI_CHAR, rank, mpi_tag, comm,
                                         &buffer_send_requests[i]);
          AssertThrowMPI(ierr);
          ++i;
        }
  }

  // Receive the expected number of messages on a first-come, first-served
  // basis:
  {
    std::vector<char> buffer;
    for (unsigned int i = 0; i < n_point_point_communications; ++i)
      {
        // Probe for the next message and find out how long it is:
        MPI_Status status;
        int ierr = MPI_Probe(MPI_ANY_SOURCE, mpi_tag, comm, &status);
        AssertThrowMPI(ierr);

        int len;
        ierr = MPI_Get_count(&status, MPI_CHAR, &len);
        AssertThrowMPI(ierr);
        buffer.resize(len);

        // Receive and unpack it, remembering which rank it came from:
        const unsigned int rank = status.MPI_SOURCE;
        ierr = MPI_Recv(buffer.data(), len, MPI_CHAR, status.MPI_SOURCE,
                        status.MPI_TAG, comm, MPI_STATUS_IGNORE);
        AssertThrowMPI(ierr);

        Assert(received_objects.find(rank) == received_objects.end(),
               ExcMessage("I should not receive again from this rank"));
        received_objects[rank] =
          Utilities::unpack<T>(buffer, /*allow_compression=*/false);
      }
  }

  // Wait for all sends to complete before the buffers go out of scope:
  const int ierr = MPI_Waitall(send_to.size(),
                               buffer_send_requests.data(),
                               MPI_STATUSES_IGNORE);
  AssertThrowMPI(ierr);

  return received_objects;
#endif
}
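// Usage sketch (illustrative; the function name is hypothetical and an
// initialized communicator is assumed): each process sends a vector to its
// right neighbor and receives one from its left neighbor.
inline void example_some_to_some(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);

  std::map<unsigned int, std::vector<double>> to_send;
  to_send[(my_rank + 1) % n_ranks] = {1.0, 2.0, 3.0};

  const std::map<unsigned int, std::vector<double>> received =
    Utilities::MPI::some_to_some(comm, to_send);

  // received.at((my_rank + n_ranks - 1) % n_ranks) now holds {1.0, 2.0, 3.0}
  (void)received;
}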
template <typename T>
std::vector<T>
all_gather(const MPI_Comm &comm, const T &object)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  std::vector<T> v(1, object);
  return v;
#else
  const auto n_procs = n_mpi_processes(comm);

  // Serialize the object and first exchange the buffer sizes:
  std::vector<char> buffer       = Utilities::pack(object);
  int               n_local_data = buffer.size();

  std::vector<int> size_all_data(n_procs, 0);
  int ierr = MPI_Allgather(
    &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);
  AssertThrowMPI(ierr);

  // Compute the displacement at which each incoming buffer will be stored:
  std::vector<int> rdispls(n_procs);
  for (unsigned int i = 1; i < n_procs; ++i)
    rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

  // Exchange the actual data:
  std::vector<char> received_unrolled_buffer(rdispls.back() +
                                             size_all_data.back());
  ierr = MPI_Allgatherv(buffer.data(),
                        n_local_data,
                        MPI_CHAR,
                        received_unrolled_buffer.data(),
                        size_all_data.data(),
                        rdispls.data(),
                        MPI_CHAR,
                        comm);
  AssertThrowMPI(ierr);

  // Unpack one object per process:
  std::vector<T> received_objects(n_procs);
  for (unsigned int i = 0; i < n_procs; ++i)
    {
      std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                       rdispls[i],
                                     received_unrolled_buffer.begin() +
                                       rdispls[i] + size_all_data[i]);
      received_objects[i] = Utilities::unpack<T>(local_buffer);
    }
  return received_objects;
#endif
}
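// Usage sketch (illustrative; the function name is hypothetical): every
// process contributes one object and receives the objects of all processes,
// indexed by rank.
inline void example_all_gather(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

  const std::string name = "rank " + std::to_string(my_rank);
  const std::vector<std::string> all_names =
    Utilities::MPI::all_gather(comm, name);

  // all_names[r] is the string contributed by rank r
  (void)all_names;
}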
template <typename T>
std::vector<T>
gather(const MPI_Comm &   comm,
       const T &          object_to_send,
       const unsigned int root_process)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  std::vector<T> v(1, object_to_send);
  return v;
#else
  const auto n_procs = n_mpi_processes(comm);
  const auto my_rank = this_mpi_process(comm);
  AssertIndexRange(root_process, n_procs);

  std::vector<char> buffer       = Utilities::pack(object_to_send);
  int               n_local_data = buffer.size();

  // Only the root process needs to know how large every rank's buffer is:
  std::vector<int> size_all_data;
  if (my_rank == root_process)
    size_all_data.resize(n_procs, 0);

  int ierr = MPI_Gather(&n_local_data, 1, MPI_INT,
                        size_all_data.data(), 1, MPI_INT,
                        root_process, comm);
  AssertThrowMPI(ierr);

  // Compute receive displacements, again only on the root process:
  std::vector<int> rdispls;
  if (my_rank == root_process)
    {
      rdispls.resize(n_procs, 0);
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
    }

  std::vector<char> received_unrolled_buffer;
  if (my_rank == root_process)
    received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

  ierr = MPI_Gatherv(buffer.data(), n_local_data, MPI_CHAR,
                     received_unrolled_buffer.data(), size_all_data.data(),
                     rdispls.data(), MPI_CHAR, root_process, comm);
  AssertThrowMPI(ierr);

  // Unpack one object per process, only on the root process:
  std::vector<T> received_objects;
  if (my_rank == root_process)
    {
      received_objects.resize(n_procs);
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          const std::vector<char> local_buffer(
            received_unrolled_buffer.begin() + rdispls[i],
            received_unrolled_buffer.begin() + rdispls[i] +
              size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }
    }
  return received_objects;
#endif
}
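// Usage sketch (illustrative; the function name is hypothetical): like
// all_gather(), but only 'root_process' receives the collected objects;
// every other rank gets an empty vector back.
inline void example_gather(const MPI_Comm &comm)
{
  const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);

  const std::vector<std::string> gathered =
    Utilities::MPI::gather(comm, std::to_string(my_rank), /*root_process=*/0);

  // On rank 0, gathered.size() equals the number of processes and
  // gathered[r] came from rank r; on all other ranks, gathered is empty.
  (void)gathered;
}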
template <typename T>
T broadcast(const MPI_Comm &   comm,
            const T &          object_to_send,
            const unsigned int root_process)
{
#ifndef DEAL_II_WITH_MPI
  (void)comm;
  (void)root_process;
  return object_to_send;
#else
  // Pack the object on the root process and broadcast the buffer size first:
  std::vector<char> buffer;
  unsigned int      buffer_size = numbers::invalid_unsigned_int;
  if (this_mpi_process(comm) == root_process)
    {
      buffer      = Utilities::pack(object_to_send, false);
      buffer_size = buffer.size();
    }
  int ierr = MPI_Bcast(&buffer_size, 1, MPI_UNSIGNED, root_process, comm);
  AssertThrowMPI(ierr);

  // Size the receive buffer on all other ranks, then broadcast the data:
  if (this_mpi_process(comm) != root_process)
    buffer.resize(buffer_size);
  ierr = MPI_Bcast(buffer.data(), buffer_size, MPI_CHAR, root_process, comm);
  AssertThrowMPI(ierr);

  if (this_mpi_process(comm) == root_process)
    return object_to_send;
  else
    return Utilities::unpack<T>(buffer, false);
#endif
}
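// Usage sketch (illustrative; the function name and the parameter map are
// hypothetical): the root process's object replaces every other rank's copy;
// the object only needs to be serializable.
inline void example_broadcast(const MPI_Comm &comm)
{
  std::map<std::string, double> parameters;
  if (Utilities::MPI::this_mpi_process(comm) == 0)
    parameters = {{"viscosity", 0.1}, {"density", 1.2}};

  parameters = Utilities::MPI::broadcast(comm, parameters, /*root_process=*/0);
  // 'parameters' is now identical on every rank
}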
#ifdef DEAL_II_WITH_MPI
template <class Iterator, typename Number>
std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
mean_and_standard_deviation(const Iterator  begin,
                            const Iterator  end,
                            const MPI_Comm &comm)
{
  // A simple, straightforward implementation: reduce the local sum globally,
  // compute the mean, then accumulate the squared deviations from it.
  using Std = typename numbers::NumberTraits<Number>::real_type;

  const Number sum = std::accumulate(begin, end, Number(0.));

  const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
  Assert(size > 0, ExcDivideByZero());
  const Number mean =
    Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);

  Std sq_sum = 0.;
  std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
    sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
  });
  sq_sum = Utilities::MPI::sum(sq_sum, comm);
  return std::make_pair(mean,
                        std::sqrt(sq_sum / static_cast<Std>(size - 1)));
}
#endif
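// Usage sketch (illustrative; the function name and the sample values are
// hypothetical): mean and sample standard deviation of values distributed
// across all processes.
inline void example_mean_and_standard_deviation(const MPI_Comm &comm)
{
  const std::vector<double> local_samples = {1.0, 2.0, 4.0};

  const auto stats = Utilities::MPI::mean_and_standard_deviation(
    local_samples.begin(), local_samples.end(), comm);
  // stats.first is the mean, stats.second the standard deviation
  (void)stats;
}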
// Entities referenced in this file:

using size_type = types::global_dof_index;
void sum(const SparseMatrix< Number > &local, const MPI_Comm &mpi_communicator, SparseMatrix< Number > &global)
SymmetricTensor< rank, dim, Number > sum(const SymmetricTensor< rank, dim, Number > &local, const MPI_Comm &mpi_communicator)
Tensor< rank, dim, Number > sum(const Tensor< rank, dim, Number > &local, const MPI_Comm &mpi_communicator)
ScopedLock(CollectiveMutex &mutex, const MPI_Comm &comm)
void unlock(const MPI_Comm &comm)
void lock(const MPI_Comm &comm)
DuplicatedCommunicator(const DuplicatedCommunicator &)=delete
DuplicatedCommunicator(const MPI_Comm &communicator)
~DuplicatedCommunicator()
const MPI_Comm & operator*() const
DuplicatedCommunicator & operator=(const DuplicatedCommunicator &)=delete
static void unregister_request(MPI_Request &request)
static std::set< MPI_Request * > requests
MPI_InitFinalize(int &argc, char **&argv, const unsigned int max_num_threads=numbers::invalid_unsigned_int)
static void register_request(MPI_Request &request)
#define DEAL_II_NAMESPACE_OPEN
#define DEAL_II_NAMESPACE_CLOSE
__global__ void set(Number *val, const Number s, const size_type N)
#define Assert(cond, exc)
#define AssertThrowMPI(error_code)
#define AssertIndexRange(index, range)
static ::ExceptionBase & ExcInternalError()
static ::ExceptionBase & ExcDivideByZero()
static ::ExceptionBase & ExcMessage(std::string arg1)
VectorType::value_type * end(VectorType &V)
VectorType::value_type * begin(VectorType &V)
T broadcast(const MPI_Comm &comm, const T &object_to_send, const unsigned int root_process=0)
void free_communicator(MPI_Comm &mpi_communicator)
std::vector< unsigned int > compute_index_owner(const IndexSet &owned_indices, const IndexSet &indices_to_look_up, const MPI_Comm &comm)
T logical_or(const T &t, const MPI_Comm &mpi_communicator)
std::vector< T > all_gather(const MPI_Comm &comm, const T &object_to_send)
unsigned int compute_n_point_to_point_communications(const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations)
T all_reduce(const T &local_value, const MPI_Comm &comm, const std::function< T(const T &, const T &)> &combiner)
std::vector< IndexSet > create_ascending_partitioning(const MPI_Comm &comm, const IndexSet::size_type locally_owned_size)
IndexSet create_evenly_distributed_partitioning(const MPI_Comm &comm, const IndexSet::size_type total_size)
unsigned int this_mpi_process(const MPI_Comm &mpi_communicator)
std::vector< unsigned int > compute_point_to_point_communication_pattern(const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations)
T min(const T &t, const MPI_Comm &mpi_communicator)
MPI_Comm duplicate_communicator(const MPI_Comm &mpi_communicator)
std::vector< T > gather(const MPI_Comm &comm, const T &object_to_send, const unsigned int root_process=0)
T sum(const T &t, const MPI_Comm &mpi_communicator)
std::vector< T > compute_set_union(const std::vector< T > &vec, const MPI_Comm &comm)
const std::vector< unsigned int > mpi_processes_within_communicator(const MPI_Comm &comm_large, const MPI_Comm &comm_small)
std::pair< Number, typename numbers::NumberTraits< Number >::real_type > mean_and_standard_deviation(const Iterator begin, const Iterator end, const MPI_Comm &comm)
unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)
MinMaxAvg min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
T max(const T &t, const MPI_Comm &mpi_communicator)
std::map< unsigned int, T > some_to_some(const MPI_Comm &comm, const std::map< unsigned int, T > &objects_to_send)
int create_group(const MPI_Comm &comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
size_t pack(const T &object, std::vector< char > &dest_buffer, const bool allow_compression=true)
IndexSet create_evenly_distributed_partitioning(const unsigned int my_partition_id, const unsigned int n_partitions, const IndexSet::size_type total_size)
static const unsigned int invalid_unsigned_int
::VectorizedArray< Number, width > sqrt(const ::VectorizedArray< Number, width > &)
boost::signals2::signal< void()> at_mpi_init
boost::signals2::signal< void()> at_mpi_finalize
static constexpr std::enable_if< std::is_same< Dummy, number >::value &&is_cuda_compatible< Dummy >::value, real_type >::type abs_square(const number &x)
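// Usage sketch (illustrative): a typical deal.II program initializes MPI
// through MPI_InitFinalize (which also triggers the at_mpi_init /
// at_mpi_finalize signals listed above) and can work on a duplicated
// communicator so that library-internal messages cannot collide with the
// user's own communication:
//
//   int main(int argc, char **argv)
//   {
//     Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
//
//     Utilities::MPI::DuplicatedCommunicator duplicated(MPI_COMM_WORLD);
//     const double global_sum = Utilities::MPI::sum(1.0, *duplicated);
//     (void)global_sum;
//   }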