#include <deal.II/base/config.h>

#include <deal.II/base/array_view.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/numbers.h>

#include <map>
#include <numeric>
#include <utility>
#include <vector>

#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// Without MPI, we would still like to use some constructs with MPI data
// types. Therefore, create some dummies.
using MPI_Comm     = int;
using MPI_Datatype = int;
using MPI_Op       = int;
#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#endif

#ifdef DEAL_II_WITH_MPI
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
#    define DEAL_II_MPI_CONST_CAST(expr) (expr)
#  else
#    include <type_traits>
#    define DEAL_II_MPI_CONST_CAST(expr)       \
       const_cast<typename std::remove_const<  \
         typename std::remove_pointer<decltype(expr)>::type>::type *>(expr)
#  endif
#endif
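
// A sketch of how the DEAL_II_MPI_CONST_CAST macro above is used
// (hypothetical buffer and variable names): MPI implementations older than
// MPI-3.0 take non-const send buffers, so a const pointer has to be cast
// before being handed to them:
//
//   const double *send_buffer = local_values.data();
//   MPI_Send(DEAL_II_MPI_CONST_CAST(send_buffer),
//            n_entries, MPI_DOUBLE, destination_rank, tag, communicator);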

DEAL_II_NAMESPACE_OPEN

// Forward declarations of the tensorial and matrix types for which
// reduction operations are declared below.
template <int rank, int dim, typename Number>
class Tensor;
template <int rank, int dim, typename Number>
class SymmetricTensor;
template <typename Number>
class SparseMatrix;

namespace Utilities
{
  namespace MPI
  {
    // Given the ranks this process wants to send to, compute the ranks it
    // will receive from. This is a collective operation over @p mpi_comm.
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);

    // Number of point-to-point messages this process will receive.
    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations);
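
    // Usage sketch (illustrative ranks): every process passes its own
    // destination list and learns whom it will receive from. All processes
    // of the communicator must participate:
    //
    //   const std::vector<unsigned int> destinations = {1, 2}; // on rank 0
    //   const std::vector<unsigned int> sources =
    //     Utilities::MPI::compute_point_to_point_communication_pattern(
    //       MPI_COMM_WORLD, destinations);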

#ifdef DEAL_II_WITH_MPI
    // Create a new communicator from a subgroup of @p comm, analogous to
    // MPI_Comm_create_group.
    int
    create_group(const MPI_Comm & comm,
                 const MPI_Group &group,
                 const int        tag,
                 MPI_Comm *       new_comm);
#endif
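
    // Usage sketch (hypothetical rank list and tag): build a communicator
    // over the even-numbered ranks of MPI_COMM_WORLD:
    //
    //   MPI_Group world_group, even_group;
    //   MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    //   const std::vector<int> even_ranks = {0, 2, 4};
    //   MPI_Group_incl(world_group, even_ranks.size(),
    //                  even_ranks.data(), &even_group);
    //   MPI_Comm even_comm;
    //   Utilities::MPI::create_group(MPI_COMM_WORLD, even_group,
    //                                /*tag=*/55, &even_comm);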

    // Given the locally owned number of elements, create a 1:1 ascending
    // partitioning of the global index space among all processes.
    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm &           comm,
                                  const IndexSet::size_type &local_size);

#ifdef DEAL_II_WITH_MPI
    // Mean and standard deviation of the values in [begin, end) across all
    // processes of @p comm.
    template <class Iterator, typename Number = long double>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm);
#endif
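
    // Usage sketch (hypothetical sample vector): statistics over the union
    // of every rank's local samples:
    //
    //   std::vector<double> samples = get_local_samples();
    //   const auto stats = Utilities::MPI::mean_and_standard_deviation(
    //     samples.begin(), samples.end(), MPI_COMM_WORLD);
    //   const double mean = stats.first, std_dev = stats.second;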

    // Sum of scalars over all processes.
    template <typename T>
    T
    sum(const T &t, const MPI_Comm &mpi_communicator);

    // Element-wise sum over all processes; @p values and @p sums may refer
    // to the same container.
    template <typename T, typename U>
    void
    sum(const T &values, const MPI_Comm &mpi_communicator, U &sums);

    template <typename T>
    void
    sum(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      sums);

    template <int rank, int dim, typename Number>
    SymmetricTensor<rank, dim, Number>
    sum(const SymmetricTensor<rank, dim, Number> &local,
        const MPI_Comm &                          mpi_communicator);

    template <int rank, int dim, typename Number>
    Tensor<rank, dim, Number>
    sum(const Tensor<rank, dim, Number> &local,
        const MPI_Comm &                 mpi_communicator);

    // Sum the entries of a sparse matrix whose sparsity pattern is
    // identical on all processes.
    template <typename Number>
    void
    sum(const SparseMatrix<Number> &local,
        const MPI_Comm &            mpi_communicator,
        SparseMatrix<Number> &      global);
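
    // Usage sketch: the scalar overload is the most common one;
    // compute_local_energy() is a hypothetical helper:
    //
    //   const double local_energy = compute_local_energy();
    //   const double total_energy =
    //     Utilities::MPI::sum(local_energy, MPI_COMM_WORLD);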

    // Maximum of scalars over all processes.
    template <typename T>
    T
    max(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    max(const T &values, const MPI_Comm &mpi_communicator, U &maxima);

    template <typename T>
    void
    max(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      maxima);

    // Minimum of scalars over all processes.
    template <typename T>
    T
    min(const T &t, const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void
    min(const T &values, const MPI_Comm &mpi_communicator, U &minima);

    template <typename T>
    void
    min(const ArrayView<const T> &values,
        const MPI_Comm &          mpi_communicator,
        const ArrayView<T> &      minima);
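
    // Usage sketch: global bounds of a locally computed quantity;
    // compute_local_time_step() is a hypothetical helper:
    //
    //   const double local_dt = compute_local_time_step();
    //   const double dt_min = Utilities::MPI::min(local_dt, MPI_COMM_WORLD);
    //   const double dt_max = Utilities::MPI::max(local_dt, MPI_COMM_WORLD);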

    // Aggregate returned by min_max_avg().
    struct MinMaxAvg
    {
      double       sum;
      double       min;
      double       max;
      unsigned int min_index;
      unsigned int max_index;
      double       avg;
    };

    // Minimum, maximum, and average of one double over all processes,
    // including the ranks that attain the extremes.
    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &mpi_communicator);
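
    // Usage sketch (hypothetical timing value):
    //
    //   const double local_wall_time = timer.wall_time();
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg(local_wall_time, MPI_COMM_WORLD);
    //   // stats.min, stats.max, stats.avg, stats.min_index, stats.max_index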

    // Send each rank in the map's key set an object of type T; returns the
    // objects received from other ranks, keyed by the sending rank.
    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send);
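
    // Usage sketch (illustrative payloads): rank r sends a string to rank
    // (r + 1) % n and receives one from its other neighbor:
    //
    //   const unsigned int n  = Utilities::MPI::n_mpi_processes(comm);
    //   const unsigned int me = Utilities::MPI::this_mpi_process(comm);
    //   std::map<unsigned int, std::string> to_send =
    //     {{(me + 1) % n, "hello from " + std::to_string(me)}};
    //   const auto received = Utilities::MPI::some_to_some(comm, to_send);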

    // Gather one object of type T from every process on every process.
    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object_to_send);
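
    // Usage sketch: collect every rank's locally owned size;
    // n_locally_owned is a hypothetical local variable:
    //
    //   const std::vector<unsigned int> all_sizes =
    //     Utilities::MPI::all_gather(comm, n_locally_owned);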

    // Gather one object of type T from every process on the root process
    // only; all other ranks receive an empty vector.
    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process = 0);
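
    // Usage sketch: gather per-rank log strings on rank 0; local_log is a
    // hypothetical std::string:
    //
    //   const std::vector<std::string> logs =
    //     Utilities::MPI::gather(comm, local_log, /*root_process=*/0);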

    namespace internal
    {
      // Reduce the contents of an ArrayView with the given MPI operation;
      // used to implement the sum(), max(), and min() overloads below.
      template <typename T>
      void
      all_reduce(const MPI_Op &            mpi_op,
                 const ArrayView<const T> &values,
                 const MPI_Comm &          mpi_communicator,
                 const ArrayView<T> &      output);
    } // namespace internal

    template <typename T, unsigned int N>
    void
    sum(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&sums)[N])
    {
      internal::all_reduce(MPI_SUM,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(sums, N));
    }

    template <typename T, unsigned int N>
    void
    max(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&maxima)[N])
    {
      internal::all_reduce(MPI_MAX,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(maxima, N));
    }

    template <typename T, unsigned int N>
    void
    min(const T (&values)[N], const MPI_Comm &mpi_communicator, T (&minima)[N])
    {
      internal::all_reduce(MPI_MIN,
                           ArrayView<const T>(values, N),
                           mpi_communicator,
                           ArrayView<T>(minima, N));
    }
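
    // Usage sketch for the C-array overloads (illustrative values):
    //
    //   double partial_sums[2] = {local_mass, local_energy};
    //   double total_sums[2];
    //   Utilities::MPI::sum(partial_sums, MPI_COMM_WORLD, total_sums);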

    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &                 comm,
                 const std::map<unsigned int, T> &objects_to_send)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      Assert(objects_to_send.size() == 0,
             ExcMessage("Cannot send to more than one processor."));
      Assert(objects_to_send.find(0) != objects_to_send.end() ||
               objects_to_send.size() == 0,
             ExcMessage("Can only send to myself or to nobody."));
      return objects_to_send;
#  else
      // Collect the ranks this process sends to.
      std::vector<unsigned int> send_to(objects_to_send.size());
      {
        unsigned int i = 0;
        for (const auto &m : objects_to_send)
          send_to[i++] = m.first;
      }
      AssertDimension(send_to.size(), objects_to_send.size());

      // Figure out the ranks this process will receive from.
      const auto receive_from =
        Utilities::MPI::compute_point_to_point_communication_pattern(comm,
                                                                     send_to);

      // Pack each object and post a non-blocking send for it.
      std::vector<std::vector<char>> buffers_to_send(send_to.size());
      std::vector<MPI_Request>       buffer_send_requests(send_to.size());
      {
        unsigned int i = 0;
        for (const auto &rank_obj : objects_to_send)
          {
            const auto &rank   = rank_obj.first;
            buffers_to_send[i] = Utilities::pack(rank_obj.second);
            const int ierr     = MPI_Isend(buffers_to_send[i].data(),
                                       buffers_to_send[i].size(),
                                       MPI_CHAR,
                                       rank,
                                       21,
                                       comm,
                                       &buffer_send_requests[i]);
            AssertThrowMPI(ierr);
            ++i;
          }
      }

      // Receive on a first-come/first-served basis.
      std::map<unsigned int, T> received_objects;
      {
        std::vector<char> buffer;
        for (unsigned int i = 0; i < receive_from.size(); ++i)
          {
            // Probe for the next message and determine its length and source.
            MPI_Status status;
            int        ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
            AssertThrowMPI(ierr);

            int len;
            ierr = MPI_Get_count(&status, MPI_CHAR, &len);
            AssertThrowMPI(ierr);
            buffer.resize(len);

            const unsigned int rank = status.MPI_SOURCE;

            // Receive and unpack the message.
            ierr = MPI_Recv(
              buffer.data(), len, MPI_CHAR, rank, 21, comm, MPI_STATUS_IGNORE);
            AssertThrowMPI(ierr);
            Assert(received_objects.find(rank) == received_objects.end(),
                   ExcInternalError(
                     "I should not receive again from this rank"));
            received_objects[rank] = Utilities::unpack<T>(buffer);
          }
      }

      // Wait for all sends to complete before the buffers go out of scope.
      const int ierr = MPI_Waitall(send_to.size(),
                                   buffer_send_requests.data(),
                                   MPI_STATUSES_IGNORE);
      AssertThrowMPI(ierr);

      return received_objects;
#  endif // deal.II with MPI
    }

    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm, const T &object)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      std::vector<T> v(1, object);
      return v;
#  else
      const auto n_procs = ::dealii::Utilities::MPI::n_mpi_processes(comm);

      std::vector<char> buffer = Utilities::pack(object);

      int n_local_data = buffer.size();

      // Vector to store the size of the packed data of every process.
      std::vector<int> size_all_data(n_procs, 0);

      // Exchange the buffer sizes.
      MPI_Allgather(
        &n_local_data, 1, MPI_INT, size_all_data.data(), 1, MPI_INT, comm);

      // Compute the displacement, relative to recvbuf, at which to store
      // each incoming buffer.
      std::vector<int> rdispls(n_procs);
      rdispls[0] = 0;
      for (unsigned int i = 1; i < n_procs; ++i)
        rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];

      // Exchange the packed objects themselves.
      std::vector<char> received_unrolled_buffer(rdispls.back() +
                                                 size_all_data.back());

      MPI_Allgatherv(buffer.data(),
                     n_local_data,
                     MPI_CHAR,
                     received_unrolled_buffer.data(),
                     size_all_data.data(),
                     rdispls.data(),
                     MPI_CHAR,
                     comm);

      // Unpack every process' contribution.
      std::vector<T> received_objects(n_procs);
      for (unsigned int i = 0; i < n_procs; ++i)
        {
          std::vector<char> local_buffer(received_unrolled_buffer.begin() +
                                           rdispls[i],
                                         received_unrolled_buffer.begin() +
                                           rdispls[i] + size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }

      return received_objects;
#  endif
    }

    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &   comm,
           const T &          object_to_send,
           const unsigned int root_process)
    {
#  ifndef DEAL_II_WITH_MPI
      (void)comm;
      (void)root_process;
      std::vector<T> v(1, object_to_send);
      return v;
#  else
      const auto n_procs = ::dealii::Utilities::MPI::n_mpi_processes(comm);
      const auto my_rank = ::dealii::Utilities::MPI::this_mpi_process(comm);

      Assert(root_process < n_procs, ExcIndexRange(root_process, 0, n_procs));

      std::vector<char> buffer       = Utilities::pack(object_to_send);
      int               n_local_data = buffer.size();

      // Only the root process needs to know how much data each rank sends.
      std::vector<int> size_all_data;
      if (my_rank == root_process)
        size_all_data.resize(n_procs, 0);

      // Exchange the buffer sizes.
      int ierr = MPI_Gather(&n_local_data,
                            1,
                            MPI_INT,
                            size_all_data.data(),
                            1,
                            MPI_INT,
                            root_process,
                            comm);
      AssertThrowMPI(ierr);

      // Compute the receive displacements; again only needed on the root.
      std::vector<int> rdispls;
      if (my_rank == root_process)
        {
          rdispls.resize(n_procs, 0);
          for (unsigned int i = 1; i < n_procs; ++i)
            rdispls[i] = rdispls[i - 1] + size_all_data[i - 1];
        }

      std::vector<char> received_unrolled_buffer;
      if (my_rank == root_process)
        received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());

      ierr = MPI_Gatherv(buffer.data(),
                         n_local_data,
                         MPI_CHAR,
                         received_unrolled_buffer.data(),
                         size_all_data.data(),
                         rdispls.data(),
                         MPI_CHAR,
                         root_process,
                         comm);
      AssertThrowMPI(ierr);

      // Unpack on the root; all other ranks return an empty vector.
      std::vector<T> received_objects;

      if (my_rank == root_process)
        {
          received_objects.resize(n_procs);

          for (unsigned int i = 0; i < n_procs; ++i)
            {
              const std::vector<char> local_buffer(
                received_unrolled_buffer.begin() + rdispls[i],
                received_unrolled_buffer.begin() + rdispls[i] +
                  size_all_data[i]);
              received_objects[i] = Utilities::unpack<T>(local_buffer);
            }
        }
      return received_objects;
#  endif
    }

#  ifdef DEAL_II_WITH_MPI
    template <class Iterator, typename Number>
    std::pair<Number, typename numbers::NumberTraits<Number>::real_type>
    mean_and_standard_deviation(const Iterator  begin,
                                const Iterator  end,
                                const MPI_Comm &comm)
    {
      // A simple two-pass implementation: compute the global mean first,
      // then the global sum of squared deviations from it.
      using Std = typename numbers::NumberTraits<Number>::real_type;

      const Number sum = std::accumulate(begin, end, Number(0.));

      const auto size = Utilities::MPI::sum(std::distance(begin, end), comm);
      Assert(size > 0, ExcDivideByZero());
      const Number mean =
        Utilities::MPI::sum(sum, comm) / static_cast<Std>(size);

      Std sq_sum = 0.;
      std::for_each(begin, end, [&mean, &sq_sum](const Number &v) {
        sq_sum += numbers::NumberTraits<Number>::abs_square(v - mean);
      });
      sq_sum = Utilities::MPI::sum(sq_sum, comm);

      // Sample standard deviation, using Bessel's correction (size - 1).
      return std::make_pair(mean,
                            std::sqrt(sq_sum / static_cast<Std>(size - 1)));
    }
#  endif
  } // end of namespace MPI
} // end of namespace Utilities

DEAL_II_NAMESPACE_CLOSE