20 #include <deal.II/base/mpi.templates.h>
22 #include <deal.II/base/mpi_consensus_algorithms.templates.h>
31 #include <boost/serialization/utility.hpp>
38 #ifdef DEAL_II_WITH_TRILINOS
39 # ifdef DEAL_II_WITH_MPI
43 # include <Epetra_MpiComm.h>
47 #ifdef DEAL_II_WITH_PETSC
51 # include <petscsys.h>
54 #ifdef DEAL_II_WITH_SLEPC
57 # include <slepcsys.h>
60 #ifdef DEAL_II_WITH_P4EST
61 # include <p4est_bits.h>
64 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
65 # include <zoltan_cpp.h>
75 const unsigned int n_partitions,
78 const unsigned int remain = total_size % n_partitions;
83 min_size * my_partition_id + std::min(my_partition_id, remain);
85 min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
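// Usage sketch (not part of mpi.cc): distributing a global range of 10 elements
// evenly across the processes of a communicator with the overload listed below
// (Utilities::MPI::create_evenly_distributed_partitioning(comm, total_size));
// as the code above shows, the first total_size % n_partitions ranks receive
// one extra element. Variable names are illustrative only.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>

void example_even_partitioning(const MPI_Comm comm)
{
  // Each process obtains its own contiguous slice of [0, 10).
  const dealii::IndexSet locally_owned =
    dealii::Utilities::MPI::create_evenly_distributed_partitioning(comm, 10);
  (void)locally_owned;
}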
93 #ifdef DEAL_II_WITH_MPI
108 template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
109 template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
114 min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
126 std::vector<MinMaxAvg>
128 const MPI_Comm & mpi_communicator)
130 std::vector<MinMaxAvg> results(my_values.size());
138 #ifdef DEAL_II_WITH_MPI
143 const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
154 const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
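// Usage sketch (not part of mpi.cc): the two queries wrapped by the functions
// above, typically called with MPI_COMM_WORLD. Names are illustrative.
#include <deal.II/base/mpi.h>

void example_rank_and_size(const MPI_Comm comm)
{
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);
  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  (void)n_ranks;
  (void)my_rank;
}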
162 const std::vector<unsigned int>
164 const MPI_Comm &comm_small)
167 return std::vector<unsigned int>{0};
172 std::vector<unsigned int> ranks(size);
173 const int ierr = MPI_Allgather(
174 &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
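// Usage sketch (not part of mpi.cc): translating the members of a small
// sub-communicator into their ranks within the large communicator, using the
// MPI_Allgather pattern shown above. The even/odd split is an arbitrary example.
#include <deal.II/base/mpi.h>
#include <vector>

void example_ranks_within(const MPI_Comm comm_large)
{
  const unsigned int my_rank =
    dealii::Utilities::MPI::this_mpi_process(comm_large);

  // Example split: even and odd ranks each form a sub-communicator.
  MPI_Comm comm_small;
  int ierr = MPI_Comm_split(comm_large, my_rank % 2, my_rank, &comm_small);
  AssertThrowMPI(ierr);

  // Ranks (numbered within comm_large) of all processes belonging to comm_small.
  const std::vector<unsigned int> ranks =
    dealii::Utilities::MPI::mpi_processes_within_communicator(comm_large,
                                                              comm_small);
  (void)ranks;

  ierr = MPI_Comm_free(&comm_small);
  AssertThrowMPI(ierr);
}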
185 MPI_Comm new_communicator;
186 const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
188 return new_communicator;
197 const int ierr = MPI_Comm_free(&mpi_communicator);
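// Usage sketch (not part of mpi.cc): taking a private copy of a communicator
// so that a library's messages cannot collide with other traffic, then handing
// it back to MPI when done. Names are illustrative.
#include <deal.II/base/mpi.h>

void example_duplicate(const MPI_Comm &mpi_communicator)
{
  MPI_Comm my_comm =
    dealii::Utilities::MPI::duplicate_communicator(mpi_communicator);

  // ... use my_comm for internal communication ...

  // Frees the duplicate; my_comm must not be used afterwards.
  dealii::Utilities::MPI::free_communicator(my_comm);
}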
205 const MPI_Group &group,
209 const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
216 std::vector<IndexSet>
221 const std::vector<IndexSet::size_type> sizes =
223 const auto total_size =
226 std::vector<IndexSet> res(n_proc, IndexSet(total_size));
229 for (unsigned int i = 0; i < n_proc; ++i)
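// Usage sketch (not part of mpi.cc): building the ascending, contiguous
// partition described above from each process's locally owned size. The local
// size of 5 is an arbitrary example value.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <vector>

void example_ascending_partitioning(const MPI_Comm comm)
{
  // Every process passes its own size; each one receives the complete vector
  // with one IndexSet per rank, all of global size 5 * n_mpi_processes(comm).
  const std::vector<dealii::IndexSet> partition =
    dealii::Utilities::MPI::create_ascending_partitioning(comm, 5);

  const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process(comm);
  const dealii::IndexSet &my_range = partition[my_rank];
  (void)my_range;
}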
254 std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
266 const MPI_Count n_chunks = n_bytes / max_signed_int;
267 const MPI_Count n_bytes_remainder = n_bytes % max_signed_int;
269 Assert(static_cast<std::size_t>(max_signed_int * n_chunks + n_bytes_remainder) == n_bytes,
275 int ierr = MPI_Type_vector(
276 n_chunks, max_signed_int, max_signed_int, MPI_BYTE, &chunks);
279 MPI_Datatype remainder;
280 ierr = MPI_Type_contiguous(n_bytes_remainder, MPI_BYTE, &remainder);
283 const int blocklengths[2] = {1, 1};
284 const MPI_Aint displacements[2] = {0, static_cast<MPI_Aint>(n_chunks) * max_signed_int};
292 displacements[1] == n_chunks * max_signed_int,
294 "Error in create_mpi_data_type_n_bytes(): the size is too big to support."));
298 const MPI_Datatype types[2] = {chunks, remainder};
300 MPI_Type_create_struct(2, blocklengths, displacements, types, &result);
303 ierr = MPI_Type_commit(&result);
306 ierr = MPI_Type_free(&chunks);
308 ierr = MPI_Type_free(&remainder);
313 ierr = MPI_Type_size_x(result, &size64);
328 auto deleter = [](MPI_Datatype *p) {
331 const int ierr = MPI_Type_free(p);
339 return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
340 new MPI_Datatype(result), deleter);
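// Usage sketch (not part of mpi.cc): sending a buffer that may exceed the
// 2^31-1 element count of plain MPI calls by wrapping it in the datatype built
// above; the count argument is then 1 regardless of the buffer size. The
// destination rank and tag are illustrative.
#include <deal.II/base/exceptions.h>
#include <deal.II/base/mpi.h>
#include <vector>

void example_big_send(const std::vector<char> &buffer, const MPI_Comm comm)
{
  // The returned unique_ptr frees the committed MPI datatype automatically.
  const auto bigtype =
    dealii::Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());

  const int ierr =
    MPI_Send(buffer.data(), 1, *bigtype, /*dest=*/0, /*tag=*/0, comm);
  AssertThrowMPI(ierr);
}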
345 std::vector<unsigned int>
347 const MPI_Comm & mpi_comm,
348 const std::vector<unsigned int> &destinations)
355 for (const unsigned int destination : destinations)
368 const bool my_destinations_are_unique = [destinations]() {
369 if (destinations.size() == 0)
373 std::vector<unsigned int> my_destinations = destinations;
374 std::sort(my_destinations.begin(), my_destinations.end());
375 return (std::adjacent_find(my_destinations.begin(),
376 my_destinations.end()) ==
377 my_destinations.end());
387 return ConsensusAlgorithms::nbx<char, char>(
388 destinations, {}, {}, {}, mpi_comm);
402 std::vector<unsigned int> dest_vector(n_procs);
403 for (const auto &el : destinations)
409 unsigned int n_recv_from;
410 const int ierr = MPI_Reduce_scatter_block(
411 dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
416 std::vector<MPI_Request> send_requests(destinations.size());
417 for (const auto &el : destinations)
426 send_requests.data() + (&el - destinations.data()));
434 std::vector<unsigned int> origins(n_recv_from);
435 for (auto &el : origins)
437 const int ierr = MPI_Recv(&el,
447 if (destinations.size() > 0)
449 const int ierr = MPI_Waitall(destinations.size(),
450 send_requests.data(),
451 MPI_STATUSES_IGNORE);
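// Usage sketch (not part of mpi.cc): every process names the ranks it wants to
// send to and, through the collective call above, learns from which ranks it
// will receive. The cyclic-neighbor pattern is an arbitrary example.
#include <deal.II/base/mpi.h>
#include <vector>

void example_p2p_pattern(const MPI_Comm comm)
{
  const unsigned int n_proc = dealii::Utilities::MPI::n_mpi_processes(comm);
  const unsigned int my_id  = dealii::Utilities::MPI::this_mpi_process(comm);

  // Everybody talks to its right neighbor; self-communication is not allowed.
  std::vector<unsigned int> destinations;
  if (n_proc > 1)
    destinations.push_back((my_id + 1) % n_proc);

  // Collective over 'comm': returns the ranks that named *us* as destination.
  const std::vector<unsigned int> origins =
    dealii::Utilities::MPI::compute_point_to_point_communication_pattern(
      comm, destinations);
  (void)origins;
}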
462 const MPI_Comm & mpi_comm,
463 const std::vector<unsigned int> &destinations)
467 const bool my_destinations_are_unique = [destinations]() {
468 std::vector<unsigned int> my_destinations = destinations;
469 const unsigned int n_destinations = my_destinations.size();
470 std::sort(my_destinations.begin(), my_destinations.end());
471 my_destinations.erase(std::unique(my_destinations.begin(),
472 my_destinations.end()),
473 my_destinations.end());
474 return (my_destinations.size() == n_destinations);
483 return ConsensusAlgorithms::nbx<char, char>(
484 destinations, {}, {}, {}, mpi_comm)
489 const unsigned int n_procs =
492 for (const unsigned int destination : destinations)
498 "There is no point in communicating with ourselves."));
502 std::vector<unsigned int> dest_vector(n_procs);
503 for (const auto &el : destinations)
508 unsigned int n_recv_from = 0;
510 const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
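// Usage sketch (not part of mpi.cc): when only the *number* of incoming
// messages matters, not the sending ranks, the cheaper variant above can be
// used with the same destination list. Names are illustrative.
#include <deal.II/base/mpi.h>
#include <vector>

unsigned int example_n_incoming(const MPI_Comm                   comm,
                                const std::vector<unsigned int> &destinations)
{
  // Collective over 'comm': every process must call it with its own list.
  return dealii::Utilities::MPI::compute_n_point_to_point_communications(
    comm, destinations);
}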
529 max_reduce(const void *in_lhs_,
534 const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
535 MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
537 for (int i = 0; i < *len; ++i)
539 inout_rhs[i].sum += in_lhs[i].sum;
540 if (inout_rhs[i].min > in_lhs[i].min)
542 inout_rhs[i].min = in_lhs[i].min;
543 inout_rhs[i].min_index = in_lhs[i].min_index;
545 else if (inout_rhs[i].min == in_lhs[i].min)
548 if (inout_rhs[i].min_index > in_lhs[i].min_index)
549 inout_rhs[i].min_index = in_lhs[i].min_index;
552 if (inout_rhs[i].max < in_lhs[i].max)
554 inout_rhs[i].max = in_lhs[i].max;
555 inout_rhs[i].max_index = in_lhs[i].max_index;
557 else if (inout_rhs[i].max == in_lhs[i].max)
560 if (inout_rhs[i].max_index > in_lhs[i].max_index)
561 inout_rhs[i].max_index = in_lhs[i].max_index;
572 const MPI_Comm & mpi_communicator)
579 for (unsigned int i = 0; i < my_values.size(); ++i)
581 result[i].sum = my_values[i];
582 result[i].avg = my_values[i];
583 result[i].min = my_values[i];
584 result[i].max = my_values[i];
585 result[i].min_index = 0;
586 result[i].max_index = 0;
596 static MPI_Datatype type = []() {
599 int lengths[] = {3, 2, 1};
601 MPI_Aint displacements[] = {0,
605 MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
608 MPI_Type_create_struct(3, lengths, displacements, types, &type);
611 ierr = MPI_Type_commit(&type);
617 int ierr = MPI_Type_free(&type);
629 static MPI_Op op = []() {
633 MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
634 static_cast<int>(true),
641 int ierr = MPI_Op_free(&op);
657 std::numeric_limits<double>::lowest(),
662 for (auto &i : result)
665 const unsigned int my_id =
667 const unsigned int numproc =
670 std::vector<MinMaxAvg> in(my_values.size());
672 for (unsigned int i = 0; i < my_values.size(); ++i)
674 in[i].sum = in[i].min = in[i].max = my_values[i];
675 in[i].min_index = in[i].max_index = my_id;
678 int ierr = MPI_Allreduce(
679 in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
682 for (auto &r : result)
683 r.avg = r.sum / numproc;
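// Usage sketch (not part of mpi.cc): collecting statistics of a per-process
// value; the fields follow the MinMaxAvg struct filled by the reduction above.
#include <deal.II/base/mpi.h>
#include <iostream>

void example_min_max_avg(const double local_time, const MPI_Comm comm)
{
  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg(local_time, comm);

  // min/max are the extreme values, min_index/max_index the ranks that own
  // them, sum the total over all ranks, and avg the mean.
  if (dealii::Utilities::MPI::this_mpi_process(comm) == 0)
    std::cout << "min " << stats.min << " on rank " << stats.min_index
              << ", max " << stats.max << " on rank " << stats.max_index
              << ", avg " << stats.avg << std::endl;
}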
705 const std::vector<unsigned int>
708 return std::vector<unsigned int>{0};
713 std::vector<IndexSet>
732 return mpi_communicator;
750 for (unsigned int i = 0; i < my_values.size(); ++i)
752 result[i].sum = my_values[i];
753 result[i].avg = my_values[i];
754 result[i].min = my_values[i];
755 result[i].max = my_values[i];
756 result[i].min_index = 0;
757 result[i].max_index = 0;
765 MPI_InitFinalize::Signals();
770 const unsigned int max_num_threads)
772 static bool constructor_has_already_run = false;
773 (void)constructor_has_already_run;
774 Assert(constructor_has_already_run == false,
775 ExcMessage("You can only create a single object of this class "
776 "in a program since it initializes the MPI system."));
780 #ifdef DEAL_II_WITH_MPI
783 int MPI_has_been_started = 0;
784 ierr = MPI_Initialized(&MPI_has_been_started);
787 ExcMessage("MPI error. You can only start MPI once!"));
794 int wanted = MPI_THREAD_SERIALIZED;
795 ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
812 #ifdef DEAL_II_WITH_PETSC
813 # ifdef DEAL_II_WITH_SLEPC
815 ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
819 ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
825 PetscPopSignalHandler();
829 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
831 Zoltan_Initialize(argc, argv, &version);
834 #ifdef DEAL_II_WITH_P4EST
836 # if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
841 sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
843 p4est_init(nullptr, SC_LP_SILENT);
846 constructor_has_already_run = true;
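// Usage sketch (not part of mpi.cc): the constructor above is normally reached
// through a single MPI_InitFinalize object at the top of main(); limiting each
// process to one thread via the last argument is an example choice.
#include <deal.II/base/mpi.h>

int main(int argc, char **argv)
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // ... the actual program; MPI (and PETSc/SLEPc/p4est/Zoltan, if enabled)
  // is finalized automatically when this object goes out of scope ...

  return 0;
}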
860 #ifdef DEAL_II_WITH_MPI
869 const unsigned int max_hostname_size =
871 std::vector<char> hostname_array(max_hostname_size);
873 hostname.c_str() + hostname.size() + 1,
874 hostname_array.begin());
876 std::vector<char> all_hostnames(max_hostname_size *
878 const int ierr = MPI_Allgather(hostname_array.data(),
881 all_hostnames.data(),
889 unsigned int n_local_processes = 0;
890 unsigned int nth_process_on_host = 0;
893 if (std::string(all_hostnames.data() + i * max_hostname_size) ==
898 ++nth_process_on_host;
909 const unsigned int n_threads =
911 (nth_process_on_host <=
945 "You tried to call unregister_request() with an invalid request."));
966 #ifdef DEAL_II_WITH_MPI
970 const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
978 release_unused_memory();
982 release_unused_memory();
985 # ifdef DEAL_II_WITH_TRILINOS
996 #ifdef DEAL_II_WITH_PETSC
997 if ((PetscInitializeCalled == PETSC_TRUE) &&
998 (PetscFinalizeCalled == PETSC_FALSE))
1005 # ifdef DEAL_II_WITH_SLEPC
1018 #ifdef DEAL_II_WITH_CUDA
1021 release_unused_memory();
1024 release_unused_memory();
1027 #ifdef DEAL_II_WITH_P4EST
1037 #ifdef DEAL_II_WITH_MPI
1040 # if __cpp_lib_uncaught_exceptions >= 201411
1042 if (std::uncaught_exceptions() > 0)
1044 if (std::uncaught_exception() == true)
1064 #ifdef DEAL_II_WITH_MPI
1065 int MPI_has_been_started = 0;
1066 const int ierr = MPI_Initialized(&MPI_has_been_started);
1069 return (MPI_has_been_started > 0);
1077 std::vector<unsigned int>
1079 const IndexSet &indices_to_look_up,
1080 const MPI_Comm &comm)
1083 ExcMessage("IndexSets have to have the same sizes."));
1087 ExcMessage("IndexSets have to have the same size on all processes."));
1089 std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1097 owned_indices, indices_to_look_up, comm, owning_ranks);
1105 std::pair<types::global_dof_index, types::global_dof_index>>,
1106 std::vector<unsigned int>>
1107 consensus_algorithm(process, comm);
1108 consensus_algorithm.run();
1110 return owning_ranks;
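// Usage sketch (not part of mpi.cc): asking which rank owns a given global
// index via the consensus algorithm above. The block size of 10 and the
// looked-up index are arbitrary example values.
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <vector>

void example_index_owner(const MPI_Comm comm)
{
  const unsigned int my_id  = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_proc = dealii::Utilities::MPI::n_mpi_processes(comm);

  // Each rank owns a contiguous block of 10 indices out of 10 * n_proc.
  dealii::IndexSet owned(10 * n_proc);
  owned.add_range(10 * my_id, 10 * (my_id + 1));

  // Every rank asks who owns global index 0 (rank 0 in this setup).
  dealii::IndexSet to_look_up(10 * n_proc);
  to_look_up.add_index(0);

  const std::vector<unsigned int> owners =
    dealii::Utilities::MPI::compute_index_owner(owned, to_look_up, comm);
  (void)owners;
}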
1117 , request(MPI_REQUEST_NULL)
1129 "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1144 "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1146 #ifdef DEAL_II_WITH_MPI
1152 const int ierr = MPI_Barrier(comm);
1158 const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1178 "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1180 #ifdef DEAL_II_WITH_MPI
1189 const int ierr = MPI_Barrier(comm);
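// Usage sketch (not part of mpi.cc): protecting a communication phase on a
// shared communicator with the CollectiveMutex whose lock()/unlock() are shown
// above. The static mutex and the function name are illustrative.
#include <deal.II/base/mpi.h>

void example_critical_section(const MPI_Comm comm)
{
  static dealii::Utilities::MPI::CollectiveMutex mutex;

  mutex.lock(comm);
  // ... point-to-point communication that must not interleave with other
  //     users of 'comm' ...
  mutex.unlock(comm);
}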
1201 logical_or<bool>(const bool &, const MPI_Comm &);
1210 template std::vector<unsigned int>
1212 const MPI_Comm &comm);
1215 template std::set<unsigned int>
value_type * data() const noexcept
size_type n_elements() const
void add_range(const size_type begin, const size_type end)
types::global_dof_index size_type
static unsigned int n_cores()
static void set_thread_limit(const unsigned int max_threads=numbers::invalid_unsigned_int)
void unlock(const MPI_Comm &comm)
void lock(const MPI_Comm &comm)
static void unregister_request(MPI_Request &request)
static std::set< MPI_Request * > requests
MPI_InitFinalize(int &argc, char **&argv, const unsigned int max_num_threads=numbers::invalid_unsigned_int)
static void register_request(MPI_Request &request)
#define DEAL_II_NAMESPACE_OPEN
#define DEAL_II_NAMESPACE_CLOSE
__global__ void set(Number *val, const Number s, const size_type N)
static ::ExceptionBase & ExcInternalError()
static ::ExceptionBase & ExcSLEPcError(int arg1)
#define Assert(cond, exc)
#define AssertDimension(dim1, dim2)
#define AssertThrowMPI(error_code)
#define AssertNothrow(cond, exc)
#define AssertIndexRange(index, range)
static ::ExceptionBase & ExcMessage(std::string arg1)
#define AssertThrow(cond, exc)
IndexSet complete_index_set(const IndexSet::size_type N)
VectorType::value_type * begin(VectorType &V)
VectorType::value_type * end(VectorType &V)
template const MPI_Datatype mpi_type_id_for_type< int >
void free_communicator(MPI_Comm &mpi_communicator)
std::vector< unsigned int > compute_index_owner(const IndexSet &owned_indices, const IndexSet &indices_to_look_up, const MPI_Comm &comm)
template const MPI_Datatype mpi_type_id_for_type< unsigned char >
std::unique_ptr< MPI_Datatype, void(*)(MPI_Datatype *)> create_mpi_data_type_n_bytes(const std::size_t n_bytes)
template const MPI_Datatype mpi_type_id_for_type< signed char >
template const MPI_Datatype mpi_type_id_for_type< bool >
template const MPI_Datatype mpi_type_id_for_type< unsigned short >
std::vector< T > compute_set_union(const std::vector< T > &vec, const MPI_Comm &comm)
template const MPI_Datatype mpi_type_id_for_type< short >
unsigned int compute_n_point_to_point_communications(const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations)
std::vector< IndexSet > create_ascending_partitioning(const MPI_Comm &comm, const IndexSet::size_type locally_owned_size)
IndexSet create_evenly_distributed_partitioning(const MPI_Comm &comm, const IndexSet::size_type total_size)
template const MPI_Datatype mpi_type_id_for_type< double >
unsigned int this_mpi_process(const MPI_Comm &mpi_communicator)
std::vector< unsigned int > compute_point_to_point_communication_pattern(const MPI_Comm &mpi_comm, const std::vector< unsigned int > &destinations)
template const MPI_Datatype mpi_type_id_for_type< char >
template const MPI_Datatype mpi_type_id_for_type< long double >
template const MPI_Datatype mpi_type_id_for_type< unsigned long long int >
template const MPI_Datatype mpi_type_id_for_type< unsigned long int >
T min(const T &t, const MPI_Comm &mpi_communicator)
MPI_Comm duplicate_communicator(const MPI_Comm &mpi_communicator)
const std::vector< unsigned int > mpi_processes_within_communicator(const MPI_Comm &comm_large, const MPI_Comm &comm_small)
unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)
std::vector< T > all_gather(const MPI_Comm &comm, const T &object_to_send)
MinMaxAvg min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
template const MPI_Datatype mpi_type_id_for_type< long int >
T max(const T &t, const MPI_Comm &mpi_communicator)
template const MPI_Datatype mpi_type_id_for_type< float >
int create_group(const MPI_Comm &comm, const MPI_Group &group, const int tag, MPI_Comm *new_comm)
std::string get_hostname()
IndexSet create_evenly_distributed_partitioning(const unsigned int my_partition_id, const unsigned int n_partitions, const IndexSet::size_type total_size)
void copy(const T *begin, const T *end, U *dest)
static const unsigned int invalid_unsigned_int
boost::signals2::signal< void()> at_mpi_init
boost::signals2::signal< void()> at_mpi_finalize