#include <deal.II/base/mpi.templates.h>

#include <boost/serialization/utility.hpp>

#include <Kokkos_Core.hpp>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>

#ifdef DEAL_II_WITH_PETSC

#ifdef DEAL_II_WITH_SLEPC

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
    const unsigned int my_partition_id,
    const unsigned int n_partitions,

      std::is_same<types::global_dof_index, IndexSet::size_type>::value,
      "IndexSet::size_type must match types::global_dof_index for "
      "using this function");

    const unsigned int remain = total_size % n_partitions;

      min_size * my_partition_id + std::min(my_partition_id, remain);

      min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
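    // Illustrative sketch, not part of mpi.cc: how the function above might be
    // called. The helper name `example_even_partitioning` is hypothetical and
    // assumes only the interface shown here. With 10 indices and 3 partitions
    // the chunks are [0,4), [4,7), [7,10), differing in size by at most one.
    inline IndexSet
    example_even_partitioning()
    {
      // Partition 1 of 3 over 10 global indices -> half-open range [4,7).
      return Utilities::MPI::create_evenly_distributed_partitioning(
        /*my_partition_id=*/1, /*n_partitions=*/3, /*total_size=*/10);
    }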
#ifdef DEAL_II_WITH_MPI
    template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
    template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
    std::vector<MinMaxAvg>
      std::vector<MinMaxAvg> results(my_values.size());

#ifdef DEAL_II_WITH_MPI
      const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);

      const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
    const std::vector<unsigned int>
        return std::vector<unsigned int>{0};

      std::vector<unsigned int> ranks(size);
      const int ierr = MPI_Allgather(
        &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
      const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
      return new_communicator;

      const int ierr = MPI_Comm_free(&mpi_communicator);
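    // Illustrative sketch, not part of mpi.cc: duplicating a communicator for
    // internal use and releasing it again with the helpers above. The function
    // name `example_use_duplicated_communicator` is hypothetical.
    inline void
    example_use_duplicated_communicator()
    {
      MPI_Comm internal_comm =
        Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);

      // ... perform communication on `internal_comm` without interfering
      //     with messages traveling on MPI_COMM_WORLD ...

      Utilities::MPI::free_communicator(internal_comm);
    }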
                 const MPI_Group &group,

      const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
    std::vector<IndexSet>
        std::is_same<types::global_dof_index, IndexSet::size_type>::value,
        "IndexSet::size_type must match types::global_dof_index for "
        "using this function");

      const std::vector<IndexSet::size_type> sizes =
      const auto total_size =

      std::vector<IndexSet> res(n_proc, IndexSet(total_size));

      for (unsigned int i = 0; i < n_proc; ++i)
        {
          res[i].add_range(begin, begin + sizes[i]);
          begin = begin + sizes[i];
        }
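      // Illustrative sketch, not part of mpi.cc: every rank passes its local
      // size and receives one contiguous IndexSet per rank, stacked in
      // ascending rank order. The name `example_ascending_partitioning` is
      // hypothetical; `locally_owned_size` stands for any per-rank count.
      inline std::vector<IndexSet>
      example_ascending_partitioning(
        const types::global_dof_index locally_owned_size)
      {
        // On 3 ranks with local sizes 4, 2, 3 the result is
        // { [0,4), [4,6), [6,9) } on every rank.
        return Utilities::MPI::create_ascending_partitioning(
          MPI_COMM_WORLD, locally_owned_size);
      }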
    std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
      ierr = MPI_Type_commit(&result);

      ierr = MPI_Type_size_x(result, &size64);

      auto deleter = [](MPI_Datatype *p) {
        const int ierr = MPI_Type_free(p);

      return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
        new MPI_Datatype(result), deleter);
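    // Illustrative sketch, not part of mpi.cc: the returned smart pointer owns
    // a committed MPI datatype that describes `n_bytes` raw bytes, so large
    // buffers can be transferred with a count of 1. The function name
    // `example_broadcast_large_buffer` is hypothetical.
    inline void
    example_broadcast_large_buffer(std::vector<char> &buffer)
    {
      const auto byte_type =
        Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());

      // A single element of `*byte_type` covers the whole buffer.
      const int ierr =
        MPI_Bcast(buffer.data(), 1, *byte_type, 0, MPI_COMM_WORLD);
      AssertThrowMPI(ierr);
    }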
    std::vector<unsigned int>
      const std::vector<unsigned int> &destinations)

      for (const unsigned int destination : destinations)

      const bool my_destinations_are_unique = [destinations]() {
        if (destinations.size() == 0)

        std::vector<unsigned int> my_destinations = destinations;
        std::sort(my_destinations.begin(), my_destinations.end());
        return (std::adjacent_find(my_destinations.begin(),
                                   my_destinations.end()) ==
                my_destinations.end());

          return ConsensusAlgorithms::nbx<char, char>(
            destinations, {}, {}, {}, mpi_comm);

      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)

      unsigned int n_recv_from;
      const int ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);

      std::vector<MPI_Request> send_requests(destinations.size());
      for (const auto &el : destinations)
          send_requests.data() + (&el - destinations.data()));

      std::vector<unsigned int> origins(n_recv_from);
      for (auto &el : origins)
          const int ierr = MPI_Recv(&el,

      if (destinations.size() > 0)
          const int ierr = MPI_Waitall(destinations.size(),
                                       send_requests.data(),
                                       MPI_STATUSES_IGNORE);
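    // Illustrative sketch, not part of mpi.cc: every rank lists the ranks it
    // wants to send to, and the function above returns the ranks that will
    // send to it. The name `example_point_to_point_pattern` is hypothetical.
    // With 3 ranks and each rank r sending to (r+1)%3, rank r learns that it
    // will receive from (r+2)%3.
    inline std::vector<unsigned int>
    example_point_to_point_pattern()
    {
      const unsigned int my_rank =
        Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
      const unsigned int n_ranks =
        Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);

      std::vector<unsigned int> destinations;
      if (n_ranks > 1)
        destinations.push_back((my_rank + 1) % n_ranks);

      return Utilities::MPI::compute_point_to_point_communication_pattern(
        MPI_COMM_WORLD, destinations);
    }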
      const std::vector<unsigned int> &destinations)

      const bool my_destinations_are_unique = [destinations]() {
        std::vector<unsigned int> my_destinations = destinations;
        const unsigned int        n_destinations  = my_destinations.size();
        std::sort(my_destinations.begin(), my_destinations.end());
        my_destinations.erase(std::unique(my_destinations.begin(),
                                          my_destinations.end()),
                              my_destinations.end());
        return (my_destinations.size() == n_destinations);

          return ConsensusAlgorithms::nbx<char, char>(
                   destinations, {}, {}, {}, mpi_comm)

      const unsigned int n_procs =

      for (const unsigned int destination : destinations)
            "There is no point in communicating with ourselves."));

      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)

      unsigned int n_recv_from = 0;

      const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
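      // Illustrative sketch, not part of mpi.cc: the counting trick used
      // above. Each rank marks its destinations with a 1 in a vector of
      // length n_procs; MPI_Reduce_scatter_block sums these vectors across
      // ranks and scatters one entry back to each rank, which is exactly the
      // number of distinct ranks that will send to it. The name
      // `example_count_incoming` is hypothetical.
      inline unsigned int
      example_count_incoming(const std::vector<unsigned int> &destinations,
                             const MPI_Comm                   comm)
      {
        const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);

        std::vector<unsigned int> dest_vector(n_procs, 0);
        for (const unsigned int d : destinations)
          dest_vector[d] = 1;

        unsigned int n_recv_from = 0;
        const int    ierr        = MPI_Reduce_scatter_block(
          dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, comm);
        AssertThrowMPI(ierr);

        return n_recv_from;
      }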
    max_reduce(const void *in_lhs_,

      const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
      MinMaxAvg       *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

      for (int i = 0; i < *len; ++i)
        {
          inout_rhs[i].sum += in_lhs[i].sum;
          if (inout_rhs[i].min > in_lhs[i].min)
            {
              inout_rhs[i].min       = in_lhs[i].min;
              inout_rhs[i].min_index = in_lhs[i].min_index;
            }
          else if (inout_rhs[i].min == in_lhs[i].min)
            {
              // on a tie, keep the smaller index
              if (inout_rhs[i].min_index > in_lhs[i].min_index)
                inout_rhs[i].min_index = in_lhs[i].min_index;
            }

          if (inout_rhs[i].max < in_lhs[i].max)
            {
              inout_rhs[i].max       = in_lhs[i].max;
              inout_rhs[i].max_index = in_lhs[i].max_index;
            }
          else if (inout_rhs[i].max == in_lhs[i].max)
            {
              // on a tie, keep the smaller index
              if (inout_rhs[i].max_index > in_lhs[i].max_index)
                inout_rhs[i].max_index = in_lhs[i].max_index;
            }
        }
      for (unsigned int i = 0; i < my_values.size(); ++i)
        {
          result[i].sum       = my_values[i];
          result[i].avg       = my_values[i];
          result[i].min       = my_values[i];
          result[i].max       = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
        }
      static MPI_Datatype type = []() {
        int lengths[] = {3, 2, 1};

        MPI_Aint displacements[] = {0,

        MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};

          MPI_Type_create_struct(3, lengths, displacements, types, &type);

        ierr = MPI_Type_commit(&type);

        int ierr = MPI_Type_free(&type);

      static MPI_Op op = []() {

          MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                        static_cast<int>(true),

        int ierr = MPI_Op_free(&op);
                                 std::numeric_limits<double>::max(),
                                 std::numeric_limits<double>::lowest(),

      for (auto &i : result)

      const unsigned int my_id =
      const unsigned int numproc =

      std::vector<MinMaxAvg> in(my_values.size());

      for (unsigned int i = 0; i < my_values.size(); ++i)
        {
          in[i].sum = in[i].min = in[i].max = my_values[i];
          in[i].min_index = in[i].max_index = my_id;
        }

      int ierr = MPI_Allreduce(
        in.data(), result.data(), my_values.size(), type, op, mpi_communicator);

      for (auto &r : result)
        r.avg = r.sum / numproc;
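    // Illustrative sketch, not part of mpi.cc: min_max_avg() combines one
    // double per rank into minimum, maximum, sum and average, together with
    // the ranks on which the extremes occur (ties go to the lower rank, as
    // implemented in max_reduce() above). The name `example_report_imbalance`
    // is hypothetical.
    inline void
    example_report_imbalance(const double local_wall_time)
    {
      const Utilities::MPI::MinMaxAvg stats =
        Utilities::MPI::min_max_avg(local_wall_time, MPI_COMM_WORLD);

      if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
        std::cout << "wall time: min " << stats.min << " on rank "
                  << stats.min_index << ", max " << stats.max << " on rank "
                  << stats.max_index << ", avg " << stats.avg << std::endl;
    }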
    const std::vector<unsigned int>
      return std::vector<unsigned int>{0};

    std::vector<IndexSet>

      return mpi_communicator;

      for (unsigned int i = 0; i < my_values.size(); ++i)
        {
          result[i].sum       = my_values[i];
          result[i].avg       = my_values[i];
          result[i].min       = my_values[i];
          result[i].max       = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
        }
    MPI_InitFinalize::Signals();

                                     const unsigned int max_num_threads)
      static bool constructor_has_already_run = false;
      (void)constructor_has_already_run;
      Assert(constructor_has_already_run == false,
             ExcMessage("You can only create a single object of this class "
                        "in a program since it initializes the MPI system."));

#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      ierr                     = MPI_Initialized(&MPI_has_been_started);
             ExcMessage("MPI error. You can only start MPI once!"));

      int wanted = MPI_THREAD_SERIALIZED;
      ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);

      std::vector<char *> argv_new;
        if (strcmp(arg, "--help") != 0)
          argv_new.push_back(arg);

      std::stringstream threads_flag;
#if KOKKOS_VERSION >= 30700

      const std::string threads_flag_string = threads_flag.str();
      argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
      argv_new.push_back(nullptr);

      int argc_new = argv_new.size() - 1;
      Kokkos::initialize(argc_new, argv_new.data());

#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
      ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);

      ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);

      PetscPopSignalHandler();

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
      Zoltan_Initialize(argc, argv, &version);

#ifdef DEAL_II_WITH_P4EST
#  if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
      p4est_init(nullptr, SC_LP_SILENT);

      constructor_has_already_run = true;
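      // Illustrative sketch, not part of mpi.cc: the constructor above is
      // normally used as the first statement of main(), so that MPI (and,
      // depending on the configuration, Kokkos, PETSc/SLEPc, Zoltan and
      // p4est) is initialized before any other deal.II call and finalized
      // when the object goes out of scope. The body of main() shown here is
      // hypothetical.
      //
      //   int main(int argc, char **argv)
      //   {
      //     // limit each MPI process to a single thread
      //     dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
      //
      //     // ... run the parallel program ...
      //   }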
#ifdef DEAL_II_WITH_MPI
      const unsigned int max_hostname_size =
      std::vector<char> hostname_array(max_hostname_size);
      std::copy(hostname.c_str(),
                hostname.c_str() + hostname.size() + 1,
                hostname_array.begin());

      std::vector<char> all_hostnames(max_hostname_size *

      const int ierr = MPI_Allgather(hostname_array.data(),
                                     all_hostnames.data(),

      unsigned int n_local_processes   = 0;
      unsigned int nth_process_on_host = 0;
        if (std::string(all_hostnames.data() + i * max_hostname_size) ==
            ++nth_process_on_host;

      const unsigned int n_threads =
        (nth_process_on_host <=
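      // Illustrative sketch, not part of mpi.cc: the (truncated) expression
      // above distributes the cores of a node among the MPI processes running
      // on it, giving one extra thread to the first few processes when the
      // division is not even. The helper below is a hypothetical
      // reconstruction of that arithmetic, not the literal code: with 10
      // cores and 4 local processes it yields 3, 3, 2, 2 threads for the 1st
      // through 4th process on the host.
      inline unsigned int
      example_threads_per_process(const unsigned int n_cores_on_host,
                                  const unsigned int n_local_processes,
                                  const unsigned int nth_process_on_host)
      {
        return std::max(n_cores_on_host / n_local_processes +
                          (nth_process_on_host <=
                               n_cores_on_host % n_local_processes ?
                             1 :
                             0),
                        1U);
      }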
        "You tried to call unregister_request() with an invalid request."));

#ifdef DEAL_II_WITH_MPI
          const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);

        release_unused_memory();

        release_unused_memory();

#  ifdef DEAL_II_WITH_TRILINOS

#ifdef DEAL_II_WITH_PETSC
      if (!PetscFinalizeCalled)

#  ifdef DEAL_II_WITH_SLEPC

#ifdef DEAL_II_WITH_P4EST

#ifdef DEAL_II_WITH_MPI
#  if __cpp_lib_uncaught_exceptions >= 201411
      if (std::uncaught_exceptions() > 0)
      if (std::uncaught_exception() == true)

#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      const int ierr           = MPI_Initialized(&MPI_has_been_started);

      return (MPI_has_been_started > 0);
    std::vector<unsigned int>
                        const IndexSet &indices_to_look_up,

             ExcMessage("IndexSets have to have the same sizes."));

        ExcMessage("IndexSets have to have the same size on all processes."));

      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

        owned_indices, indices_to_look_up, comm, owning_ranks);

          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
        consensus_algorithm;
      consensus_algorithm.run(process, comm);

      return owning_ranks;
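    // Illustrative sketch, not part of mpi.cc: each rank describes the
    // indices it owns and the indices it needs; the function above returns,
    // for every locally requested index, the rank that owns it. The name
    // `example_find_owners` is hypothetical.
    inline std::vector<unsigned int>
    example_find_owners(const IndexSet &locally_owned,
                        const IndexSet &ghost_indices)
    {
      // Both index sets must have the same global size on all ranks.
      return Utilities::MPI::compute_index_owner(locally_owned,
                                                 ghost_indices,
                                                 MPI_COMM_WORLD);
    }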
    namespace CollectiveMutexImplementation
#ifdef DEAL_II_WITH_MPI
#  if __cpp_lib_uncaught_exceptions >= 201411
        if (std::uncaught_exceptions() != 0)
        if (std::uncaught_exception() == true)
            std::cerr
              << "---------------------------------------------------------\n"
              << "An exception was thrown inside a section of the program\n"
              << "guarded by a CollectiveMutex.\n"
              << "Because a CollectiveMutex guards critical communication\n"
              << "handling the exception would likely\n"
              << "deadlock because only the current process is aware of the\n"
              << "exception. To prevent this deadlock, the program will be\n"
              << "---------------------------------------------------------"

            MPI_Abort(MPI_COMM_WORLD, 1);
      , request(MPI_REQUEST_NULL)

          "Error: MPI::CollectiveMutex is still locked while being destroyed!"));

          "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));

#ifdef DEAL_II_WITH_MPI
          const int ierr = MPI_Barrier(comm);

            const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);

          "Error: MPI::CollectiveMutex needs to be locked before unlock()"));

#ifdef DEAL_II_WITH_MPI
          const int ierr = MPI_Barrier(comm);
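    // Illustrative sketch, not part of mpi.cc: a CollectiveMutex serializes a
    // critical communication phase so that messages from different rounds
    // cannot interleave. lock() and unlock() must be called collectively on
    // the same communicator by all ranks. The names `example_mutex` and
    // `example_guarded_exchange` are hypothetical.
    inline void
    example_guarded_exchange(const MPI_Comm comm)
    {
      static Utilities::MPI::CollectiveMutex example_mutex;

      example_mutex.lock(comm);
      // ... point-to-point communication that must not mix with other
      //     communication rounds using the same tags ...
      example_mutex.unlock(comm);
    }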
                                const std::function<bool(const bool &, const bool &)> &,
                                const unsigned int);

    template std::vector<bool>
    reduce(const std::vector<bool> &,

           const std::function<std::vector<bool>(const std::vector<bool> &,
                                                 const std::vector<bool> &)> &,
           const unsigned int);

                                    const std::function<bool(const bool &, const bool &)> &);

    template std::vector<bool>
               const std::vector<bool> &,

               const std::function<std::vector<bool>(const std::vector<bool> &,
                                                     const std::vector<bool> &)> &);

    internal::all_reduce<bool>(const MPI_Op &,

    logical_or<bool>(const bool &, const MPI_Comm);

    template std::vector<unsigned int>

    template std::set<unsigned int>