#include <deal.II/base/mpi.templates.h>

#include <boost/serialization/utility.hpp>
      const unsigned int            my_partition_id,
      const unsigned int            n_partitions,

      static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
                    "IndexSet::size_type must match types::global_dof_index for "
                    "using this function");

      const unsigned int remain = total_size % n_partitions;

        min_size * my_partition_id + std::min(my_partition_id, remain);
        min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
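
      // The two expressions above are the lower and upper bound of the
      // half-open index range owned by this partition: every rank receives
      // total_size / n_partitions elements, and the first
      // total_size % n_partitions ranks get one extra element. A small worked
      // example using the three-argument overload shown here:
      //
      //   // total_size = 10, n_partitions = 3  ->  ranges [0,4), [4,7), [7,10)
      //   const IndexSet owned =
      //     Utilities::MPI::create_evenly_distributed_partitioning(2, 3, 10);
      //   // `owned` contains the elements {7, 8, 9}.
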
    std::vector<MinMaxAvg>

      std::vector<MinMaxAvg> results(my_values.size());

#ifdef DEAL_II_WITH_MPI
      const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);

      const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
    std::vector<unsigned int>

        return std::vector<unsigned int>{0};

      std::vector<unsigned int> ranks(size);
      const int ierr = MPI_Allgather(
        &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
      const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);

      return new_communicator;

      const int ierr = MPI_Comm_free(&mpi_communicator);
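
      // A minimal usage sketch for the two calls above: duplicating a
      // communicator gives, e.g., a library object its own communication
      // context, and the duplicate must later be released with
      // free_communicator() (or managed via the DuplicatedCommunicator RAII
      // helper). `comm` is assumed to be a valid communicator:
      //
      //   MPI_Comm dup = Utilities::MPI::duplicate_communicator(comm);
      //   // ... communicate on `dup` without interfering with `comm` ...
      //   Utilities::MPI::free_communicator(dup);
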
    std::vector<IndexSet>

        std::is_same_v<types::global_dof_index, IndexSet::size_type>,
        "IndexSet::size_type must match types::global_dof_index for "
        "using this function");

      const std::vector<IndexSet::size_type> sizes =
      const auto total_size =

      std::vector<IndexSet> res(n_proc, IndexSet(total_size));

      for (unsigned int i = 0; i < n_proc; ++i)
          res[i].add_range(begin, begin + sizes[i]);
          begin = begin + sizes[i];
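
      // The loop above stacks the gathered local sizes into contiguous,
      // ascending ranges [begin, begin + sizes[i]), so rank i owns the
      // indices immediately following those of rank i-1. For example, local
      // sizes 4, 2, 3 on three ranks yield [0,4), [4,6), [6,9). Usage sketch
      // (`comm` and `local_size` are placeholders):
      //
      //   const std::vector<IndexSet> partition =
      //     Utilities::MPI::create_ascending_partitioning(comm, local_size);
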
    std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>

      ierr = MPI_Type_commit(&result);

      ierr = MPI_Type_size_x(result, &size64);

      auto deleter = [](MPI_Datatype *p) {
        [[maybe_unused]] const int ierr = MPI_Type_free(p);

      return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
        new MPI_Datatype(result), deleter);
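
      // The returned unique_ptr owns the committed datatype and releases it
      // through the deleter above. A hedged usage sketch for sending a buffer
      // whose byte count may exceed the 32-bit `int` count limit of plain
      // MPI_Send (`buffer`, `n_bytes`, `dest`, and `comm` are placeholders):
      //
      //   const auto bigtype =
      //     Utilities::MPI::create_mpi_data_type_n_bytes(n_bytes);
      //   MPI_Send(buffer, 1, *bigtype, dest, /*tag*/ 0, comm);
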
    std::vector<unsigned int>
      const std::vector<unsigned int> &destinations)

      for (const unsigned int destination : destinations)

      const bool my_destinations_are_unique = [destinations]() {
        if (destinations.empty())

        std::vector<unsigned int> my_destinations = destinations;
        std::sort(my_destinations.begin(), my_destinations.end());
        return (std::adjacent_find(my_destinations.begin(),
                                   my_destinations.end()) ==
                my_destinations.end());

          return ConsensusAlgorithms::nbx<char, char>(
            destinations, {}, {}, {}, mpi_comm);
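
      // When every rank's destination list is free of duplicates, the
      // function delegates to the NBX consensus algorithm above, which
      // answers "who wants to send to me?" without an all-to-all exchange.
      // Usage sketch (`comm` and `destinations` are placeholders):
      //
      //   const std::vector<unsigned int> sources =
      //     Utilities::MPI::compute_point_to_point_communication_pattern(
      //       comm, destinations);
      //   // `sources` lists the ranks that will send to this rank.
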
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)

      unsigned int n_recv_from;
      const int ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);

      std::vector<MPI_Request> send_requests(destinations.size());
      for (const auto &el : destinations)
                 send_requests.data() + (&el - destinations.data()));

      std::vector<unsigned int> origins(n_recv_from);
      for (auto &el : origins)
          const int ierr = MPI_Recv(&el,

      if (destinations.size() > 0)
          const int ierr = MPI_Waitall(destinations.size(),
                                       send_requests.data(),
                                       MPI_STATUSES_IGNORE);
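
      // Fallback path: each rank fills dest_vector with the number of
      // messages it will send to every other rank; MPI_Reduce_scatter_block
      // sums entry j across all ranks and delivers the sum to rank j, so
      // n_recv_from is the number of messages this rank will receive. The
      // MPI_Recv loop then collects the origin ranks announced by the
      // matching sends.
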
      const std::vector<unsigned int> &destinations)

      const bool my_destinations_are_unique = [destinations]() {
        std::vector<unsigned int> my_destinations = destinations;
        const unsigned int n_destinations = my_destinations.size();
        std::sort(my_destinations.begin(), my_destinations.end());
        my_destinations.erase(std::unique(my_destinations.begin(),
                                          my_destinations.end()),
                              my_destinations.end());
        return (my_destinations.size() == n_destinations);

          return ConsensusAlgorithms::nbx<char, char>(
                   destinations, {}, {}, {}, mpi_comm)

      const unsigned int n_procs =

      for (const unsigned int destination : destinations)
                 "There is no point in communicating with ourselves."));

      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)

      unsigned int n_recv_from = 0;

      const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
    max_reduce(const void *in_lhs_,

      const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
      MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

      for (int i = 0; i < *len; ++i)
          inout_rhs[i].sum += in_lhs[i].sum;
          if (inout_rhs[i].min > in_lhs[i].min)
              inout_rhs[i].min = in_lhs[i].min;
              inout_rhs[i].min_index = in_lhs[i].min_index;
          else if (inout_rhs[i].min == in_lhs[i].min)
              if (inout_rhs[i].min_index > in_lhs[i].min_index)
                inout_rhs[i].min_index = in_lhs[i].min_index;

          if (inout_rhs[i].max < in_lhs[i].max)
              inout_rhs[i].max = in_lhs[i].max;
              inout_rhs[i].max_index = in_lhs[i].max_index;
          else if (inout_rhs[i].max == in_lhs[i].max)
              if (inout_rhs[i].max_index > in_lhs[i].max_index)
                inout_rhs[i].max_index = in_lhs[i].max_index;
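
    // max_reduce() is the user-defined reduction used by min_max_avg(): sums
    // are accumulated, minima and maxima are tracked together with the rank
    // index that attained them, and ties are broken towards the smaller rank
    // index so the result does not depend on the reduction order.
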
      for (unsigned int i = 0; i < my_values.size(); ++i)
          result[i].sum = my_values[i];
          result[i].avg = my_values[i];
          result[i].min = my_values[i];
          result[i].max = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
      static MPI_Datatype type = []() {
        int lengths[] = {3, 2, 1};

        MPI_Aint displacements[] = {0,

        MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};

          MPI_Type_create_struct(3, lengths, displacements, types, &type);

        ierr = MPI_Type_commit(&type);

          int ierr = MPI_Type_free(&type);

      static MPI_Op op = []() {
          MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                        static_cast<int>(true),

          int ierr = MPI_Op_free(&op);
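
      // The block lengths {3, 2, 1} with types {MPI_DOUBLE, MPI_INT,
      // MPI_DOUBLE} describe the MinMaxAvg layout (sum/min/max,
      // min_index/max_index, avg) as an MPI struct type. Both the static
      // datatype and the static reduction operation are built once by these
      // lambdas; the MPI_Type_free() and MPI_Op_free() calls above run in
      // clean-up callbacks executed before MPI is finalized.
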
                       std::numeric_limits<double>::max(),
                       std::numeric_limits<double>::lowest(),

      for (auto &i : result)

      const unsigned int my_id =
      const unsigned int numproc =

      std::vector<MinMaxAvg> in(my_values.size());

      for (unsigned int i = 0; i < my_values.size(); ++i)
          in[i].sum = in[i].min = in[i].max = my_values[i];
          in[i].min_index = in[i].max_index = my_id;

      int ierr = MPI_Allreduce(
        in.data(), result.data(), my_values.size(), type, op, mpi_communicator);

      for (auto &r : result)
        r.avg = r.sum / numproc;
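
      // A single MPI_Allreduce with the custom datatype and operation thus
      // delivers sum, min, max and the attaining ranks in one pass; the
      // average is formed locally afterwards. Typical use (`local_time` and
      // `comm` are placeholders):
      //
      //   const Utilities::MPI::MinMaxAvg stats =
      //     Utilities::MPI::min_max_avg(local_time, comm);
      //   // stats.avg is the mean over all ranks, stats.max_index the rank
      //   // that reported the largest value.
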
    std::vector<unsigned int>
      return std::vector<unsigned int>{0};

    std::vector<IndexSet>

      return mpi_communicator;

      for (unsigned int i = 0; i < my_values.size(); ++i)
          result[i].sum = my_values[i];
          result[i].avg = my_values[i];
          result[i].min = my_values[i];
          result[i].max = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
      const unsigned int max_num_threads)

#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      const int ierr = MPI_Initialized(&MPI_has_been_started);

      return (MPI_has_been_started > 0);
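
      // The check above queries MPI_Initialized(), so job_supports_mpi()
      // returns true only if MPI has actually been started at run time,
      // independently of whether deal.II was compiled with MPI support.
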
    std::vector<unsigned int>

             ExcMessage("IndexSets have to have the same sizes."));
             ExcMessage("IndexSets have to have the same size on all processes."));

      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
      consensus_algorithm.run(process, comm);

    std::pair<std::vector<unsigned int>, std::map<unsigned int, IndexSet>>

             ExcMessage("IndexSets have to have the same sizes."));
             ExcMessage("IndexSets have to have the same size on all processes."));

      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

        owned_indices, indices_to_look_up, comm, owning_ranks, true);

          std::pair<types::global_dof_index, types::global_dof_index>>,
        std::vector<unsigned int>>
      consensus_algorithm.run(process, comm);
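
      // Both functions above answer "which rank owns these global indices?"
      // through the ConsensusAlgorithms machinery; the second variant
      // additionally records which locally owned indices each rank asked
      // about. A hedged sketch (`owned`, `ghosted`, and `comm` are
      // placeholders):
      //
      //   const auto [owners, requesters] =
      //     Utilities::MPI::compute_index_owner_and_requesters(owned,
      //                                                        ghosted,
      //                                                        comm);
      //   // owners[i] is the rank owning the i-th element of `ghosted`;
      //   // requesters[r] is the subset of `owned` requested by rank r.
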
    namespace CollectiveMutexImplementation

#ifdef DEAL_II_WITH_MPI
        if (std::uncaught_exceptions() > 0)
              << "---------------------------------------------------------\n"
              << "An exception was thrown inside a section of the program\n"
              << "guarded by a CollectiveMutex.\n"
              << "Because a CollectiveMutex guards critical communication\n"
              << "handling the exception would likely\n"
              << "deadlock because only the current process is aware of the\n"
              << "exception. To prevent this deadlock, the program will be\n"
              << "aborted.\n"
              << "---------------------------------------------------------"

            MPI_Abort(MPI_COMM_WORLD, 1);
      , request(MPI_REQUEST_NULL)

        "Error: MPI::CollectiveMutex is still locked while being destroyed!"));

        "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));

#ifdef DEAL_II_WITH_MPI
          const int ierr = MPI_Barrier(comm);

          const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);

        "Error: MPI::CollectiveMutex needs to be locked before unlock()"));

#ifdef DEAL_II_WITH_MPI
          const int ierr = MPI_Barrier(comm);
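
      // lock() and unlock() delimit a critical section of MPI communication;
      // the collective barriers make all ranks enter and leave the section
      // together so that messages belonging to different critical sections
      // cannot get mixed up. The intended use is via the ScopedLock RAII
      // helper, sketched here (`comm` is a placeholder):
      //
      //   static Utilities::MPI::CollectiveMutex      mutex;
      //   Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
      //   // ... guarded point-to-point communication ...
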
                   const std::function<bool(const bool &, const bool &)> &,

    template std::vector<bool>
    reduce(const std::vector<bool> &,
           const std::function<std::vector<bool>(const std::vector<bool> &,
                                                 const std::vector<bool> &)> &,

               const std::function<bool(const bool &, const bool &)> &);

    template std::vector<bool>
               const std::vector<bool> &,
               const std::function<std::vector<bool>(const std::vector<bool> &,
                                                     const std::vector<bool> &)> &);

    internal::all_reduce<bool>(const MPI_Op &,

    logical_or<bool>(const bool &, const MPI_Comm);

    template std::vector<unsigned int>

    template std::set<unsigned int>