#include <deal.II/base/mpi.templates.h>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>

#ifdef DEAL_II_WITH_PETSC

#ifdef DEAL_II_WITH_SLEPC

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
  // create_evenly_distributed_partitioning(): split the index range
  // [0, total_size) into n_partitions contiguous chunks whose sizes differ by
  // at most one element.
  IndexSet
  create_evenly_distributed_partitioning(const unsigned int        my_partition_id,
                                         const unsigned int        n_partitions,
                                         const IndexSet::size_type total_size)
  {
    const unsigned int        remain   = total_size % n_partitions;
    const IndexSet::size_type min_size = total_size / n_partitions;
    const IndexSet::size_type begin =
      min_size * my_partition_id + std::min(my_partition_id, remain);
    const IndexSet::size_type end =
      min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
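  // Usage sketch (illustrative, not part of mpi.cc; the namespace placement of
  // the three-argument overload is assumed to be Utilities, as in recent
  // deal.II versions). With total_size = 10 and n_partitions = 3, the formula
  // above gives min_size = 3 and remain = 1, hence the ranges
  // [0,4), [4,7), [7,10):
  void example_partition_ownership()
  {
    // Partition 1 of 3 over 10 global indices owns the half-open range [4,7).
    const IndexSet owned =
      Utilities::create_evenly_distributed_partitioning(/*my_partition_id=*/1,
                                                        /*n_partitions=*/3,
                                                        /*total_size=*/10);
    (void)owned;
  }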
    // Overload of min_max_avg() returning one MinMaxAvg entry per input value:
    std::vector<MinMaxAvg>
    min_max_avg(const std::vector<double> &my_values, const MPI_Comm &mpi_communicator)
    {
      std::vector<MinMaxAvg> results(my_values.size());
#ifdef DEAL_II_WITH_MPI
    // n_mpi_processes(): the number of ranks in the communicator ...
      const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
    // ... and this_mpi_process(): the rank of the calling process.
      const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
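    // Usage sketch (illustrative, not part of mpi.cc; assumes the usual
    // deal.II headers plus <iostream>, and an initialized MPI runtime):
    void example_rank_and_size(const MPI_Comm &comm)
    {
      const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
      if (my_rank == 0)
        std::cout << "Running on " << n_ranks << " MPI processes" << std::endl;
    }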
    // mpi_processes_within_communicator(): the ranks (within comm_large) of
    // the processes that are also part of comm_small.
    const std::vector<unsigned int>
      // Without MPI there is exactly one process, with rank 0:
      return std::vector<unsigned int>{0};

      std::vector<unsigned int> ranks(size);
      const int ierr = MPI_Allgather(
        &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
    // duplicate_communicator(): hand back a private copy of the communicator ...
    const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
    return new_communicator;
    // ... and free_communicator(): release such a copy again.
    const int ierr = MPI_Comm_free(&mpi_communicator);
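    // Usage sketch (illustrative, not part of mpi.cc): a duplicated
    // communicator gives library code its own communication context and must
    // eventually be handed back via free_communicator().
    void example_duplicate_and_free(const MPI_Comm &comm)
    {
      MPI_Comm my_comm = Utilities::MPI::duplicate_communicator(comm);
      // ... communicate on my_comm without interfering with messages on comm ...
      Utilities::MPI::free_communicator(my_comm);
    }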
  // create_group(): create a communicator from a subgroup of 'comm'. With
  // MPI >= 3.0 this is MPI_Comm_create_group(); otherwise it is emulated by
  // repeatedly creating and merging intercommunicators.
  int
  create_group(const MPI_Comm & comm,
               const MPI_Group &group,
               const int        tag,
               MPI_Comm *       new_comm)
  {
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
    return MPI_Comm_create_group(comm, group, tag, new_comm);
#  else
    int rank;
    int ierr = MPI_Comm_rank(comm, &rank);

    int grp_rank;
    ierr = MPI_Group_rank(group, &grp_rank);
    if (grp_rank == MPI_UNDEFINED)
      {
        // This process is not part of the group: return a null communicator.
        *new_comm = MPI_COMM_NULL;
        return MPI_SUCCESS;
      }

    int grp_size;
    ierr = MPI_Group_size(group, &grp_size);

    // Start from a communicator containing only this process:
    ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);

    // Translate the group ranks 0..grp_size-1 into ranks of 'comm':
    MPI_Group parent_grp;
    ierr = MPI_Comm_group(comm, &parent_grp);

    std::vector<int> pids(grp_size);
    std::vector<int> grp_pids(grp_size);
    std::iota(grp_pids.begin(), grp_pids.end(), 0);
    ierr = MPI_Group_translate_ranks(
      group, grp_size, grp_pids.data(), parent_grp, pids.data());
    ierr = MPI_Group_free(&parent_grp);

    // Doubling step: in round k, blocks of 2^k consecutive group members are
    // pairwise connected through an intercommunicator and then merged.
    MPI_Comm comm_old = *new_comm;
    MPI_Comm ic;
    for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
      {
        const int gid = grp_rank / merge_sz;
        comm_old      = *new_comm;
        if (gid % 2 == 0)
          {
            if ((gid + 1) * merge_sz < grp_size)
              {
                ierr = (MPI_Intercomm_create(
                  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
                ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
              }
          }
        else
          {
            ierr = MPI_Intercomm_create(
              *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
            ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
          }

        // Free the intermediate communicators of the previous round:
        if (*new_comm != comm_old)
          {
            ierr = MPI_Comm_free(&ic);
            ierr = MPI_Comm_free(&comm_old);
          }
      }
#  endif
    // create_ascending_partitioning(): gather every process's locally owned
    // size and build one contiguous, ascending IndexSet per rank.
    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm &          comm,
                                  const IndexSet::size_type locally_owned_size)
    {
      const std::vector<IndexSet::size_type> sizes =
        all_gather(comm, locally_owned_size);
      const auto total_size =
        std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));

      std::vector<IndexSet> res(n_proc, IndexSet(total_size));
      for (unsigned int i = 0; i < n_proc; ++i)
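      // Usage sketch (illustrative, not part of mpi.cc): every rank passes its
      // local size and receives the complete partitioning; e.g. local sizes
      // {3, 5, 2} on three ranks yield the ranges [0,3), [3,8), [8,10).
      void example_ascending_partitioning(const MPI_Comm &          comm,
                                          const IndexSet::size_type n_locally_owned)
      {
        const std::vector<IndexSet> partitioning =
          Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);
        const IndexSet &my_range =
          partitioning[Utilities::MPI::this_mpi_process(comm)];
        (void)my_range;
      }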
      // In the helper class used by compute_point_to_point_communication_pattern():
      // each incoming request tells us one rank that wants to send to us.
      virtual void
      answer_request(const unsigned int other_rank,
                     const std::vector<T1> &,
                     std::vector<T2> &) override
      {
        this->sources.push_back(other_rank);
      }

      // compute_targets() returns the user-provided destination list,
      virtual std::vector<unsigned int>
      // and get_result() hands back the collected source ranks.
      std::vector<unsigned int>

    // compute_point_to_point_communication_pattern(): given the ranks this
    // process wants to send to, determine the ranks that will send to it.
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      // Check that all requested destinations are valid and are not ourselves:
      for (const unsigned int destination : destinations)
        {
          AssertIndexRange(destination, n_procs);
          Assert(destination != myid,
                 ExcMessage(
                   "There is no point in communicating with ourselves."));
        }

#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      // MPI >= 3.0: let the NBX consensus algorithm determine the sources.
      ConsensusAlgorithmsProcessTargets process(destinations);
      ConsensusAlgorithms::NBX<ConsensusAlgorithmsProcessTargets::T1,
                               ConsensusAlgorithmsProcessTargets::T2>
        consensus_algorithm(process, mpi_comm);
      consensus_algorithm.run();
      return process.get_result();

#  elif DEAL_II_MPI_VERSION_GTE(2, 2)
      // Count how many messages this process sends to each rank ...
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)
        ++dest_vector[el];

      // ... and let MPI_Reduce_scatter_block tell every rank how many
      // messages it is going to receive:
      unsigned int n_recv_from;
      const int    ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);

      // Send our own rank to every destination ...
      std::vector<MPI_Request> send_requests(destinations.size());
      for (const auto &el : destinations)
        {
          const int ierr =
            MPI_Isend(&myid, 1, MPI_UNSIGNED, el, mpi_tag, mpi_comm,
                      send_requests.data() + (&el - destinations.data()));
        }

      // ... and receive the ranks of the n_recv_from processes contacting us:
      std::vector<unsigned int> origins(n_recv_from);
      for (auto &el : origins)
        {
          const int ierr = MPI_Recv(&el, 1, MPI_UNSIGNED, MPI_ANY_SOURCE,
                                    mpi_tag, mpi_comm, MPI_STATUS_IGNORE);
        }

      if (destinations.size() > 0)
        {
          const int ierr = MPI_Waitall(destinations.size(),
                                       send_requests.data(),
                                       MPI_STATUSES_IGNORE);
        }
#  else
      // Fallback: let all processes exchange their (padded) destination lists.
      const unsigned int max_n_destinations =
        Utilities::MPI::max(destinations.size(), mpi_comm);

      if (max_n_destinations == 0)
        // all processes have nothing to send or receive:
        return std::vector<unsigned int>();

      std::vector<unsigned int> my_destinations(max_n_destinations,
                                                numbers::invalid_unsigned_int);
      std::copy(destinations.begin(), destinations.end(),
                my_destinations.begin());

      std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
      const int ierr = MPI_Allgather(my_destinations.data(), max_n_destinations,
                                     MPI_UNSIGNED, all_destinations.data(),
                                     max_n_destinations, MPI_UNSIGNED, mpi_comm);

      // Every rank whose destination list contains our id is a source:
      std::vector<unsigned int> origins;
      for (unsigned int i = 0; i < n_procs; ++i)
        for (unsigned int j = 0; j < max_n_destinations; ++j)
          if (all_destinations[i * max_n_destinations + j] == myid)
            origins.push_back(i);
          else if (all_destinations[i * max_n_destinations + j] ==
                   numbers::invalid_unsigned_int)
            break;
#  endif
    // compute_n_point_to_point_communications(): like the function above, but
    // only count how many processes will send to the calling process.
    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      for (const unsigned int destination : destinations)
        {
          Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
                 ExcMessage(
                   "There is no point in communicating with ourselves."));
        }

      // Count the messages this process sends to each rank:
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)
        ++dest_vector[el];

#  if DEAL_II_MPI_VERSION_GTE(2, 2)
      // MPI_Reduce_scatter_block directly yields the number of incoming messages:
      unsigned int n_recv_from = 0;
      const int    ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
#  else
      // Fallback: sum the counts on rank 0 and scatter them back out:
      std::vector<unsigned int> buffer(dest_vector.size());
      unsigned int              n_recv_from = 0;
      int ierr = MPI_Reduce(dest_vector.data(), buffer.data(), dest_vector.size(),
                            MPI_UNSIGNED, MPI_SUM, 0, mpi_comm);
      ierr     = MPI_Scatter(buffer.data(), 1, MPI_UNSIGNED,
                             &n_recv_from, 1, MPI_UNSIGNED, 0, mpi_comm);
#  endif
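    // Usage sketch (illustrative, not part of mpi.cc): a hypothetical ring
    // pattern in which every rank sends one message to its right neighbor.
    void example_point_to_point_pattern(const MPI_Comm &comm)
    {
      const unsigned int my_rank = Utilities::MPI::this_mpi_process(comm);
      const unsigned int n_ranks = Utilities::MPI::n_mpi_processes(comm);
      if (n_ranks == 1)
        return; // nothing to communicate

      const std::vector<unsigned int> destinations = {(my_rank + 1) % n_ranks};

      // The ranks that will send to us (here: our left neighbor) ...
      const std::vector<unsigned int> sources =
        Utilities::MPI::compute_point_to_point_communication_pattern(comm,
                                                                     destinations);
      // ... or merely their number, if that is all we need:
      const unsigned int n_incoming =
        Utilities::MPI::compute_n_point_to_point_communications(comm, destinations);
      (void)sources;
      (void)n_incoming;
    }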
    // max_reduce(): the custom MPI reduction for MinMaxAvg objects. It adds
    // the sums and keeps min/max together with the rank they occurred on.
    void
    max_reduce(const void *in_lhs_, void *inout_rhs_, int *len, MPI_Datatype *)
    {
      const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
      MinMaxAvg *      inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

      for (int i = 0; i < *len; i++)
        {
          inout_rhs[i].sum += in_lhs[i].sum;
          if (inout_rhs[i].min > in_lhs[i].min)
            {
              inout_rhs[i].min       = in_lhs[i].min;
              inout_rhs[i].min_index = in_lhs[i].min_index;
            }
          else if (inout_rhs[i].min == in_lhs[i].min)
            {
              // choose the lower rank on ties to keep the operation commutative
              if (inout_rhs[i].min_index > in_lhs[i].min_index)
                inout_rhs[i].min_index = in_lhs[i].min_index;
            }

          if (inout_rhs[i].max < in_lhs[i].max)
            {
              inout_rhs[i].max       = in_lhs[i].max;
              inout_rhs[i].max_index = in_lhs[i].max_index;
            }
          else if (inout_rhs[i].max == in_lhs[i].max)
            {
              if (inout_rhs[i].max_index > in_lhs[i].max_index)
                inout_rhs[i].max_index = in_lhs[i].max_index;
            }
        }
    }
      // If MPI is not available (or there is only one process), the reduction
      // is trivial: each value is its own minimum, maximum, sum, and average.
      for (unsigned int i = 0; i < my_values.size(); i++)
        {
          result[i].sum       = my_values[i];
          result[i].avg       = my_values[i];
          result[i].min       = my_values[i];
          result[i].max       = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
        }
      // Build (once) an MPI datatype matching MinMaxAvg: three doubles
      // (sum, min, max), two ints (min_index, max_index), one double (avg).
      static MPI_Datatype type = []() {
        MPI_Datatype type;
        int          lengths[]       = {3, 2, 1};
        MPI_Aint     displacements[] = {0,
                                        offsetof(MinMaxAvg, min_index),
                                        offsetof(MinMaxAvg, avg)};
        MPI_Datatype types[]         = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};

        int ierr =
          MPI_Type_create_struct(3, lengths, displacements, types, &type);
        ierr = MPI_Type_commit(&type);

        // Free the datatype again just before MPI_Finalize() is called:
        MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
          int ierr = MPI_Type_free(&type);
        });
        return type;
      }();

      // Likewise, register (once) the custom reduction operation:
      static MPI_Op op = []() {
        MPI_Op op;
        MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                      static_cast<int>(true),
                      &op);
        MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
          int ierr = MPI_Op_free(&op);
        });
        return op;
      }();
      // Initialize the result with a neutral element ("dummy", defined above)
      // to avoid uninitialized values with some MPI implementations:
      for (auto &i : result)
        i = dummy;

      const unsigned int my_id =
        Utilities::MPI::this_mpi_process(mpi_communicator);
      const unsigned int numproc =
        Utilities::MPI::n_mpi_processes(mpi_communicator);

      // Fill the local contribution: each value is its own min/max/sum, and
      // the calling rank is recorded as the min/max location.
      std::vector<MinMaxAvg> in(my_values.size());
      for (unsigned int i = 0; i < my_values.size(); i++)
        {
          in[i].sum = in[i].min = in[i].max = my_values[i];
          in[i].min_index = in[i].max_index = my_id;
        }

      // Reduce with the custom datatype and operation created above ...
      int ierr = MPI_Allreduce(
        in.data(), result.data(), my_values.size(), type, op, mpi_communicator);

      // ... and turn the global sum into the average:
      for (auto &r : result)
        r.avg = r.sum / numproc;
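      // Usage sketch (illustrative, not part of mpi.cc): reduce one locally
      // measured number, e.g. a wall-clock time, over the whole communicator.
      void example_min_max_avg(const double local_time, const MPI_Comm &comm)
      {
        const Utilities::MPI::MinMaxAvg stats =
          Utilities::MPI::min_max_avg(local_time, comm);
        // stats.min/.max are the extreme values, stats.min_index/.max_index
        // the ranks on which they occurred, stats.sum the total and stats.avg
        // the mean over all ranks.
        (void)stats;
      }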
    // Without MPI, the functions above reduce to trivial serial stubs:
    // mpi_processes_within_communicator() reports a single process with rank 0,
    const std::vector<unsigned int>
      return std::vector<unsigned int>{0};
    // create_ascending_partitioning() returns a one-entry partitioning,
    std::vector<IndexSet>
    // and duplicate_communicator() simply hands back its argument:
      return mpi_communicator;
      // The no-MPI variant of min_max_avg(): each value is its own minimum,
      // maximum, sum, and average, located on the only process there is.
      for (unsigned int i = 0; i < my_values.size(); i++)
        {
          result[i].sum       = my_values[i];
          result[i].avg       = my_values[i];
          result[i].min       = my_values[i];
          result[i].max       = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
        }
  // Definition of the static Signals object whose at_mpi_init / at_mpi_finalize
  // signals are triggered by MPI_InitFinalize:
  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
    MPI_InitFinalize::Signals();
  // MPI_InitFinalize constructor: initialize MPI (and, if configured, PETSc/
  // SLEPc, Zoltan, and p4est/libsc) exactly once per program run.
  MPI_InitFinalize::MPI_InitFinalize(int &              argc,
                                     char **&           argv,
                                     const unsigned int max_num_threads)
  {
    static bool constructor_has_already_run = false;
    (void)constructor_has_already_run;
    Assert(constructor_has_already_run == false,
           ExcMessage("You can only create a single object of this class "
                      "in a program since it initializes the MPI system."));

#ifdef DEAL_II_WITH_MPI
    // Make sure nobody has started MPI already:
    int MPI_has_been_started = 0;
    ierr = MPI_Initialized(&MPI_has_been_started);
    AssertThrow(MPI_has_been_started == 0,
                ExcMessage("MPI error. You can only start MPI once!"));

    // Request thread support level MPI_THREAD_SERIALIZED:
    int provided;
    int wanted = MPI_THREAD_SERIALIZED;
    ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);

#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
    // Initialize SLEPc (which also initializes PETSc):
    ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
#  else
    // or just PETSc alone:
    ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
#  endif
    // Disable PETSc's own signal handler; deal.II installs its own:
    PetscPopSignalHandler();
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
    Zoltan_Initialize(argc, argv, &version);
#endif

#ifdef DEAL_II_WITH_P4EST
    // Initialize the p4est and libsc subsystems:
#  if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
    sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
    p4est_init(nullptr, SC_LP_SILENT);

    constructor_has_already_run = true;
#ifdef DEAL_II_WITH_MPI
    // To choose a default thread count, determine how many MPI processes
    // share this node: gather all hostnames and count occurrences of our own.
    const unsigned int max_hostname_size =
      Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
    std::vector<char> hostname_array(max_hostname_size);
    std::copy(hostname.c_str(), hostname.c_str() + hostname.size() + 1,
              hostname_array.begin());

    std::vector<char> all_hostnames(max_hostname_size *
                                    MPI::n_mpi_processes(MPI_COMM_WORLD));
    const int ierr = MPI_Allgather(hostname_array.data(), max_hostname_size,
                                   MPI_CHAR, all_hostnames.data(),
                                   max_hostname_size, MPI_CHAR, MPI_COMM_WORLD);

    // Count the ranks on this host and our position among them:
    unsigned int n_local_processes   = 0;
    unsigned int nth_process_on_host = 0;
    for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
      if (std::string(all_hostnames.data() + i * max_hostname_size) ==
          hostname)
        {
          ++n_local_processes;
          if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
            ++nth_process_on_host;
        }

    // Divide the cores of this host evenly among its MPI processes; the first
    // few processes get one core more if the division has a remainder:
    const unsigned int n_threads =
      std::max(MultithreadInfo::n_cores() / n_local_processes +
                 (nth_process_on_host <=
                      MultithreadInfo::n_cores() % n_local_processes ?
                    1 : 0),
               1U);
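    // Usage sketch (illustrative, not part of mpi.cc): MPI_InitFinalize is
    // meant to be the first object created in main(); its destructor then
    // finalizes MPI (and the optional libraries) when main() ends. The last
    // argument limits the number of threads per MPI process (here: 1). The
    // function name below is a stand-in for a real main().
    int example_main(int argc, char **argv)
    {
      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
      // ... set up and run the parallel program ...
      return 0;
    }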
980 "You tried to call unregister_request() with an invalid request."));
  // ~MPI_InitFinalize(): release library resources in reverse order of
  // initialization, then finalize MPI itself.
#ifdef DEAL_II_WITH_MPI
    // Wait for all registered non-blocking communication to finish:
    const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);

    // Return all unused vector memory to the pools while MPI is still alive
    // (first the deal.II vectors, then the Trilinos ones):
    release_unused_memory();
    release_unused_memory();
#  if defined(DEAL_II_WITH_TRILINOS)

#ifdef DEAL_II_WITH_PETSC
    // Only touch PETSc objects if PETSc is initialized and not yet finalized:
    if ((PetscInitializeCalled == PETSC_TRUE) &&
        (PetscFinalizeCalled == PETSC_FALSE))
#  ifdef DEAL_II_WITH_SLEPC

#ifdef DEAL_II_WITH_CUDA
    // Likewise release the memory pools of CUDA-backed vectors:
    release_unused_memory();
    release_unused_memory();

#ifdef DEAL_II_WITH_P4EST

#ifdef DEAL_II_WITH_MPI
    // Do not call MPI_Finalize() while an exception is propagating, to avoid a
    // deadlock (std::uncaught_exception() is deprecated since C++17):
#  if __cpp_lib_uncaught_exceptions >= 201411
    if (std::uncaught_exceptions() > 0)
#  else
    if (std::uncaught_exception() == true)
#  endif
  // job_supports_mpi(): report whether the MPI runtime has been initialized.
#ifdef DEAL_II_WITH_MPI
    int       MPI_has_been_started = 0;
    const int ierr = MPI_Initialized(&MPI_has_been_started);
    return (MPI_has_been_started > 0);
    // compute_index_owner(): for every index in indices_to_look_up, determine
    // the rank that owns it in the owned_indices partition.
    std::vector<unsigned int>
    compute_index_owner(const IndexSet &owned_indices,
                        const IndexSet &indices_to_look_up,
                        const MPI_Comm &comm)
    {
      Assert(owned_indices.size() == indices_to_look_up.size(),
             ExcMessage("IndexSets have to have the same sizes."));
      Assert(owned_indices.size() ==
               Utilities::MPI::max(owned_indices.size(), comm),
             ExcMessage(
               "IndexSets have to have the same size on all processes."));

      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

      // Step 1: set up a dictionary that statically repartitions the owned
      // indices and records their actual owners:
      internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
        owned_indices, indices_to_look_up, comm, owning_ranks);

      // Step 2: query the dictionary through a consensus algorithm:
      ConsensusAlgorithms::Selector<
        std::pair<types::global_dof_index, types::global_dof_index>,
        unsigned int>
        consensus_algorithm(process, comm);
      consensus_algorithm.run();

      return owning_ranks;
    }
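    // Usage sketch (illustrative, not part of mpi.cc): ask which rank owns
    // each ghost index of a vector, given the locally owned index set. Both
    // IndexSets must have the same global size.
    void example_compute_index_owner(const IndexSet &locally_owned,
                                     const IndexSet &ghost_indices,
                                     const MPI_Comm &comm)
    {
      // owners[i] is the rank owning the i-th element of 'ghost_indices':
      const std::vector<unsigned int> owners =
        Utilities::MPI::compute_index_owner(locally_owned, ghost_indices, comm);
      (void)owners;
    }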
  // CollectiveMutex: a mutex that must be acquired and released collectively
  // by all ranks of a communicator. The constructor starts out unlocked:
    , request(MPI_REQUEST_NULL)

  // The destructor insists that the mutex has been released again:
    Assert(!locked,
           ExcMessage(
             "Error: MPI::CollectiveMutex is still locked while being destroyed!"));

  // CollectiveMutex::lock() must not be called while already holding the lock:
    Assert(!locked,
           ExcMessage(
             "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
#ifdef DEAL_II_WITH_MPI
    // For now the lock is implemented with a blocking barrier:
    const int ierr = MPI_Barrier(comm);
#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
    // (disabled alternative: wait for the non-blocking barrier posted by unlock())
    const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
#  endif

  // CollectiveMutex::unlock() requires the lock to be held:
    Assert(locked,
           ExcMessage(
             "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
#ifdef DEAL_II_WITH_MPI
#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
#  else
    // ... and is released through another blocking barrier:
    const int ierr = MPI_Barrier(comm);
#  endif
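  // Usage sketch (illustrative, not part of mpi.cc): the mutex is typically
  // used through CollectiveMutex::ScopedLock so that unlock() is guaranteed to
  // run, and every rank of 'comm' must execute this code path.
  void example_collective_mutex(const MPI_Comm &comm)
  {
    static Utilities::MPI::CollectiveMutex      mutex;
    Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
    // ... a communication pattern that must not interleave with itself ...
  }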
    // Explicit template instantiations:
    template bool
    logical_or<bool>(const bool &, const MPI_Comm &);

    template std::vector<unsigned int>
    compute_set_union(const std::vector<unsigned int> &vec, const MPI_Comm &comm);

    template std::set<unsigned int>
    compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);