#include <deal.II/base/mpi.templates.h>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>

#ifdef DEAL_II_WITH_PETSC
#  include <petscsys.h>

#ifdef DEAL_II_WITH_SLEPC
#  include <slepcsys.h>

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
                                           const unsigned int n_partitions,

      const unsigned int remain = total_size % n_partitions;

      const IndexSet::size_type begin =
        min_size * my_partition_id + std::min(my_partition_id, remain);
      const IndexSet::size_type end =
        min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
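      // Each of the n_partitions partitions receives at least
      // min_size = total_size / n_partitions elements; the first
      // 'remain' partitions get one extra element. For example, with
      // total_size = 10 and n_partitions = 4 the resulting ranges are
      // [0,3), [3,6), [6,8), [8,10).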
    std::vector<MinMaxAvg>
      std::vector<MinMaxAvg> results(my_values.size());

#ifdef DEAL_II_WITH_MPI
      const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);

      const int ierr = MPI_Comm_rank(mpi_communicator, &rank);

      const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
      return new_communicator;

      const int ierr = MPI_Comm_free(&mpi_communicator);
                 const MPI_Group &group,

#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      return MPI_Comm_create_group(comm, group, tag, new_comm);
#  else
      int ierr = MPI_Comm_rank(comm, &rank);

      ierr = MPI_Group_rank(group, &grp_rank);

      if (grp_rank == MPI_UNDEFINED)
        *new_comm = MPI_COMM_NULL;

      ierr = MPI_Group_size(group, &grp_size);

      ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);

      MPI_Group parent_grp;
      ierr = MPI_Comm_group(comm, &parent_grp);

      std::vector<int> pids(grp_size);
      std::vector<int> grp_pids(grp_size);
      std::iota(grp_pids.begin(), grp_pids.end(), 0);
      ierr = MPI_Group_translate_ranks(
        group, grp_size, grp_pids.data(), parent_grp, pids.data());
      ierr = MPI_Group_free(&parent_grp);
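      // The loop that follows emulates MPI_Comm_create_group(): starting
      // from a communicator containing only this process, sub-communicators
      // whose leading group ranks are merge_sz apart are connected with
      // MPI_Intercomm_create() and fused with MPI_Intercomm_merge(), doubling
      // the size of the merged communicator in every iteration until it
      // covers all ranks of 'group'. The 'pids' array maps group ranks to
      // ranks in the parent communicator 'comm'.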
      for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
          const int gid = grp_rank / merge_sz;
          comm_old      = *new_comm;

          if ((gid + 1) * merge_sz < grp_size)
              ierr = (MPI_Intercomm_create(
                *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
              ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);

              ierr = MPI_Intercomm_create(
                *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
              ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);

          if (*new_comm != comm_old)
              ierr = MPI_Comm_free(&ic);
              ierr = MPI_Comm_free(&comm_old);
    std::vector<IndexSet>
      const std::vector<IndexSet::size_type> sizes =
      const auto total_size =

      std::vector<IndexSet> res(n_proc, IndexSet(total_size));

      for (unsigned int i = 0; i < n_proc; ++i)
        const std::vector<T1> &,
        std::vector<T2> &) override
          this->sources.push_back(other_rank);

      virtual std::vector<unsigned int>

      std::vector<unsigned int>
    std::vector<unsigned int>
      const std::vector<unsigned int> &destinations)

      for (const unsigned int destination : destinations)
          Assert(destination != myid,
                 ExcMessage(
                   "There is no point in communicating with ourselves."));

#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      consensus_algorithm(process, mpi_comm);
      consensus_algorithm.run();

#  elif DEAL_II_MPI_VERSION_GTE(2, 2)
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)

      unsigned int n_recv_from;
      const int ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
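      // dest_vector counts, for every rank, how many messages this process
      // is going to send to it. MPI_Reduce_scatter_block sums these vectors
      // element-wise over all ranks and delivers to each rank exactly its
      // own entry, i.e. the number of messages it will receive.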
      std::vector<MPI_Request> send_requests(destinations.size());
      for (const auto &el : destinations)
                  send_requests.data() + (&el - destinations.data()));

      std::vector<unsigned int> origins(n_recv_from);
      for (auto &el : origins)
          const int ierr = MPI_Recv(&el,

      if (destinations.size() > 0)
          const int ierr = MPI_Waitall(destinations.size(),
                                       send_requests.data(),
                                       MPI_STATUSES_IGNORE);

      const unsigned int max_n_destinations =

      if (max_n_destinations == 0)
        return std::vector<unsigned int>();
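      // Fallback for MPI versions without MPI_Reduce_scatter_block: every
      // rank pads its destination list to the globally largest length
      // (max_n_destinations), all padded lists are exchanged with
      // MPI_Allgather, and each rank then scans the combined table for
      // entries equal to its own id to find the ranks that will send to it.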
      std::vector<unsigned int> my_destinations(max_n_destinations,
                my_destinations.begin());

      std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
      const int ierr = MPI_Allgather(my_destinations.data(),
                                     all_destinations.data(),

      std::vector<unsigned int> origins;
      for (unsigned int i = 0; i < n_procs; ++i)
        for (unsigned int j = 0; j < max_n_destinations; ++j)
          if (all_destinations[i * max_n_destinations + j] == myid)
            origins.push_back(i);
          else if (all_destinations[i * max_n_destinations + j] ==
      const std::vector<unsigned int> &destinations)

      for (const unsigned int destination : destinations)
          Assert(destination != myid,
                 ExcMessage(
                   "There is no point in communicating with ourselves."));

      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)

#  if DEAL_II_MPI_VERSION_GTE(2, 2)
      unsigned int n_recv_from = 0;

      const int ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
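      // With MPI >= 2.2, the reduce-scatter of the per-destination message
      // counts directly yields how many messages this rank will receive. The
      // pre-2.2 branch below obtains the same number by reducing dest_vector
      // onto a single root rank and scattering the summed entries back, one
      // per process.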
#  else
      std::vector<unsigned int> buffer(dest_vector.size());
      unsigned int n_recv_from = 0;

      MPI_Reduce(dest_vector.data(),
      MPI_Scatter(buffer.data(),
    max_reduce(const void *in_lhs_,
      const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
      MinMaxAvg *      inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

      for (int i = 0; i < *len; i++)
          inout_rhs[i].sum += in_lhs[i].sum;

          if (inout_rhs[i].min > in_lhs[i].min)
              inout_rhs[i].min       = in_lhs[i].min;
              inout_rhs[i].min_index = in_lhs[i].min_index;
          else if (inout_rhs[i].min == in_lhs[i].min)
              if (inout_rhs[i].min_index > in_lhs[i].min_index)
                inout_rhs[i].min_index = in_lhs[i].min_index;

          if (inout_rhs[i].max < in_lhs[i].max)
              inout_rhs[i].max       = in_lhs[i].max;
              inout_rhs[i].max_index = in_lhs[i].max_index;
          else if (inout_rhs[i].max == in_lhs[i].max)
              if (inout_rhs[i].max_index > in_lhs[i].max_index)
                inout_rhs[i].max_index = in_lhs[i].max_index;
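    // max_reduce() is registered below via MPI_Op_create() as the reduction
    // operation for MinMaxAvg: sums accumulate, the smaller minimum and the
    // larger maximum win, and ties are broken towards the smaller rank index
    // so that the result does not depend on the order in which ranks are
    // combined.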
      for (unsigned int i = 0; i < my_values.size(); i++)
          result[i].sum       = my_values[i];
          result[i].avg       = my_values[i];
          result[i].min       = my_values[i];
          result[i].max       = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
      for (auto &i : result)

      const unsigned int my_id =
      const unsigned int numproc =

      MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),

      std::vector<MinMaxAvg> in(my_values.size());

      for (unsigned int i = 0; i < my_values.size(); i++)
          in[i].sum = in[i].min = in[i].max = my_values[i];
          in[i].min_index = in[i].max_index = my_id;
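      // The MPI datatype assembled below mirrors the layout of MinMaxAvg:
      // block lengths {3, 2, 1} with types {MPI_DOUBLE, MPI_INT, MPI_DOUBLE},
      // i.e. the three doubles sum/min/max, the two integer indices
      // min_index/max_index, and the trailing double avg.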
      int          lengths[]       = {3, 2, 1};
      MPI_Aint     displacements[] = {0,
      MPI_Datatype types[]         = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};

      ierr = MPI_Type_create_struct(3, lengths, displacements, types, &type);
      ierr = MPI_Type_commit(&type);

      ierr = MPI_Allreduce(
        in.data(), result.data(), my_values.size(), type, op, mpi_communicator);

      ierr = MPI_Type_free(&type);
      ierr = MPI_Op_free(&op);

      for (auto &r : result)
        r.avg = r.sum / numproc;
    std::vector<IndexSet>

      return mpi_communicator;

      for (unsigned int i = 0; i < my_values.size(); i++)
          result[i].sum       = my_values[i];
          result[i].avg       = my_values[i];
          result[i].min       = my_values[i];
          result[i].max       = my_values[i];
          result[i].min_index = 0;
          result[i].max_index = 0;
      const unsigned int max_num_threads)
      static bool constructor_has_already_run = false;
      (void)constructor_has_already_run;
      Assert(constructor_has_already_run == false,
             ExcMessage("You can only create a single object of this class "
                        "in a program since it initializes the MPI system."));

#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrow(MPI_has_been_started == 0,
                  ExcMessage("MPI error. You can only start MPI once!"));

      int wanted = MPI_THREAD_SERIALIZED;
      ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
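      // MPI is initialized with MPI_THREAD_SERIALIZED: the process may be
      // multi-threaded, but only one thread at a time is allowed to make MPI
      // calls. 'provided' reports the thread support level the MPI library
      // actually granted, which may be lower than the requested one.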
#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
      ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
#  else
      ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
#  endif

      PetscPopSignalHandler();

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
      Zoltan_Initialize(argc, argv, &version);

#ifdef DEAL_II_WITH_P4EST
#  if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
      p4est_init(nullptr, SC_LP_SILENT);

      constructor_has_already_run = true;
#ifdef DEAL_II_WITH_MPI
      const unsigned int max_hostname_size =
      std::vector<char>  hostname_array(max_hostname_size);
                hostname.c_str() + hostname.size() + 1,
                hostname_array.begin());

      std::vector<char> all_hostnames(max_hostname_size *
      const int ierr = MPI_Allgather(hostname_array.data(),
                                     all_hostnames.data(),

      unsigned int n_local_processes   = 0;
      unsigned int nth_process_on_host = 0;
        if (std::string(all_hostnames.data() + i * max_hostname_size) ==
            ++nth_process_on_host;
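      // With all hostnames gathered, each process has counted how many MPI
      // ranks share its node (n_local_processes) and which of them it is
      // (nth_process_on_host). The thread count computed below uses these
      // numbers to divide the cores of the node among the local ranks
      // instead of letting every rank use all cores.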
      const unsigned int n_threads =
        (nth_process_on_host <=
916 "You tried to call unregister_request() with an invalid request."));
934 #ifdef DEAL_II_WITH_MPI
938 const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
946 release_unused_memory();
950 release_unused_memory();
953 # if defined(DEAL_II_WITH_TRILINOS)
964 #ifdef DEAL_II_WITH_PETSC
965 if ((PetscInitializeCalled == PETSC_TRUE) &&
966 (PetscFinalizeCalled == PETSC_FALSE))
973 # ifdef DEAL_II_WITH_SLEPC
986 #ifdef DEAL_II_WITH_CUDA
989 release_unused_memory();
992 release_unused_memory();
995 #ifdef DEAL_II_WITH_P4EST
1005 #ifdef DEAL_II_WITH_MPI
1008 # if __cpp_lib_uncaught_exceptions >= 201411
1010 if (std::uncaught_exceptions() > 0)
1012 if (std::uncaught_exception() ==
true)
1019 const int ierr = MPI_Finalize();
#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      const int ierr = MPI_Initialized(&MPI_has_been_started);

      return (MPI_has_been_started > 0);

    std::vector<unsigned int>
      const IndexSet &indices_to_look_up,

             ExcMessage("IndexSets have to have the same sizes."));
             ExcMessage(
               "IndexSets have to have the same size on all processes."));

      std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());

        owned_indices, indices_to_look_up, comm, owning_ranks);
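      // The payload set up above encapsulates the question "which rank owns
      // each entry of indices_to_look_up, given that every rank contributes
      // its owned_indices?". The consensus algorithm below answers it with
      // dynamic point-to-point communication, i.e. without any rank having
      // to know beforehand who will contact it, and stores the result in
      // owning_ranks.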
        std::pair<types::global_dof_index, types::global_dof_index>,
        consensus_algorithm(process, comm);
      consensus_algorithm.run();

      return owning_ranks;
      , request(MPI_REQUEST_NULL)

          "Error: MPI::CollectiveMutex is still locked while being destroyed!"));

          "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));

#ifdef DEAL_II_WITH_MPI
      const int ierr = MPI_Barrier(comm);

#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
      const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);

          "Error: MPI::CollectiveMutex needs to be locked before unlock()"));

#ifdef DEAL_II_WITH_MPI
#  if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
      const int ierr = MPI_Ibarrier(comm, &request);
#  else
      const int ierr = MPI_Barrier(comm);
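      // Both lock() and unlock() currently use a plain blocking MPI_Barrier().
      // The nonblocking alternative, posting an MPI_Ibarrier() here in
      // unlock() and completing it with MPI_Wait() in the next lock(), is
      // compiled out via the "#if 0 && ..." guards above.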
    template std::vector<unsigned int>

    template std::set<unsigned int>