#include <deal.II/base/exceptions.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/multithread_info.h>
#include <deal.II/base/utilities.h>

#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector_memory.h>

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <limits>
#include <numeric>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <deal.II/lac/trilinos_parallel_block_vector.h>
#    include <deal.II/lac/trilinos_vector.h>
#    include <deal.II/lac/vector_memory.h>

#    include <Epetra_MpiComm.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <deal.II/lac/petsc_block_vector.h>
#  include <deal.II/lac/petsc_vector.h>

#  include <petscsys.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <deal.II/lac/slepc_solver.h>

#  include <slepcsys.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
#endif

DEAL_II_NAMESPACE_OPEN
namespace Utilities
{
  namespace MPI
  {
#ifdef DEAL_II_WITH_MPI
    // Return the number of MPI ranks in the given communicator.
    unsigned int
    n_mpi_processes(const MPI_Comm &mpi_communicator)
    {
      int       n_jobs = 1;
      const int ierr   = MPI_Comm_size(mpi_communicator, &n_jobs);
      AssertThrowMPI(ierr);

      return n_jobs;
    }


    // Return the rank of the calling process within the given communicator.
    unsigned int
    this_mpi_process(const MPI_Comm &mpi_communicator)
    {
      int       rank = 0;
      const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
      AssertThrowMPI(ierr);

      return rank;
    }


    MPI_Comm
    duplicate_communicator(const MPI_Comm &mpi_communicator)
    {
      MPI_Comm  new_communicator;
      const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
      AssertThrowMPI(ierr);
      return new_communicator;
    }
    // Create a communicator that contains exactly the processes of the given
    // group. With MPI >= 3.0 this is MPI_Comm_create_group(); otherwise it is
    // emulated by pairwise intercommunicator merges.
    int
    create_group(const MPI_Comm & comm,
                 const MPI_Group &group,
                 const int        tag,
                 MPI_Comm *       new_comm)
    {
#  if DEAL_II_MPI_VERSION_GTE(3, 0)
      return MPI_Comm_create_group(comm, group, tag, new_comm);
#  else
      int rank;
      int ierr = MPI_Comm_rank(comm, &rank);
      AssertThrowMPI(ierr);

      int grp_rank;
      ierr = MPI_Group_rank(group, &grp_rank);
      AssertThrowMPI(ierr);
      if (grp_rank == MPI_UNDEFINED)
        {
          *new_comm = MPI_COMM_NULL;
          return MPI_SUCCESS;
        }

      int grp_size;
      ierr = MPI_Group_size(group, &grp_size);
      AssertThrowMPI(ierr);

      // start from a communicator containing only the calling process...
      ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
      AssertThrowMPI(ierr);

      // ...and translate the group ranks into ranks of the parent
      // communicator
      MPI_Group parent_grp;
      ierr = MPI_Comm_group(comm, &parent_grp);
      AssertThrowMPI(ierr);

      std::vector<int> pids(grp_size);
      std::vector<int> grp_pids(grp_size);
      std::iota(grp_pids.begin(), grp_pids.end(), 0);
      ierr = MPI_Group_translate_ranks(
        group, grp_size, grp_pids.data(), parent_grp, pids.data());
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&parent_grp);
      AssertThrowMPI(ierr);

      // merge the single-process communicators pairwise until the
      // communicator covers the whole group
      MPI_Comm comm_old = *new_comm;
      MPI_Comm ic;
      for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
        {
          const int gid = grp_rank / merge_sz;
          comm_old      = *new_comm;
          if (gid % 2 == 0)
            {
              if ((gid + 1) * merge_sz < grp_size)
                {
                  ierr = (MPI_Intercomm_create(
                    *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
                  AssertThrowMPI(ierr);
                  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
                  AssertThrowMPI(ierr);
                }
            }
          else
            {
              ierr = MPI_Intercomm_create(
                *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
              AssertThrowMPI(ierr);
              ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
              AssertThrowMPI(ierr);
            }
          if (*new_comm != comm_old)
            {
              ierr = MPI_Comm_free(&ic);
              AssertThrowMPI(ierr);
              ierr = MPI_Comm_free(&comm_old);
              AssertThrowMPI(ierr);
            }
        }

      return MPI_SUCCESS;
#  endif
    }
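    // Usage sketch (illustrative only, not part of the library; the variable
    // names are hypothetical). Given a group extracted from an existing
    // communicator, create_group() yields a communicator containing exactly
    // the processes of that group; on processes outside the group it returns
    // MPI_COMM_NULL:
    //
    //   // pick the first two ranks of MPI_COMM_WORLD:
    //   MPI_Group world_group;
    //   MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    //   int       ranks[2] = {0, 1};
    //   MPI_Group sub_group;
    //   MPI_Group_incl(world_group, 2, ranks, &sub_group);
    //
    //   MPI_Comm sub_comm;
    //   Utilities::MPI::create_group(MPI_COMM_WORLD, sub_group, /*tag=*/0,
    //                                &sub_comm);
    //   // ...use sub_comm, then free sub_comm and both groups when done.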
    // Partition the index range [0, sum of all local sizes) into one
    // contiguous IndexSet per process, ordered by MPI rank.
    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm &           comm,
                                  const IndexSet::size_type &local_size)
    {
      const unsigned int                     n_proc = n_mpi_processes(comm);
      const std::vector<IndexSet::size_type> sizes =
        all_gather(comm, local_size);
      const auto total_size =
        std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));

      std::vector<IndexSet> res(n_proc, IndexSet(total_size));

      IndexSet::size_type begin = 0;
      for (unsigned int i = 0; i < n_proc; ++i)
        {
          res[i].add_range(begin, begin + sizes[i]);
          begin = begin + sizes[i];
        }

      return res;
    }
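    // Worked example (illustrative only): with three ranks whose local sizes
    // are 4, 5, and 2, every rank obtains the same vector of IndexSets over
    // the global range [0, 11):
    //
    //   rank 0: [0, 4)    rank 1: [4, 9)    rank 2: [9, 11)
    //
    //   const auto parts =
    //     Utilities::MPI::create_ascending_partitioning(comm, my_local_size);
    //   const IndexSet &my_range =
    //     parts[Utilities::MPI::this_mpi_process(comm)];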
    // For a given list of ranks this process wants to send to, determine the
    // list of ranks that will send to this process.
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
      const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

      for (const unsigned int destination : destinations)
        {
          (void)destination;
          Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
          Assert(destination != myid,
                 ExcMessage(
                   "There is no point in communicating with ourselves."));
        }

#  if DEAL_II_MPI_VERSION_GTE(2, 2)
      // Count how many messages this process wants to send to each of the
      // other processes
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)
        ++dest_vector[el];

      // Find out how many processes will send to this one by reducing with
      // sum and then scattering the results over all processes
      unsigned int n_recv_from;
      const int    ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);

      AssertThrowMPI(ierr);

      // Send myid to every process in the destinations vector...
      std::vector<MPI_Request> send_requests(destinations.size());
      for (const auto &el : destinations)
        MPI_Isend(&myid,
                  1,
                  MPI_UNSIGNED,
                  el,
                  32766,
                  mpi_comm,
                  send_requests.data() + (&el - destinations.data()));

      // ...and receive n_recv_from times from the processes that communicate
      // with this one, collecting their ids
      if (n_recv_from == 0)
        return std::vector<unsigned int>();

      std::vector<unsigned int> origins(n_recv_from);
      for (auto &el : origins)
        MPI_Recv(&el,
                 1,
                 MPI_UNSIGNED,
                 MPI_ANY_SOURCE,
                 32766,
                 mpi_comm,
                 MPI_STATUS_IGNORE);

      MPI_Waitall(destinations.size(),
                  send_requests.data(),
                  MPI_STATUSES_IGNORE);
      return origins;
#  else
      // Without MPI-2.2, let all processes communicate the maximal number of
      // destinations they have
      const unsigned int max_n_destinations =
        Utilities::MPI::max(destinations.size(), mpi_comm);

      if (max_n_destinations == 0)
        // all processes have nothing to send/receive:
        return std::vector<unsigned int>();

      // set up a buffer of maximal size and copy our destinations into it,
      // padded with invalid values
      std::vector<unsigned int> my_destinations(max_n_destinations,
                                                numbers::invalid_unsigned_int);
      std::copy(destinations.begin(),
                destinations.end(),
                my_destinations.begin());

      // now exchange these buffers among all processes
      std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
      const int ierr = MPI_Allgather(my_destinations.data(),
                                     max_n_destinations,
                                     MPI_UNSIGNED,
                                     all_destinations.data(),
                                     max_n_destinations,
                                     MPI_UNSIGNED,
                                     mpi_comm);
      AssertThrowMPI(ierr);

      // now we know who is going to communicate with whom; collect who is
      // going to communicate with us
      std::vector<unsigned int> origins;
      for (unsigned int i = 0; i < n_procs; ++i)
        for (unsigned int j = 0; j < max_n_destinations; ++j)
          if (all_destinations[i * max_n_destinations + j] == myid)
            origins.push_back(i);
          else if (all_destinations[i * max_n_destinations + j] ==
                   numbers::invalid_unsigned_int)
            break;

      return origins;
#  endif
    }
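    // Usage sketch (illustrative only; variable names are hypothetical):
    // every process lists whom it wants to send to, and the function returns
    // whom it will receive from, so that matching receives can be posted:
    //
    //   std::vector<unsigned int> my_destinations = ...;
    //   const std::vector<unsigned int> sources =
    //     Utilities::MPI::compute_point_to_point_communication_pattern(
    //       mpi_comm, my_destinations);
    //   // post one receive per entry of `sources` and one send per entry
    //   // of `my_destinations`.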
    // Same as above, but only return how many processes will send to this
    // one rather than which ones.
    unsigned int
    compute_n_point_to_point_communications(
      const MPI_Comm &                 mpi_comm,
      const std::vector<unsigned int> &destinations)
    {
      const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

      for (const unsigned int destination : destinations)
        {
          (void)destination;
          Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
          Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
                 ExcMessage(
                   "There is no point in communicating with ourselves."));
        }

      // Count the number of messages to send to each process
      std::vector<unsigned int> dest_vector(n_procs);
      for (const auto &el : destinations)
        ++dest_vector[el];

#  if DEAL_II_MPI_VERSION_GTE(2, 2)
      // MPI_Reduce_scatter_block does exactly what we need: sum up the
      // per-destination counts and hand each process its own entry
      unsigned int n_recv_from = 0;

      const int ierr = MPI_Reduce_scatter_block(
        dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);

      AssertThrowMPI(ierr);

      return n_recv_from;
#  else
      // Emulate it by reducing on rank 0 and scattering the result back
      std::vector<unsigned int> buffer(dest_vector.size());
      unsigned int              n_recv_from = 0;

      MPI_Reduce(dest_vector.data(),
                 buffer.data(),
                 dest_vector.size(),
                 MPI_UNSIGNED,
                 MPI_SUM,
                 0,
                 mpi_comm);
      MPI_Scatter(buffer.data(),
                  1,
                  MPI_UNSIGNED,
                  &n_recv_from,
                  1,
                  MPI_UNSIGNED,
                  0,
                  mpi_comm);

      return n_recv_from;
#  endif
    }
    namespace
    {
      // Custom MPI reduction operation used by min_max_avg() below: combine
      // two MinMaxAvg objects, accumulating the sum and keeping track of
      // where the minimum and maximum were attained.
      void
      max_reduce(const void *in_lhs_,
                 void *      inout_rhs_,
                 int *       len,
                 MPI_Datatype *)
      {
        (void)len;
        const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
        MinMaxAvg *      inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

        Assert(*len == 1, ExcInternalError());

        inout_rhs->sum += in_lhs->sum;
        if (inout_rhs->min > in_lhs->min)
          {
            inout_rhs->min       = in_lhs->min;
            inout_rhs->min_index = in_lhs->min_index;
          }
        else if (inout_rhs->min == in_lhs->min)
          {
            // choose the lower rank if the minimum is attained more than once
            if (inout_rhs->min_index > in_lhs->min_index)
              inout_rhs->min_index = in_lhs->min_index;
          }

        if (inout_rhs->max < in_lhs->max)
          {
            inout_rhs->max       = in_lhs->max;
            inout_rhs->max_index = in_lhs->max_index;
          }
        else if (inout_rhs->max == in_lhs->max)
          {
            // likewise for the maximum
            if (inout_rhs->max_index > in_lhs->max_index)
              inout_rhs->max_index = in_lhs->max_index;
          }
      }
    } // namespace
    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
    {
      // If MPI was not started, we have a serial computation and cannot run
      // the other MPI commands
      if (job_supports_mpi() == false)
        {
          MinMaxAvg result;
          result.sum       = my_value;
          result.avg       = my_value;
          result.min       = my_value;
          result.max       = my_value;
          result.min_index = 0;
          result.max_index = 0;

          return result;
        }

      // To avoid uninitialized values on some MPI implementations, give the
      // result a defined value up front
      MinMaxAvg result = {0.,
                          std::numeric_limits<double>::max(),
                          -std::numeric_limits<double>::max(),
                          0,
                          0,
                          0.};

      const unsigned int my_id =
        dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
      const unsigned int numproc =
        dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);

      // register the reduction operation defined above...
      MPI_Op op;
      int    ierr =
        MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
                      true,
                      &op);
      AssertThrowMPI(ierr);

      MinMaxAvg in;
      in.sum = in.min = in.max = my_value;
      in.min_index = in.max_index = my_id;

      // ...and an MPI datatype matching the layout of MinMaxAvg: three
      // doubles followed by two ints
      MPI_Datatype type;
      int          lengths[]       = {3, 2};
      MPI_Aint     displacements[] = {0, offsetof(MinMaxAvg, min_index)};
      MPI_Datatype types[]         = {MPI_DOUBLE, MPI_INT};

      ierr = MPI_Type_create_struct(2, lengths, displacements, types, &type);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_commit(&type);
      AssertThrowMPI(ierr);
      ierr = MPI_Allreduce(&in, &result, 1, type, op, mpi_communicator);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_free(&type);
      AssertThrowMPI(ierr);

      ierr = MPI_Op_free(&op);
      AssertThrowMPI(ierr);

      result.avg = result.sum / numproc;

      return result;
    }
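    // Usage sketch (illustrative only): gather statistics of a per-process
    // quantity, e.g. a local timing, in a single collective call:
    //
    //   const double local_time = ...;
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg(local_time, mpi_communicator);
    //   // stats.min/.max/.avg hold the extrema and average over all ranks,
    //   // stats.min_index/.max_index the ranks on which they were attained.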
#else

    // Trivial implementations of the same interface when deal.II was
    // configured without MPI.

    std::vector<IndexSet>
    create_ascending_partitioning(const MPI_Comm & /*comm*/,
                                  const IndexSet::size_type &local_size)
    {
      return std::vector<IndexSet>(1, complete_index_set(local_size));
    }


    MPI_Comm
    duplicate_communicator(const MPI_Comm &mpi_communicator)
    {
      return mpi_communicator;
    }


    MinMaxAvg
    min_max_avg(const double my_value, const MPI_Comm &)
    {
      MinMaxAvg result;

      result.sum       = my_value;
      result.avg       = my_value;
      result.min       = my_value;
      result.max       = my_value;
      result.min_index = 0;
      result.max_index = 0;

      return result;
    }

#endif
    MPI_InitFinalize::MPI_InitFinalize(int &              argc,
                                       char **&           argv,
                                       const unsigned int max_num_threads)
    {
      static bool constructor_has_already_run = false;
      (void)constructor_has_already_run;
      Assert(constructor_has_already_run == false,
             ExcMessage("You can only create a single object of this class "
                        "in a program since it initializes the MPI system."));


      int ierr = 0;
#ifdef DEAL_II_WITH_MPI
      // if we have PETSc, we will initialize it and let it handle MPI.
      // Otherwise, we will do it.
      int MPI_has_been_started = 0;
      ierr                     = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);
      AssertThrow(MPI_has_been_started == 0,
                  ExcMessage("MPI error. You can only start MPI once!"));

      // Initialize MPI such that we may use several threads but never call
      // two MPI functions at the same time
      int provided;
      int wanted = MPI_THREAD_SERIALIZED;
      ierr       = MPI_Init_thread(&argc, &argv, wanted, &provided);
      AssertThrowMPI(ierr);
#endif

#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
      // Initialize SLEPc (which also initializes PETSc):
      ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow(ierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(ierr));
#  else
      // or just initialize PETSc alone:
      ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow(ierr == 0, ExcPETScError(ierr));
#  endif

      // Disable PETSc's own signal handler
      PetscPopSignalHandler();
#endif

      // Initialize Zoltan
#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
      float version;
      Zoltan_Initialize(argc, argv, &version);
#endif

#ifdef DEAL_II_WITH_P4EST
      // Initialize p4est and its libsc subsystem
#  if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
      // p4est >= 2.0 initializes libsc itself
#  else
      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
#  endif
      p4est_init(nullptr, SC_LP_SILENT);
#endif

      constructor_has_already_run = true;
      // Now also decide how many threads to run: either the number the user
      // asked for, or an automatic choice based on how many MPI processes
      // share the current machine
      if (max_num_threads != numbers::invalid_unsigned_int)
        {
          MultithreadInfo::set_thread_limit(max_num_threads);
        }
      else
        {
#ifdef DEAL_II_WITH_MPI
          // figure out how many processes run on the current host: gather
          // all hostnames and count how often our own appears. don't forget
          // the terminating \0 of C-style strings
          const std::string  hostname = Utilities::System::get_hostname();
          const unsigned int max_hostname_size =
            Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);

          std::vector<char> hostname_array(max_hostname_size);
          std::copy(hostname.c_str(),
                    hostname.c_str() + hostname.size() + 1,
                    hostname_array.begin());

          std::vector<char> all_hostnames(max_hostname_size *
                                          MPI::n_mpi_processes(MPI_COMM_WORLD));
          const int ierr = MPI_Allgather(hostname_array.data(),
                                         max_hostname_size,
                                         MPI_CHAR,
                                         all_hostnames.data(),
                                         max_hostname_size,
                                         MPI_CHAR,
                                         MPI_COMM_WORLD);
          AssertThrowMPI(ierr);

          // count how many processes report the same hostname as ours, and
          // which one among them the current process is
          unsigned int n_local_processes   = 0;
          unsigned int nth_process_on_host = 0;
          for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
               ++i)
            if (std::string(all_hostnames.data() + i * max_hostname_size) ==
                hostname)
              {
                ++n_local_processes;
                if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
                  ++nth_process_on_host;
              }
          Assert(nth_process_on_host > 0, ExcInternalError());

          // split the available cores among the processes on this host; the
          // first few processes get one more core if the division does not
          // come out even, and every process gets at least one thread
          const unsigned int n_threads =
            std::max(MultithreadInfo::n_cores() / n_local_processes +
                       (nth_process_on_host <=
                            MultithreadInfo::n_cores() % n_local_processes ?
                          1 :
                          0),
                     1U);
#else
          const unsigned int n_threads = MultithreadInfo::n_cores();
#endif

          MultithreadInfo::set_thread_limit(n_threads);
        }
    }
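    // Usage sketch (illustrative only): this class is meant to be created
    // exactly once, at the top of main(), so that MPI (and, if configured,
    // PETSc/SLEPc, Zoltan, and p4est) is initialized before any other
    // deal.II object and finalized after the last one is destroyed:
    //
    //   int main(int argc, char *argv[])
    //   {
    //     dealii::Utilities::MPI::MPI_InitFinalize
    //       mpi_initialization(argc, argv, /*max_num_threads=*/1);
    //     // ... run the program ...
    //   } // <- the destructor finalizes MPI and friends here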
    MPI_InitFinalize::~MPI_InitFinalize()
    {
      // Make the memory pools release all vectors that are no longer used at
      // this point: their static destructors would otherwise run only after
      // MPI (or PETSc/Trilinos) has already been finalized.
#ifdef DEAL_II_WITH_MPI
      // deal.II's own distributed vectors first...
      GrowingVectorMemory<
        LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
        release_unused_memory();

      // ...then the Trilinos vector classes
#  if defined(DEAL_II_WITH_TRILINOS)
      GrowingVectorMemory<
        TrilinosWrappers::MPI::Vector>::release_unused_memory();
      GrowingVectorMemory<
        TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
#  endif
#endif

      // Now deal with PETSc (with or without MPI), but only if PetscFinalize
      // has not been called yet
#ifdef DEAL_II_WITH_PETSC
      if ((PetscInitializeCalled == PETSC_TRUE) &&
          (PetscFinalizeCalled == PETSC_FALSE))
        {
          GrowingVectorMemory<
            PETScWrappers::MPI::Vector>::release_unused_memory();
          GrowingVectorMemory<
            PETScWrappers::MPI::BlockVector>::release_unused_memory();

#  ifdef DEAL_II_WITH_SLEPC
          SlepcFinalize();
#  else
          PetscFinalize();
#  endif
        }
#endif

      // The same holds for CUDA vectors: release them before the driver is
      // unloaded
#ifdef DEAL_II_WITH_CUDA
      GrowingVectorMemory<LinearAlgebra::distributed::
                            Vector<float, MemorySpace::CUDA>>::
        release_unused_memory();
      GrowingVectorMemory<LinearAlgebra::distributed::
                            Vector<double, MemorySpace::CUDA>>::
        release_unused_memory();
#endif

#ifdef DEAL_II_WITH_P4EST
      // end the libsc subsystem used by p4est (p4est itself has no finalize
      // function)
      sc_finalize();
#endif

      // Finally call MPI_Finalize() -- unless an exception is currently
      // propagating, in which case skipping it avoids a deadlock
#ifdef DEAL_II_WITH_MPI
      if (job_supports_mpi() == true)
        {
          if (std::uncaught_exception())
            {
              std::cerr
                << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
                << this_mpi_process(MPI_COMM_WORLD)
                << ". Skipping MPI_Finalize() to avoid a deadlock."
                << std::endl;
            }
          else
            {
              const int ierr = MPI_Finalize();
              (void)ierr;
              AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPIError(ierr));
            }
        }
#endif
    }
    bool
    job_supports_mpi()
    {
#ifdef DEAL_II_WITH_MPI
      int       MPI_has_been_started = 0;
      const int ierr                 = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);

      return (MPI_has_been_started > 0);
#else
      return false;
#endif
    }
  } // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE