#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/base/multithread_info.h>

#include <iostream>
#include <numeric>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#    include <deal.II/lac/vector_memory.h>
#    include <deal.II/lac/trilinos_vector.h>
#    include <deal.II/lac/trilinos_parallel_block_vector.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <petscsys.h>
#  include <deal.II/lac/petsc_parallel_block_vector.h>
#  include <deal.II/lac/petsc_parallel_vector.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <slepcsys.h>
#  include <deal.II/lac/slepc_solver.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
#  include <zoltan_cpp.h>
#endif

DEAL_II_NAMESPACE_OPEN

namespace Utilities
{
  namespace MPI
  {
#ifdef DEAL_II_WITH_MPI
    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
    {
      int n_jobs = 1;
      const int ierr = MPI_Comm_size (mpi_communicator, &n_jobs);
      AssertThrowMPI(ierr);

      return n_jobs;
    }


    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
    {
      int rank = 0;
      const int ierr = MPI_Comm_rank (mpi_communicator, &rank);
      AssertThrowMPI(ierr);

      return rank;
    }
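
    // A minimal usage sketch (not part of this source file): report the size
    // of the parallel job from the root rank only.
    //
    //   if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
    //     std::cout << "Running with "
    //               << Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)
    //               << " MPI processes." << std::endl;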

    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
    {
      MPI_Comm new_communicator;
      const int ierr = MPI_Comm_dup (mpi_communicator, &new_communicator);
      AssertThrowMPI(ierr);
      return new_communicator;
    }
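
    // A minimal usage sketch (not part of this source file): duplicating a
    // communicator gives a library its own communication context; the caller
    // remains responsible for freeing the duplicate.
    //
    //   MPI_Comm my_comm = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
    //   // ... use my_comm for this component's messages only ...
    //   MPI_Comm_free(&my_comm);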

    int create_group(const MPI_Comm &comm,
                     const MPI_Group &group,
                     const int tag,
                     MPI_Comm *new_comm)
    {
#if DEAL_II_MPI_VERSION_GTE(3, 0)
      // MPI-3 provides this functionality directly.
      return MPI_Comm_create_group(comm, group, tag, new_comm);
#else
      int rank;
      int ierr = MPI_Comm_rank(comm, &rank);
      AssertThrowMPI(ierr);

      int grp_rank;
      ierr = MPI_Group_rank(group, &grp_rank);
      AssertThrowMPI(ierr);
      if (grp_rank == MPI_UNDEFINED)
        {
          *new_comm = MPI_COMM_NULL;
          return MPI_SUCCESS;
        }

      int grp_size;
      ierr = MPI_Group_size(group, &grp_size);
      AssertThrowMPI(ierr);

      ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
      AssertThrowMPI(ierr);

      MPI_Group parent_grp;
      ierr = MPI_Comm_group(comm, &parent_grp);
      AssertThrowMPI(ierr);

      std::vector<int> pids(grp_size);
      std::vector<int> grp_pids(grp_size);
      std::iota(grp_pids.begin(), grp_pids.end(), 0);
      ierr = MPI_Group_translate_ranks(group, grp_size, grp_pids.data(),
                                       parent_grp, pids.data());
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&parent_grp);
      AssertThrowMPI(ierr);

      // Build the new communicator by doubling: in every round, pairs of
      // adjacent sub-communicators are joined through an inter-communicator.
      MPI_Comm comm_old = *new_comm;
      MPI_Comm ic;
      for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
        {
          const int gid = grp_rank / merge_sz;
          comm_old = *new_comm;
          if (gid % 2 == 0)
            {
              if ((gid + 1) * merge_sz < grp_size)
                {
                  ierr = MPI_Intercomm_create(*new_comm, 0, comm,
                                              pids[(gid + 1) * merge_sz], tag, &ic);
                  AssertThrowMPI(ierr);
                  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
                  AssertThrowMPI(ierr);
                }
            }
          else
            {
              ierr = MPI_Intercomm_create(*new_comm, 0, comm,
                                          pids[(gid - 1) * merge_sz], tag, &ic);
              AssertThrowMPI(ierr);
              ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
              AssertThrowMPI(ierr);
            }
          if (*new_comm != comm_old)
            {
              ierr = MPI_Comm_free(&ic);
              AssertThrowMPI(ierr);
              ierr = MPI_Comm_free(&comm_old);
              AssertThrowMPI(ierr);
            }
        }

      return MPI_SUCCESS;
#endif
    }
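
    // A minimal usage sketch (not part of this source file), assuming the
    // even ranks of MPI_COMM_WORLD should form a sub-communicator; 'n_ranks'
    // is a placeholder for the size of MPI_COMM_WORLD.
    //
    //   MPI_Group world_group, even_group;
    //   MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    //   std::vector<int> even_ranks;
    //   for (int r = 0; r < n_ranks; r += 2)
    //     even_ranks.push_back(r);
    //   MPI_Group_incl(world_group, even_ranks.size(), even_ranks.data(), &even_group);
    //   MPI_Comm even_comm;
    //   Utilities::MPI::create_group(MPI_COMM_WORLD, even_group, /*tag=*/55, &even_comm);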

    std::vector<unsigned int>
    compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
                                                  const std::vector<unsigned int> &destinations)
    {
      const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
      const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

      for (unsigned int i=0; i<destinations.size(); ++i)
        {
          Assert (destinations[i] < n_procs,
                  ExcIndexRange (destinations[i], 0, n_procs));
          Assert (destinations[i] != myid,
                  ExcMessage ("There is no point in communicating with ourselves."));
        }

      // Let all processors communicate the maximal number of destinations
      // they have.
      const unsigned int max_n_destinations
        = Utilities::MPI::max (destinations.size(), mpi_comm);

      if (max_n_destinations==0)
        // all processes have nothing to send/receive:
        return std::vector<unsigned int>();

      // Set up a buffer of the maximal size and copy our destinations into
      // it, padded with invalid entries.
      std::vector<unsigned int> my_destinations(max_n_destinations,
                                                numbers::invalid_unsigned_int);
      std::copy (destinations.begin(), destinations.end(),
                 my_destinations.begin());

      // Exchange the (padded) destination lists of all processes.
      std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
      const int ierr = MPI_Allgather (my_destinations.data(), max_n_destinations, MPI_UNSIGNED,
                                      all_destinations.data(), max_n_destinations, MPI_UNSIGNED,
                                      mpi_comm);
      AssertThrowMPI(ierr);

      // Now we know who is going to communicate with whom; collect who is
      // going to communicate with us.
      std::vector<unsigned int> origins;
      for (unsigned int i=0; i<n_procs; ++i)
        for (unsigned int j=0; j<max_n_destinations; ++j)
          if (all_destinations[i*max_n_destinations + j] == myid)
            origins.push_back (i);
          else if (all_destinations[i*max_n_destinations + j] ==
                   numbers::invalid_unsigned_int)
            break;

      return origins;
    }
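
    // A minimal usage sketch (not part of this source file): each process
    // lists the ranks it will send to, and the function returns the ranks
    // that will send to us, so matching receives can be posted.
    //
    //   std::vector<unsigned int> destinations;        // ranks this process sends to
    //   if (Utilities::MPI::this_mpi_process(comm) == 0)
    //     destinations.push_back(1);                   // e.g. rank 0 sends to rank 1
    //   const std::vector<unsigned int> origins =
    //     Utilities::MPI::compute_point_to_point_communication_pattern(comm, destinations);
    //   // 'origins' now holds the ranks this process should expect a message from.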

    namespace
    {
      // Custom MPI reduction operation used by min_max_avg() below: it
      // combines two MinMaxAvg records element-wise.
      void max_reduce (const void   *in_lhs_,
                       void         *inout_rhs_,
                       int          *len,
                       MPI_Datatype *)
      {
        (void)len;
        const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
        MinMaxAvg       *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

        Assert(*len == 1, ExcInternalError());

        inout_rhs->sum += in_lhs->sum;
        if (inout_rhs->min > in_lhs->min)
          {
            inout_rhs->min       = in_lhs->min;
            inout_rhs->min_index = in_lhs->min_index;
          }
        else if (inout_rhs->min == in_lhs->min)
          {
            // choose the lower rank index if the minima are equal, to keep
            // the operation commutative
            if (inout_rhs->min_index > in_lhs->min_index)
              inout_rhs->min_index = in_lhs->min_index;
          }

        if (inout_rhs->max < in_lhs->max)
          {
            inout_rhs->max       = in_lhs->max;
            inout_rhs->max_index = in_lhs->max_index;
          }
        else if (inout_rhs->max == in_lhs->max)
          {
            // as above: break ties towards the lower rank index
            if (inout_rhs->max_index > in_lhs->max_index)
              inout_rhs->max_index = in_lhs->max_index;
          }
      }
    }

    MinMaxAvg
    min_max_avg(const double my_value,
                const MPI_Comm &mpi_communicator)
    {
      // If MPI was not started, we have a serial computation and cannot run
      // the other MPI commands.
      if (job_supports_mpi() == false)
        {
          MinMaxAvg result;
          result.sum = my_value;
          result.avg = my_value;
          result.min = my_value;
          result.max = my_value;
          result.min_index = 0;
          result.max_index = 0;

          return result;
        }

      // To avoid uninitialized values on some MPI implementations, give
      // 'result' a defined value up front.
      MinMaxAvg result = { 0., std::numeric_limits<double>::max(),
                           -std::numeric_limits<double>::max(), 0, 0, 0.
                         };

      const unsigned int my_id
        = Utilities::MPI::this_mpi_process(mpi_communicator);
      const unsigned int numproc
        = Utilities::MPI::n_mpi_processes(mpi_communicator);

      MPI_Op op;
      int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
      AssertThrowMPI(ierr);

      MinMaxAvg in;
      in.sum = in.min = in.max = my_value;
      in.min_index = in.max_index = my_id;

      // Describe the MinMaxAvg struct to MPI: three doubles followed by two
      // integers (the 'avg' member is not communicated).
      MPI_Datatype type;
      int lengths[] = {3, 2};
      MPI_Aint displacements[] = {0, offsetof(MinMaxAvg, min_index)};
      MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT};

      ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_commit(&type);
      AssertThrowMPI(ierr);
      ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_free (&type);
      AssertThrowMPI(ierr);

      ierr = MPI_Op_free(&op);
      AssertThrowMPI(ierr);

      result.avg = result.sum / numproc;

      return result;
    }
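
    // A minimal usage sketch (not part of this source file): gather
    // statistics of a per-process quantity such as a local timing.
    //
    //   const double local_time = 1.0;   // placeholder for a per-process value
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg(local_time, MPI_COMM_WORLD);
    //   // stats.min/.max/.avg/.sum summarize the value across all ranks;
    //   // stats.min_index/.max_index are the ranks attaining the extrema.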

#else

    // Trivial fallbacks when deal.II is configured without MPI.

    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
    {
      return mpi_communicator;
    }


    MinMaxAvg
    min_max_avg(const double my_value,
                const MPI_Comm &)
    {
      MinMaxAvg result;

      result.sum = my_value;
      result.avg = my_value;
      result.min = my_value;
      result.max = my_value;
      result.min_index = 0;
      result.max_index = 0;

      return result;
    }

#endif

    MPI_InitFinalize::MPI_InitFinalize (int    &argc,
                                        char **&argv,
                                        const unsigned int max_num_threads)
    {
      static bool constructor_has_already_run = false;
      (void)constructor_has_already_run;
      Assert (constructor_has_already_run == false,
              ExcMessage ("You can only create a single object of this class "
                          "in a program since it initializes the MPI system."));


      int ierr = 0;
#ifdef DEAL_II_WITH_MPI
      // Start MPI ourselves if it has not been started yet; complain if it
      // already has been.
      int MPI_has_been_started = 0;
      ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);
      AssertThrow (MPI_has_been_started == 0,
                   ExcMessage ("MPI error. You can only start MPI once!"));

      int provided;
      // This works like MPI_Init(&argc, &argv), but additionally tells MPI
      // that we may use several threads, though never two MPI calls at the
      // same time (MPI_THREAD_SERIALIZED).
      int wanted = MPI_THREAD_SERIALIZED;
      ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
      AssertThrowMPI(ierr);
#else
      // avoid compiler warnings about unused arguments
      (void)argc;
      (void)argv;
      (void)ierr;
#endif

      // We are allowed to call MPI_Init ourselves; PETSc/SLEPc detect that
      // MPI is already initialized.
#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
      // Initialize SLEPc (which also initializes PETSc):
      ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow (ierr == 0, ExcSLEPcError(ierr));
#  else
      // or just initialize PETSc alone:
      ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
      AssertThrow (ierr == 0, ExcPETScError(ierr));
#  endif

      // Disable PETSc's own signal handler.
      PetscPopSignalHandler();
#endif

      // Initialize Zoltan
#ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
      float version;
      Zoltan_Initialize(argc, argv, &version);
#endif

#ifdef DEAL_II_WITH_P4EST
      // Initialize p4est and its libsc subcomponent
#if !(DEAL_II_P4EST_VERSION_GTE(2,0,0,0))
      sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
#endif
      p4est_init (nullptr, SC_LP_SILENT);
#endif

      constructor_has_already_run = true;

      // Now determine how many threads to use: either the number the user
      // asked for, or an automatic choice based on the number of cores and
      // the number of MPI processes on this node.
      if (max_num_threads != numbers::invalid_unsigned_int)
        {
          MultithreadInfo::set_thread_limit(max_num_threads);
        }
      else
        {
#ifdef DEAL_II_WITH_MPI
          // Figure out how many MPI processes share the current node by
          // exchanging host names (including the terminating '\0').
          const std::string hostname = Utilities::System::get_hostname();
          const unsigned int max_hostname_size
            = Utilities::MPI::max (hostname.size()+1, MPI_COMM_WORLD);

          std::vector<char> hostname_array (max_hostname_size);
          std::copy (hostname.c_str(), hostname.c_str()+hostname.size()+1,
                     hostname_array.begin());

          std::vector<char> all_hostnames(max_hostname_size *
                                          MPI::n_mpi_processes(MPI_COMM_WORLD));
          const int ierr = MPI_Allgather (hostname_array.data(), max_hostname_size, MPI_CHAR,
                                          all_hostnames.data(), max_hostname_size, MPI_CHAR,
                                          MPI_COMM_WORLD);
          AssertThrowMPI(ierr);

          // Count how often our own host name appears, and which instance
          // among those the current process is.
          unsigned int n_local_processes   = 0;
          unsigned int nth_process_on_host = 0;
          for (unsigned int i=0; i<MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
            if (std::string (all_hostnames.data() + i*max_hostname_size) == hostname)
              {
                ++n_local_processes;
                if (i <= MPI::this_mpi_process (MPI_COMM_WORLD))
                  ++nth_process_on_host;
              }
          Assert (nth_process_on_host > 0, ExcInternalError());

          // Distribute the available cores among the processes on this host:
          // the first few processes get one extra core if the division does
          // not come out even, and every process gets at least one thread.
          const unsigned int n_threads
            = std::max(MultithreadInfo::n_cores() / n_local_processes
                       + (nth_process_on_host <= MultithreadInfo::n_cores() % n_local_processes
                          ? 1 : 0),
                       1U);
#else
          const unsigned int n_threads = MultithreadInfo::n_cores();
#endif
          MultithreadInfo::set_thread_limit(n_threads);
        }
    }
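
    // A small worked example for the automatic thread choice in the
    // constructor above (an illustration, not code from this file): with 16
    // cores and 3 MPI processes on one host, 16 % 3 == 1, so the first
    // process on that host gets 16/3 + 1 = 6 threads and the other two get
    // 16/3 = 5 threads each, using all 16 cores without oversubscription.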

    MPI_InitFinalize::~MPI_InitFinalize()
    {
      // Make the memory pool release all MPI-based vectors that are no
      // longer used at this point, before the libraries are finalized below;
      // otherwise the static destructors of these pools would run after
      // MPI_Finalize and fail.
#ifdef DEAL_II_WITH_MPI
      // Start with the deal.II MPI vectors:
      GrowingVectorMemory<LinearAlgebra::distributed::Vector<double> >
      ::release_unused_memory ();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double> >
      ::release_unused_memory ();
      GrowingVectorMemory<LinearAlgebra::distributed::Vector<float> >
      ::release_unused_memory ();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float> >
      ::release_unused_memory ();

      // Next the Trilinos vectors:
#  if defined(DEAL_II_WITH_TRILINOS)
      GrowingVectorMemory<TrilinosWrappers::MPI::Vector>
      ::release_unused_memory ();
      GrowingVectorMemory<TrilinosWrappers::MPI::BlockVector>
      ::release_unused_memory ();
#  endif
#endif

      // Now deal with PETSc (with or without MPI). Only delete the vectors
      // and finalize if PetscFinalize has not been called yet.
#ifdef DEAL_II_WITH_PETSC
      if ((PetscInitializeCalled == PETSC_TRUE)
          &&
          (PetscFinalizeCalled == PETSC_FALSE))
        {
          GrowingVectorMemory<PETScWrappers::MPI::Vector>
          ::release_unused_memory ();
          GrowingVectorMemory<PETScWrappers::MPI::BlockVector>
          ::release_unused_memory ();

#  ifdef DEAL_II_WITH_SLEPC
          // and now end SLEPc (with PETSc)
          SlepcFinalize();
#  else
          // or just end PETSc
          PetscFinalize();
#  endif
        }
#endif

#ifdef DEAL_II_WITH_P4EST
      // now end p4est and libsc (p4est itself has no finalize function)
      sc_finalize ();
#endif

      // Only call MPI_Finalize if we are running with MPI and no exception
      // is currently propagating; finalizing during stack unwinding could
      // deadlock the other ranks.
#ifdef DEAL_II_WITH_MPI
      if (job_supports_mpi() == true)
        {
          if (std::uncaught_exception())
            {
              std::cerr << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
                        << this_mpi_process(MPI_COMM_WORLD)
                        << ". Skipping MPI_Finalize() to avoid a deadlock."
                        << std::endl;
            }
          else
            {
              const int ierr = MPI_Finalize();
              (void)ierr;
              AssertNothrow (ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
            }
        }
#endif
    }

    bool job_supports_mpi ()
    {
#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      const int ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);

      return (MPI_has_been_started > 0);
#else
      return false;
#endif
    }
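
    // A minimal usage sketch (not part of this source file) of how the
    // pieces above are typically combined in an application's main():
    //
    //   int main (int argc, char **argv)
    //   {
    //     using namespace dealii;
    //     // Initialize MPI (and, if configured, PETSc/SLEPc, Zoltan, p4est)
    //     // and limit each process to a single thread:
    //     Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
    //
    //     const unsigned int rank =
    //       Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
    //     // ... run the parallel program ...
    //     return 0;   // the destructor of mpi_init finalizes MPI
    //   }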

  } // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE