#include <deal.II/base/mpi.h>
#include <deal.II/base/mpi.templates.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/exceptions.h>
#include <deal.II/lac/vector_memory.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/base/multithread_info.h>

#include <iostream>

#ifdef DEAL_II_WITH_TRILINOS
#  ifdef DEAL_II_WITH_MPI
#    include <Epetra_MpiComm.h>
#    include <deal.II/lac/vector_memory.h>
#    include <deal.II/lac/trilinos_vector.h>
#    include <deal.II/lac/trilinos_block_vector.h>
#  endif
#endif

#ifdef DEAL_II_WITH_PETSC
#  include <petscsys.h>
#  include <deal.II/lac/petsc_block_vector.h>
#  include <deal.II/lac/petsc_parallel_block_vector.h>
#  include <deal.II/lac/petsc_vector.h>
#  include <deal.II/lac/petsc_parallel_vector.h>
#endif

#ifdef DEAL_II_WITH_SLEPC
#  include <slepcsys.h>
#  include <deal.II/lac/slepc_solver.h>
#endif

#ifdef DEAL_II_WITH_P4EST
#  include <p4est_bits.h>
#endif

DEAL_II_NAMESPACE_OPEN
namespace Utilities
{
  namespace MPI
  {
#ifdef DEAL_II_WITH_MPI
    // Return the number of MPI processes in the given communicator.
    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
    {
      int n_jobs = 1;
      const int ierr = MPI_Comm_size (mpi_communicator, &n_jobs);
      AssertThrowMPI(ierr);

      return n_jobs;
    }


    // Return the rank of the calling process within the communicator.
    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
    {
      int rank = 0;
      const int ierr = MPI_Comm_rank (mpi_communicator, &rank);
      AssertThrowMPI(ierr);

      return rank;
    }
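
    // A minimal usage sketch for the two queries above (illustrative
    // only, not part of this translation unit). It assumes MPI has
    // already been initialized, e.g. by an MPI_InitFinalize object:
    //
    // @code
    //   const unsigned int n_ranks
    //     = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
    //   const unsigned int my_rank
    //     = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
    //   if (my_rank == 0)
    //     std::cout << "Running on " << n_ranks << " processes" << std::endl;
    // @endcode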

    // Duplicate the given communicator; the caller is responsible for
    // freeing the duplicate with MPI_Comm_free once it is no longer needed.
    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
    {
      MPI_Comm new_communicator;
      const int ierr = MPI_Comm_dup (mpi_communicator, &new_communicator);
      AssertThrowMPI(ierr);
      return new_communicator;
    }
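
    // Illustrative sketch (not part of this file): a duplicated
    // communicator gives library code its own communication context so
    // its messages cannot interfere with the caller's:
    //
    // @code
    //   MPI_Comm my_comm
    //     = Utilities::MPI::duplicate_communicator (MPI_COMM_WORLD);
    //   // ... communicate on my_comm ...
    //   const int ierr = MPI_Comm_free (&my_comm);
    //   AssertThrowMPI(ierr);
    // @endcode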

    // Given the list of ranks this process wants to send to, figure out
    // which processes will be sending to us.
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
                                                  const std::vector<unsigned int> &destinations)
    {
      const unsigned int myid    = Utilities::MPI::this_mpi_process(mpi_comm);
      const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);

      for (unsigned int i=0; i<destinations.size(); ++i)
        {
          Assert (destinations[i] < n_procs,
                  ExcIndexRange (destinations[i], 0, n_procs));
          Assert (destinations[i] != myid,
                  ExcMessage ("There is no point in communicating with ourselves."));
        }

      // Let all processors communicate the maximal number of destinations
      // they have.
      const unsigned int max_n_destinations
        = Utilities::MPI::max (destinations.size(), mpi_comm);

      if (max_n_destinations==0)
        // all processes have nothing to send/receive:
        return std::vector<unsigned int>();

      // Set up a buffer of the maximal size and copy our own destinations
      // into it, padded with invalid markers.
      std::vector<unsigned int> my_destinations(max_n_destinations,
                                                numbers::invalid_unsigned_int);
      std::copy (destinations.begin(), destinations.end(),
                 my_destinations.begin());

      // Exchange these lists between all processes.
      std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
      const int ierr = MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
                                      &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
                                      mpi_comm);
      AssertThrowMPI(ierr);

      // Now we know who is going to communicate with whom: collect the
      // ranks that are going to communicate with us.
      std::vector<unsigned int> origins;
      for (unsigned int i=0; i<n_procs; ++i)
        for (unsigned int j=0; j<max_n_destinations; ++j)
          if (all_destinations[i*max_n_destinations + j] == myid)
            origins.push_back (i);
          else if (all_destinations[i*max_n_destinations + j] ==
                   numbers::invalid_unsigned_int)
            break;

      return origins;
    }
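
    // Illustrative sketch (not part of this file): each process passes
    // the ranks it intends to send to and learns from which ranks it
    // should expect a message, e.g. in order to post matching receives:
    //
    // @code
    //   std::vector<unsigned int> destinations;
    //   if (Utilities::MPI::this_mpi_process (mpi_comm) == 0)
    //     destinations.push_back (1);   // rank 0 will send to rank 1
    //
    //   const std::vector<unsigned int> origins
    //     = Utilities::MPI::compute_point_to_point_communication_pattern
    //       (mpi_comm, destinations);
    //   // on rank 1, origins now contains {0}
    // @endcode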

    namespace
    {
      // Custom MPI reduction operation for min_max_avg() below: it
      // accumulates the sum and tracks min/max together with the rank
      // on which each extremum occurs.
      void max_reduce (const void   *in_lhs_,
                       void         *inout_rhs_,
                       int          *len,
                       MPI_Datatype *)
      {
        const MinMaxAvg *in_lhs    = static_cast<const MinMaxAvg *>(in_lhs_);
        MinMaxAvg       *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);

        Assert (*len == 1, ExcInternalError());

        inout_rhs->sum += in_lhs->sum;
        if (inout_rhs->min > in_lhs->min)
          {
            inout_rhs->min       = in_lhs->min;
            inout_rhs->min_index = in_lhs->min_index;
          }
        else if (inout_rhs->min == in_lhs->min)
          {
            // Choose the lower rank on ties to make the operation commutative.
            if (inout_rhs->min_index > in_lhs->min_index)
              inout_rhs->min_index = in_lhs->min_index;
          }

        if (inout_rhs->max < in_lhs->max)
          {
            inout_rhs->max       = in_lhs->max;
            inout_rhs->max_index = in_lhs->max_index;
          }
        else if (inout_rhs->max == in_lhs->max)
          {
            // Again prefer the lower rank on ties.
            if (inout_rhs->max_index > in_lhs->max_index)
              inout_rhs->max_index = in_lhs->max_index;
          }
      }
    }

    MinMaxAvg
    min_max_avg(const double    my_value,
                const MPI_Comm &mpi_communicator)
    {
      // If MPI was not started, we have a serial computation and cannot
      // run any of the MPI commands below.
      if (job_supports_mpi() == false)
        {
          MinMaxAvg result;
          result.sum = my_value;
          result.avg = my_value;
          result.min = my_value;
          result.max = my_value;
          result.min_index = 0;
          result.max_index = 0;

          return result;
        }

      // To avoid uninitialized values on some MPI implementations,
      // provide the result with sensible defaults.
      MinMaxAvg result = { 0., std::numeric_limits<double>::max(),
                           -std::numeric_limits<double>::max(), 0, 0, 0.
                         };

      const unsigned int my_id
        = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
      const unsigned int numproc
        = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);

      MPI_Op op;
      int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
      AssertThrowMPI(ierr);

      MinMaxAvg in;
      in.sum = in.min = in.max = my_value;
      in.min_index = in.max_index = my_id;

      // Describe the memory layout of MinMaxAvg as an MPI datatype:
      // three doubles (sum, min, max) followed by two ints (min_index,
      // max_index); the avg field is computed locally afterwards.
      MPI_Datatype type;
      int          lengths[]       = {3, 2};
      MPI_Aint     displacements[] = {0, offsetof(MinMaxAvg, min_index)};
      MPI_Datatype types[]         = {MPI_DOUBLE, MPI_INT};

      ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_commit(&type);
      AssertThrowMPI(ierr);
      ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
      AssertThrowMPI(ierr);

      ierr = MPI_Type_free (&type);
      AssertThrowMPI(ierr);

      ierr = MPI_Op_free(&op);
      AssertThrowMPI(ierr);

      result.avg = result.sum / numproc;

      return result;
    }

#else

    // Serial fall-back implementations, used when deal.II is configured
    // without MPI:
    unsigned int n_mpi_processes (const MPI_Comm &)
    {
      return 1;
    }


    unsigned int this_mpi_process (const MPI_Comm &)
    {
      return 0;
    }


    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
    {
      return mpi_communicator;
    }


    MinMaxAvg
    min_max_avg(const double my_value,
                const MPI_Comm &)
    {
      MinMaxAvg result;

      result.sum = my_value;
      result.avg = my_value;
      result.min = my_value;
      result.max = my_value;
      result.min_index = 0;
      result.max_index = 0;

      return result;
    }

#endif
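
    // Illustrative sketch (not part of this file): min_max_avg() packs
    // the sum, the extrema, the ranks owning them, and the average of a
    // per-process quantity into a single reduction, e.g. for timings:
    //
    // @code
    //   const double local_time = ...; // some per-process quantity
    //   const Utilities::MPI::MinMaxAvg stats
    //     = Utilities::MPI::min_max_avg (local_time, MPI_COMM_WORLD);
    //   // stats.min/.max hold the extreme values, stats.min_index and
    //   // stats.max_index the ranks they occur on, stats.avg the mean.
    // @endcode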

    MPI_InitFinalize::MPI_InitFinalize (int                &argc,
                                        char             **&argv,
                                        const unsigned int  max_num_threads)
    {
      static bool constructor_has_already_run = false;
      (void)constructor_has_already_run;
      Assert (constructor_has_already_run == false,
              ExcMessage ("You can only create a single object of this class "
                          "in a program since it initializes the MPI system."));

      int ierr = 0;
#ifdef DEAL_II_WITH_MPI
      // Check that MPI has not been started before:
      int MPI_has_been_started = 0;
      ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);
      AssertThrow (MPI_has_been_started == 0,
                   ExcMessage ("MPI error. You can only start MPI once!"));

      // This works like MPI_Init(&argc, &argv) but tells MPI that we may
      // use several threads, as long as no two of them call MPI at the
      // same time.
      int provided;
      int wanted = MPI_THREAD_SERIALIZED;
      ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
      AssertThrowMPI(ierr);
#else
      // make sure the compiler doesn't warn about these variables
      (void)argc;
      (void)argv;
      (void)ierr;
#endif

      // Since we initialize MPI ourselves above, PetscInitialize and
      // SlepcInitialize detect this and do not initialize it again.
#ifdef DEAL_II_WITH_PETSC
#  ifdef DEAL_II_WITH_SLEPC
      // Initialize SLEPc (which also initializes PETSc):
      ierr = SlepcInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
      AssertThrow (ierr == 0, ExcSLEPcError(ierr));
#  else
      // or just initialize PETSc alone:
      ierr = PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
      AssertThrow (ierr == 0, ExcPETScError(ierr));
#  endif
#endif

#ifdef DEAL_II_WITH_P4EST
      // Initialize p4est and its underlying libsc library:
      sc_init(MPI_COMM_WORLD, 0, 0, NULL, SC_LP_SILENT);
      p4est_init (0, SC_LP_SILENT);
#endif

      constructor_has_already_run = true;

      // Now also see how many threads we'd like to run:
      if (max_num_threads != numbers::invalid_unsigned_int)
        // set the maximal number of threads to what the user asked for
        MultithreadInfo::set_thread_limit(max_num_threads);
      else
        // The user wants an automatic choice: divide the available cores
        // among the MPI processes that share the current node.
        {
#ifdef DEAL_II_WITH_MPI
          // Gather all hostnames so that every process can count how
          // many ranks run on its own node; include the terminating \0
          // of the C-style string in the length.
          const std::string hostname = Utilities::System::get_hostname();
          const unsigned int max_hostname_size
            = Utilities::MPI::max (hostname.size()+1, MPI_COMM_WORLD);

          std::vector<char> hostname_array (max_hostname_size);
          std::copy (hostname.c_str(), hostname.c_str()+hostname.size()+1,
                     hostname_array.begin());

          std::vector<char> all_hostnames (max_hostname_size *
                                           MPI::n_mpi_processes(MPI_COMM_WORLD));
          const int ierr = MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
                                          &all_hostnames[0], max_hostname_size, MPI_CHAR,
                                          MPI_COMM_WORLD);
          AssertThrowMPI(ierr);

          // Count how often our own hostname appears, and which instance
          // among these the current process is:
          unsigned int n_local_processes   = 0;
          unsigned int nth_process_on_host = 0;
          for (unsigned int i=0; i<MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
            if (std::string (&all_hostnames[0] + i*max_hostname_size) == hostname)
              {
                ++n_local_processes;
                if (i <= MPI::this_mpi_process (MPI_COMM_WORLD))
                  ++nth_process_on_host;
              }
          Assert (nth_process_on_host > 0, ExcInternalError());

          // Divide the cores among the processes on this node; if the
          // division is uneven, the first few processes get one core
          // more. Every process gets at least one thread.
          const unsigned int n_threads
            = std::max(MultithreadInfo::n_cores() / n_local_processes
                       + (nth_process_on_host <= MultithreadInfo::n_cores() % n_local_processes
                          ? 1 : 0),
                       1U);
#else
          const unsigned int n_threads = MultithreadInfo::n_cores();
#endif

          // finally set this number of threads
          MultithreadInfo::set_thread_limit(n_threads);
        }
    }
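
    // Illustrative sketch (not part of this file): the canonical use of
    // this class is to create the object first thing in main(), so that
    // MPI (and PETSc/SLEPc/p4est, if configured) is initialized before
    // any other deal.II object and finalized after all of them:
    //
    // @code
    //   int main (int argc, char **argv)
    //   {
    //     // passing 1 limits each process to a single thread; omit the
    //     // last argument to let deal.II choose automatically
    //     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
    //     // ... run the parallel program ...
    //   }
    // @endcode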

    MPI_InitFinalize::~MPI_InitFinalize()
    {
      // Make the memory pool release all MPI-based vectors that are no
      // longer used at this point. This has to happen before MPI (and,
      // with it, PETSc/Trilinos) is finalized, since the corresponding
      // static destructors would otherwise only run afterwards.
#ifdef DEAL_II_WITH_MPI
      // Start with deal.II's own distributed vectors:
      GrowingVectorMemory<LinearAlgebra::distributed::Vector<double> >
      ::release_unused_memory ();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double> >
      ::release_unused_memory ();
      GrowingVectorMemory<LinearAlgebra::distributed::Vector<float> >
      ::release_unused_memory ();
      GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float> >
      ::release_unused_memory ();

      // Next the Trilinos vectors:
#  if defined(DEAL_II_WITH_TRILINOS)
      GrowingVectorMemory<TrilinosWrappers::MPI::Vector>
      ::release_unused_memory ();
      GrowingVectorMemory<TrilinosWrappers::MPI::BlockVector>
      ::release_unused_memory ();
#  endif
#endif

      // Now deal with PETSc. Only release the vectors and finalize if
      // PETSc was initialized and has not been finalized yet, since
      // doing either twice leads to errors.
#ifdef DEAL_II_WITH_PETSC
      if ((PetscInitializeCalled == PETSC_TRUE)
          &&
          (PetscFinalizeCalled == PETSC_FALSE))
        {
          GrowingVectorMemory<PETScWrappers::MPI::Vector>
          ::release_unused_memory ();
          GrowingVectorMemory<PETScWrappers::MPI::BlockVector>
          ::release_unused_memory ();
          GrowingVectorMemory<PETScWrappers::Vector>
          ::release_unused_memory ();
          GrowingVectorMemory<PETScWrappers::BlockVector>
          ::release_unused_memory ();

#  ifdef DEAL_II_WITH_SLEPC
          // ...and end SLEPc (which also finalizes PETSc):
          SlepcFinalize();
#  else
          // ...or just end PETSc:
          PetscFinalize();
#  endif
        }
#endif

#ifdef DEAL_II_WITH_P4EST
      // Shut down libsc; p4est itself has no finalize function.
      sc_finalize ();
#endif

      // Only call MPI_Finalize if we are actually running with MPI, and
      // skip it if an exception is propagating, since blocking inside
      // MPI_Finalize could then deadlock the whole job.
#ifdef DEAL_II_WITH_MPI
      if (job_supports_mpi() == true)
        {
          if (std::uncaught_exception())
            {
              std::cerr << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
                        << this_mpi_process(MPI_COMM_WORLD)
                        << ". Skipping MPI_Finalize() to avoid a deadlock."
                        << std::endl;
            }
          else
            {
              const int ierr = MPI_Finalize();
              AssertThrowMPI(ierr);
            }
        }
#endif
    }

    // Return whether this job runs under MPI, i.e. whether MPI has been
    // initialized.
    bool job_supports_mpi ()
    {
#ifdef DEAL_II_WITH_MPI
      int MPI_has_been_started = 0;
      const int ierr = MPI_Initialized(&MPI_has_been_started);
      AssertThrowMPI(ierr);

      return (MPI_has_been_started > 0);
#else
      return false;
#endif
    }
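
    // Illustrative sketch (not part of this file): code that may run
    // either serially or under mpirun can use job_supports_mpi() to
    // guard its MPI-specific branches:
    //
    // @code
    //   if (Utilities::MPI::job_supports_mpi ())
    //     {
    //       // safe to call MPI functions here
    //     }
    //   else
    //     {
    //       // serial fall-back path
    //     }
    // @endcode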

  } // end of namespace MPI
} // end of namespace Utilities

DEAL_II_NAMESPACE_CLOSE