Reference documentation for deal.II version 8.5.1
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2017 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/mpi.h>
18 #include <deal.II/base/mpi.templates.h>
19 #include <deal.II/base/utilities.h>
20 #include <deal.II/base/exceptions.h>
21 #include <deal.II/lac/vector_memory.h>
22 #include <deal.II/lac/la_parallel_vector.h>
23 #include <deal.II/lac/la_parallel_block_vector.h>
24 #include <deal.II/base/multithread_info.h>
25 
26 #include <iostream>
27 
28 #ifdef DEAL_II_WITH_TRILINOS
29 # ifdef DEAL_II_WITH_MPI
30 # include <Epetra_MpiComm.h>
31 # include <deal.II/lac/vector_memory.h>
32 # include <deal.II/lac/trilinos_vector.h>
33 # include <deal.II/lac/trilinos_block_vector.h>
34 # endif
35 #endif
36 
37 #ifdef DEAL_II_WITH_PETSC
38 # include <petscsys.h>
39 # include <deal.II/lac/petsc_block_vector.h>
40 # include <deal.II/lac/petsc_parallel_block_vector.h>
41 # include <deal.II/lac/petsc_vector.h>
42 # include <deal.II/lac/petsc_parallel_vector.h>
43 #endif
44 
45 #ifdef DEAL_II_WITH_SLEPC
46 # include <slepcsys.h>
47 # include <deal.II/lac/slepc_solver.h>
48 #endif
49 
50 #ifdef DEAL_II_WITH_P4EST
51 # include <p4est_bits.h>
52 #endif
53 
54 DEAL_II_NAMESPACE_OPEN
55 
56 
57 namespace Utilities
58 {
59 
60  namespace MPI
61  {
62 #ifdef DEAL_II_WITH_MPI
63  unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
64  {
65  int n_jobs=1;
66  const int ierr = MPI_Comm_size (mpi_communicator, &n_jobs);
67  AssertThrowMPI(ierr);
68 
69  return n_jobs;
70  }
71 
72 
73  unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
74  {
75  int rank=0;
76  const int ierr = MPI_Comm_rank (mpi_communicator, &rank);
77  AssertThrowMPI(ierr);
78 
79  return rank;
80  }
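A minimal usage sketch for the two query functions above (not part of mpi.cc; it assumes a deal.II build with MPI enabled and that MPI has already been initialized):

    #include <deal.II/base/mpi.h>
    #include <iostream>

    void report (const MPI_Comm &comm)
    {
      // every rank asks for the size of the communicator and for its own rank
      const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes (comm);
      const unsigned int my_rank = dealii::Utilities::MPI::this_mpi_process (comm);

      if (my_rank == 0)
        std::cout << "running on " << n_ranks << " MPI processes" << std::endl;
    }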
81 
82 
83  MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
84  {
85  MPI_Comm new_communicator;
86  const int ierr = MPI_Comm_dup (mpi_communicator, &new_communicator);
87  AssertThrowMPI(ierr);
88  return new_communicator;
89  }
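A short usage sketch for duplicate_communicator (not part of mpi.cc): the duplicate carries its own communication context, so messages sent on it cannot be confused with messages on the original communicator; releasing it again is the caller's responsibility, using the standard MPI call MPI_Comm_free:

    MPI_Comm duplicate = dealii::Utilities::MPI::duplicate_communicator (MPI_COMM_WORLD);

    // ... communicate on 'duplicate' without interfering with MPI_COMM_WORLD ...

    // release the duplicated communicator again (plain MPI, not provided by this file)
    const int ierr = MPI_Comm_free (&duplicate);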
90 
91 
92  std::vector<unsigned int>
93  compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
94  const std::vector<unsigned int> &destinations)
95  {
96  unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
97  unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
98 
99  for (unsigned int i=0; i<destinations.size(); ++i)
100  {
101  Assert (destinations[i] < n_procs,
102  ExcIndexRange (destinations[i], 0, n_procs));
103  Assert (destinations[i] != myid,
104  ExcMessage ("There is no point in communicating with ourselves."));
105  }
106 
107 
108  // let all processors communicate the maximal number of destinations
109  // they have
110  const unsigned int max_n_destinations
111  = Utilities::MPI::max (destinations.size(), mpi_comm);
112 
113  if (max_n_destinations==0)
114  // all processes have nothing to send/receive:
115  return std::vector<unsigned int>();
116 
117  // now that we know the number of data packets every processor wants to
118  // send, set up a buffer with the maximal size and copy our destinations
119  // in there, padded with -1's
120  std::vector<unsigned int> my_destinations(max_n_destinations,
121  numbers::invalid_unsigned_int);
122  std::copy (destinations.begin(), destinations.end(),
123  my_destinations.begin());
124 
125  // now exchange these (we could communicate less data if we used
126  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
127  // processors in this case, which is more expensive than the reduction
128  // operation above in MPI_Allreduce)
129  std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
130  const int ierr = MPI_Allgather (&my_destinations[0], max_n_destinations, MPI_UNSIGNED,
131  &all_destinations[0], max_n_destinations, MPI_UNSIGNED,
132  mpi_comm);
133  AssertThrowMPI(ierr);
134 
135  // now we know who is going to communicate with whom. collect who is
136  // going to communicate with us!
137  std::vector<unsigned int> origins;
138  for (unsigned int i=0; i<n_procs; ++i)
139  for (unsigned int j=0; j<max_n_destinations; ++j)
140  if (all_destinations[i*max_n_destinations + j] == myid)
141  origins.push_back (i);
142  else if (all_destinations[i*max_n_destinations + j] ==
143  numbers::invalid_unsigned_int)
144  break;
145 
146  return origins;
147  }
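A hedged usage sketch for compute_point_to_point_communication_pattern (not part of mpi.cc): every process passes the list of ranks it intends to send to and receives back the list of ranks that will send to it, so it knows how many receives to post before starting its own sends:

    const MPI_Comm mpi_comm = MPI_COMM_WORLD;

    // ranks this process wants to send a message to, filled by the application
    std::vector<unsigned int> destinations;

    const std::vector<unsigned int> origins
      = dealii::Utilities::MPI::compute_point_to_point_communication_pattern (mpi_comm,
                                                                              destinations);

    // 'origins' now lists every rank that will send to this process, e.g. one
    // MPI_Irecv can be posted per entry before the sends to 'destinations' start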
148 
149 
150  namespace
151  {
152  // custom MPI_Op for min_max_avg
153  void max_reduce ( const void *in_lhs_,
154  void *inout_rhs_,
155  int *len,
156  MPI_Datatype *)
157  {
158  (void)len;
159  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
160  MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
161 
162  Assert(*len==1, ExcInternalError());
163 
164  inout_rhs->sum += in_lhs->sum;
165  if (inout_rhs->min>in_lhs->min)
166  {
167  inout_rhs->min = in_lhs->min;
168  inout_rhs->min_index = in_lhs->min_index;
169  }
170  else if (inout_rhs->min == in_lhs->min)
171  {
172  // choose lower cpu index when tied to make operator commutative
173  if (inout_rhs->min_index > in_lhs->min_index)
174  inout_rhs->min_index = in_lhs->min_index;
175  }
176 
177  if (inout_rhs->max < in_lhs->max)
178  {
179  inout_rhs->max = in_lhs->max;
180  inout_rhs->max_index = in_lhs->max_index;
181  }
182  else if (inout_rhs->max == in_lhs->max)
183  {
184  // choose lower cpu index when tied to make operator commutative
185  if (inout_rhs->max_index > in_lhs->max_index)
186  inout_rhs->max_index = in_lhs->max_index;
187  }
188  }
189  }
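The tie-breaking above on equal minima or maxima is what makes the reduction commutative: whichever order two partial results are combined in, the lower rank index wins. A small conceptual illustration (max_reduce sits in an unnamed namespace, so this cannot actually be called from outside mpi.cc):

    dealii::Utilities::MPI::MinMaxAvg a, b;
    a.sum = b.sum = 1.;
    a.min = a.max = b.min = b.max = 1.;   // identical value ...
    a.min_index = a.max_index = 3;        // ... contributed by rank 3
    b.min_index = b.max_index = 7;        // ... and by rank 7

    int len = 1;
    MPI_Datatype dummy = MPI_DATATYPE_NULL;
    max_reduce (&a, &b, &len, &dummy);    // combine a into b
    // b.min_index == b.max_index == 3: the lower rank is kept, and the same
    // indices result if the two operands are combined in the opposite order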
190 
191 
192 
193  MinMaxAvg
194  min_max_avg(const double my_value,
195  const MPI_Comm &mpi_communicator)
196  {
197  // If MPI was not started, we have a serial computation and cannot run
198  // the other MPI commands
199  if (job_supports_mpi() == false)
200  {
201  MinMaxAvg result;
202  result.sum = my_value;
203  result.avg = my_value;
204  result.min = my_value;
205  result.max = my_value;
206  result.min_index = 0;
207  result.max_index = 0;
208 
209  return result;
210  }
211 
212  // To avoid uninitialized values on some MPI implementations, provide
213  // result with a default value already...
214  MinMaxAvg result = { 0., std::numeric_limits<double>::max(),
215  -std::numeric_limits<double>::max(), 0, 0, 0.
216  };
217 
218  const unsigned int my_id
219  = ::Utilities::MPI::this_mpi_process(mpi_communicator);
220  const unsigned int numproc
221  = ::Utilities::MPI::n_mpi_processes(mpi_communicator);
222 
223  MPI_Op op;
224  int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
225  AssertThrowMPI(ierr);
226 
227  MinMaxAvg in;
228  in.sum = in.min = in.max = my_value;
229  in.min_index = in.max_index = my_id;
230 
231  MPI_Datatype type;
232  int lengths[]= {3,2};
233  MPI_Aint displacements[]= {0,offsetof(MinMaxAvg, min_index)};
234  MPI_Datatype types[]= {MPI_DOUBLE, MPI_INT};
235 
236  ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
237  AssertThrowMPI(ierr);
238 
239  ierr = MPI_Type_commit(&type);
240  AssertThrowMPI(ierr);
241  ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
242  AssertThrowMPI(ierr);
243 
244  ierr = MPI_Type_free (&type);
245  AssertThrowMPI(ierr);
246 
247  ierr = MPI_Op_free(&op);
248  AssertThrowMPI(ierr);
249 
250  result.avg = result.sum / numproc;
251 
252  return result;
253  }
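A minimal usage sketch for min_max_avg (not part of mpi.cc; MPI-enabled build assumed): each process contributes a single number, for example a local timing (the helper compute_something_locally below is hypothetical), and every process receives the same global statistics:

    const double local_time = compute_something_locally ();   // hypothetical helper

    const dealii::Utilities::MPI::MinMaxAvg stats
      = dealii::Utilities::MPI::min_max_avg (local_time, MPI_COMM_WORLD);

    // stats.min/stats.max are the global extrema, stats.min_index/stats.max_index
    // the ranks that own them, stats.sum the total and stats.avg the mean
    if (dealii::Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
      std::cout << "slowest rank: " << stats.max_index
                << " needed " << stats.max << " seconds" << std::endl;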
254 
255 #else
256 
257  unsigned int n_mpi_processes (const MPI_Comm &)
258  {
259  return 1;
260  }
261 
262 
263 
264  unsigned int this_mpi_process (const MPI_Comm &)
265  {
266  return 0;
267  }
268 
269 
270  MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
271  {
272  return mpi_communicator;
273  }
274 
275 
276 
277  MinMaxAvg
278  min_max_avg(const double my_value,
279  const MPI_Comm &)
280  {
281  MinMaxAvg result;
282 
283  result.sum = my_value;
284  result.avg = my_value;
285  result.min = my_value;
286  result.max = my_value;
287  result.min_index = 0;
288  result.max_index = 0;
289 
290  return result;
291  }
292 
293 #endif
294 
295 
296 
297  MPI_InitFinalize::MPI_InitFinalize (int &argc,
298  char ** &argv,
299  const unsigned int max_num_threads)
300  {
301  static bool constructor_has_already_run = false;
302  (void)constructor_has_already_run;
303  Assert (constructor_has_already_run == false,
304  ExcMessage ("You can only create a single object of this class "
305  "in a program since it initializes the MPI system."));
306 
307 
308  int ierr;
309 #ifdef DEAL_II_WITH_MPI
310  // if we have PETSc, we will initialize it and let it handle MPI.
311  // Otherwise, we will do it.
312  int MPI_has_been_started = 0;
313  ierr = MPI_Initialized(&MPI_has_been_started);
314  AssertThrowMPI(ierr);
315  AssertThrow (MPI_has_been_started == 0,
316  ExcMessage ("MPI error. You can only start MPI once!"));
317 
318  int provided;
319  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
320  // we might use several threads but never call two MPI functions at the
321  // same time. For an explanation of why we do this, see
322  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
323  int wanted = MPI_THREAD_SERIALIZED;
324  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
325  AssertThrowMPI(ierr);
326 
327  // disable for now because at least some implementations always return
328  // MPI_THREAD_SINGLE.
329  //Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
330  // ExcMessage("MPI reports that we are not allowed to use multiple threads."));
331 #else
332  // make sure the compiler doesn't warn about these variables
333  (void)argc;
334  (void)argv;
335  (void)ierr;
336 #endif
337 
338  // we are allowed to call MPI_Init ourselves and PETScInitialize will
339  // detect this. This allows us to use MPI_Init_thread instead.
340 #ifdef DEAL_II_WITH_PETSC
341 # ifdef DEAL_II_WITH_SLEPC
342  // Initialize SLEPc (with PETSc):
343  ierr = SlepcInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
344  AssertThrow (ierr == 0, ExcSLEPcError(ierr));
345 # else
346  // or just initialize PETSc alone:
347  ierr = PetscInitialize(&argc, &argv, PETSC_NULL, PETSC_NULL);
348  AssertThrow (ierr == 0, ExcPETScError(ierr));
349 # endif
350 #endif
351 
352 #ifdef DEAL_II_WITH_P4EST
353  //Initialize p4est and libsc components
354  sc_init(MPI_COMM_WORLD, 0, 0, NULL, SC_LP_SILENT);
355  p4est_init (0, SC_LP_SILENT);
356 #endif
357 
358  constructor_has_already_run = true;
359 
360 
361  // Now also see how many threads we'd like to run
362  if (max_num_threads != numbers::invalid_unsigned_int)
363  {
364  // set maximum number of threads (also respecting the environment
365  // variable that the called function evaluates) based on what the
366  // user asked
367  MultithreadInfo::set_thread_limit(max_num_threads);
368  }
369  else
370  // user wants automatic choice
371  {
372 #ifdef DEAL_II_WITH_MPI
373  // we need to figure out how many MPI processes there are on the
374  // current node, as well as how many CPU cores we have. for the
375  // first task, check what get_hostname() returns and then do an
376  // allgather so each processor gets the answer
377  //
378  // in calculating the length of the string, don't forget the
379  // terminating \0 on C-style strings
380  const std::string hostname = Utilities::System::get_hostname();
381  const unsigned int max_hostname_size = Utilities::MPI::max (hostname.size()+1,
382  MPI_COMM_WORLD);
383  std::vector<char> hostname_array (max_hostname_size);
384  std::copy (hostname.c_str(), hostname.c_str()+hostname.size()+1,
385  hostname_array.begin());
386 
387  std::vector<char> all_hostnames(max_hostname_size *
388  MPI::n_mpi_processes(MPI_COMM_WORLD));
389  const int ierr = MPI_Allgather (&hostname_array[0], max_hostname_size, MPI_CHAR,
390  &all_hostnames[0], max_hostname_size, MPI_CHAR,
391  MPI_COMM_WORLD);
392  AssertThrowMPI(ierr);
393 
394  // count how often our own hostname appears and determine which
395  // instance of it the current process represents
396  unsigned int n_local_processes=0;
397  unsigned int nth_process_on_host = 0;
398  for (unsigned int i=0; i<MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
399  if (std::string (&all_hostnames[0] + i*max_hostname_size) == hostname)
400  {
401  ++n_local_processes;
402  if (i <= MPI::this_mpi_process (MPI_COMM_WORLD))
403  ++nth_process_on_host;
404  }
405  Assert (nth_process_on_host > 0, ExcInternalError());
406 
407 
408  // compute how many cores each process gets. if the number does not
409  // divide evenly, then we get one more core if we are among the
410  // first few processes
411  //
412  // if the number would be zero, round up to one since every process
413  // needs to have at least one thread
414  const unsigned int n_threads
415  = std::max(MultithreadInfo::n_cores() / n_local_processes
416  +
417  (nth_process_on_host <= MultithreadInfo::n_cores() % n_local_processes
418  ?
419  1
420  :
421  0),
422  1U);
423 #else
424  const unsigned int n_threads = MultithreadInfo::n_cores();
425 #endif
426 
427  // finally set this number of threads
428  MultithreadInfo::set_thread_limit(n_threads);
429  }
430  }
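A typical way to use this constructor, following the pattern of the deal.II tutorial programs (not part of mpi.cc): create exactly one MPI_InitFinalize object at the top of main(), which initializes MPI, and PETSc/SLEPc/p4est where configured, and tears everything down again when it goes out of scope. The last argument limits the number of threads per MPI process; passing 1 disables threading:

    #include <deal.II/base/mpi.h>

    int main (int argc, char *argv[])
    {
      // initialize MPI (and PETSc/SLEPc/p4est if available); one thread per process
      dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

      // ... the parallel program ...

      return 0;   // the destructor of mpi_initialization finalizes MPI
    }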
431 
432 
433  MPI_InitFinalize::~MPI_InitFinalize ()
434  {
435  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
436  // are no longer used at this point. this is relevant because the static
437  // object destructors run for these vectors at the end of the program
438  // would run after MPI_Finalize is called, leading to errors
439 
440 #ifdef DEAL_II_WITH_MPI
441  // Start with the deal.II MPI vectors (need to do this before finalizing
442  // PETSc because it finalizes MPI). Delete vectors from the pools:
443  GrowingVectorMemory<LinearAlgebra::distributed::Vector<double> >
444  ::release_unused_memory ();
445  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double> >
446  ::release_unused_memory ();
447  GrowingVectorMemory<LinearAlgebra::distributed::Vector<float> >
448  ::release_unused_memory ();
449  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float> >
450  ::release_unused_memory ();
451 
452  // Next with Trilinos:
453 # if defined(DEAL_II_WITH_TRILINOS)
454  GrowingVectorMemory<TrilinosWrappers::MPI::Vector>
455  ::release_unused_memory ();
456  GrowingVectorMemory<TrilinosWrappers::MPI::BlockVector>
457  ::release_unused_memory ();
458 # endif
459 #endif
460 
461 
462  // Now deal with PETSc (with or without MPI). Only delete the vectors if
463  // finalize hasn't been called yet, otherwise this will lead to errors.
464 #ifdef DEAL_II_WITH_PETSC
465  if ((PetscInitializeCalled == PETSC_TRUE)
466  &&
467  (PetscFinalizeCalled == PETSC_FALSE))
468  {
469  GrowingVectorMemory<PETScWrappers::MPI::Vector>
470  ::release_unused_memory ();
471  GrowingVectorMemory<PETScWrappers::MPI::BlockVector>
472  ::release_unused_memory ();
473  GrowingVectorMemory<PETScWrappers::Vector>
474  ::release_unused_memory ();
475  GrowingVectorMemory<PETScWrappers::BlockVector>
476  ::release_unused_memory ();
477 
478 # ifdef DEAL_II_WITH_SLEPC
479  // and now end SLEPc (with PETSc)
480  SlepcFinalize();
481 # else
482  // or just end PETSc.
483  PetscFinalize();
484 # endif
485  }
486 #endif
487 
488 #ifdef DEAL_II_WITH_P4EST
489  // now end p4est and libsc
490  // Note: p4est has no finalize function
491  sc_finalize ();
492 #endif
493 
494 
495  // only MPI_Finalize if we are running with MPI. We also need to do this
496  // when running PETSc, because we initialize MPI ourselves before
497  // calling PetscInitialize
498 #ifdef DEAL_II_WITH_MPI
499  if (job_supports_mpi() == true)
500  {
501  if (std::uncaught_exception())
502  {
503  std::cerr << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
504  << this_mpi_process(MPI_COMM_WORLD)
505  << ". Skipping MPI_Finalize() to avoid a deadlock."
506  << std::endl;
507  }
508  else
509  {
510  const int ierr = MPI_Finalize();
511  AssertThrowMPI(ierr);
512  }
513  }
514 #endif
515  }
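Since this destructor finalizes PETSc and MPI, objects that rely on them should no longer exist by the time it runs. A common pattern, offered here as general guidance rather than something spelled out in this file, is to let such objects live in a scope that closes before the MPI_InitFinalize object is destroyed:

    #include <deal.II/base/mpi.h>
    #include <deal.II/lac/la_parallel_vector.h>

    int main (int argc, char *argv[])
    {
      dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);

      {
        // MPI-backed objects live in an inner scope ...
        dealii::LinearAlgebra::distributed::Vector<double> v;
        // ... work with v ...
      }           // ... and are destroyed here,

      return 0;   // before ~MPI_InitFinalize() calls MPI_Finalize()
    }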
516 
517 
518 
519  bool job_supports_mpi ()
520  {
521 #ifdef DEAL_II_WITH_MPI
522  int MPI_has_been_started = 0;
523  const int ierr = MPI_Initialized(&MPI_has_been_started);
524  AssertThrowMPI(ierr);
525 
526  return (MPI_has_been_started > 0);
527 #else
528  return false;
529 #endif
530  }
531 
532 
533 
534 #include "mpi.inst"
535  } // end of namespace MPI
536 } // end of namespace Utilities
537 
538 DEAL_II_NAMESPACE_CLOSE