Reference documentation for deal.II version 9.0.0
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2018 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE at
12 // the top level of the deal.II distribution.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/mpi.h>
18 #include <deal.II/base/mpi.templates.h>
19 #include <deal.II/base/utilities.h>
20 #include <deal.II/base/exceptions.h>
21 #include <deal.II/lac/vector_memory.h>
22 #include <deal.II/lac/la_parallel_vector.h>
23 #include <deal.II/lac/la_parallel_block_vector.h>
24 #include <deal.II/base/multithread_info.h>
25 
26 #include <iostream>
27 
28 #ifdef DEAL_II_WITH_TRILINOS
29 # ifdef DEAL_II_WITH_MPI
30 # include <Epetra_MpiComm.h>
31 # include <deal.II/lac/vector_memory.h>
32 # include <deal.II/lac/trilinos_vector.h>
33 # include <deal.II/lac/trilinos_parallel_block_vector.h>
34 # endif
35 #endif
36 
37 #ifdef DEAL_II_WITH_PETSC
38 # include <petscsys.h>
39 # include <deal.II/lac/petsc_parallel_block_vector.h>
40 # include <deal.II/lac/petsc_parallel_vector.h>
41 #endif
42 
43 #ifdef DEAL_II_WITH_SLEPC
44 # include <slepcsys.h>
45 # include <deal.II/lac/slepc_solver.h>
46 #endif
47 
48 #ifdef DEAL_II_WITH_P4EST
49 # include <p4est_bits.h>
50 #endif
51 
52 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
53 # include <zoltan_cpp.h>
54 #endif
55 
56 DEAL_II_NAMESPACE_OPEN
57 
58 
59 namespace Utilities
60 {
61 
62  namespace MPI
63  {
64 #ifdef DEAL_II_WITH_MPI
65  unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator)
66  {
67  int n_jobs=1;
68  const int ierr = MPI_Comm_size (mpi_communicator, &n_jobs);
69  AssertThrowMPI(ierr);
70 
71  return n_jobs;
72  }
73 
74 
75  unsigned int this_mpi_process (const MPI_Comm &mpi_communicator)
76  {
77  int rank=0;
78  const int ierr = MPI_Comm_rank (mpi_communicator, &rank);
79  AssertThrowMPI(ierr);
80 
81  return rank;
82  }
83 
84 
85  MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
86  {
87  MPI_Comm new_communicator;
88  const int ierr = MPI_Comm_dup (mpi_communicator, &new_communicator);
89  AssertThrowMPI(ierr);
90  return new_communicator;
91  }
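// A brief usage sketch (illustrative only, not part of mpi.cc): the three wrappers
// above are the usual way to query ranks and to give an object its own
// communicator so that its messages cannot collide with other communication.
// Assumes MPI has already been initialized, e.g. via MPI_InitFinalize below.
//
//   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
//   const unsigned int my_rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
//
//   MPI_Comm solver_comm = Utilities::MPI::duplicate_communicator (MPI_COMM_WORLD);
//   // ... hand solver_comm to a solver, then release it when done:
//   MPI_Comm_free (&solver_comm);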
92 
93 
94 
95  int create_group(const MPI_Comm &comm,
96  const MPI_Group &group,
97  const int tag,
98  MPI_Comm *new_comm)
99  {
100 #if DEAL_II_MPI_VERSION_GTE(3, 0)
101  return MPI_Comm_create_group(comm, group, tag, new_comm);
102 #else
103  int rank;
104  int ierr = MPI_Comm_rank(comm, &rank);
105  AssertThrowMPI(ierr);
106 
107  int grp_rank;
108  ierr = MPI_Group_rank(group, &grp_rank);
109  AssertThrowMPI(ierr);
110  if (grp_rank == MPI_UNDEFINED)
111  {
112  *new_comm = MPI_COMM_NULL;
113  return MPI_SUCCESS;
114  }
115 
116  int grp_size;
117  ierr = MPI_Group_size(group, &grp_size);
118  AssertThrowMPI(ierr);
119 
120  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
121  AssertThrowMPI(ierr);
122 
123  MPI_Group parent_grp;
124  ierr = MPI_Comm_group(comm, &parent_grp);
125  AssertThrowMPI(ierr);
126 
127  std::vector<int> pids(grp_size);
128  std::vector<int> grp_pids(grp_size);
129  std::iota(grp_pids.begin(), grp_pids.end(), 0);
130  ierr = MPI_Group_translate_ranks(group, grp_size, grp_pids.data(), parent_grp, pids.data());
131  AssertThrowMPI(ierr);
132  ierr = MPI_Group_free(&parent_grp);
133  AssertThrowMPI(ierr);
134 
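// Build the group communicator out of the per-process MPI_COMM_SELF duplicates
// by repeatedly connecting neighboring blocks of 'merge_sz' ranks with
// MPI_Intercomm_create and fusing them with MPI_Intercomm_merge; each round
// doubles the size of the merged communicator until it covers the whole
// group (a recursive-doubling scheme).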
135  MPI_Comm comm_old = *new_comm;
136  MPI_Comm ic;
137  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *=2)
138  {
139  const int gid = grp_rank/merge_sz;
140  comm_old = *new_comm;
141  if (gid % 2 == 0)
142  {
143  if ((gid + 1) * merge_sz < grp_size)
144  {
145  ierr = (MPI_Intercomm_create(*new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
146  AssertThrowMPI(ierr);
147  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
148  AssertThrowMPI(ierr);
149  }
150  }
151  else
152  {
153  ierr = MPI_Intercomm_create(*new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
154  AssertThrowMPI(ierr);
155  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
156  AssertThrowMPI(ierr);
157  }
158  if (*new_comm != comm_old)
159  {
160  ierr = MPI_Comm_free(&ic);
161  AssertThrowMPI(ierr);
162  ierr = MPI_Comm_free(&comm_old);
163  AssertThrowMPI(ierr);
164  }
165  }
166 
167  return MPI_SUCCESS;
168 #endif
169  }
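// A usage sketch (illustrative, not part of mpi.cc; the variable names and the
// tag value are made up): build a communicator for the even-numbered ranks of
// MPI_COMM_WORLD.
//
//   MPI_Group world_group, even_group;
//   MPI_Comm_group (MPI_COMM_WORLD, &world_group);
//
//   std::vector<int> even_ranks;
//   for (unsigned int r=0; r<Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); r+=2)
//     even_ranks.push_back (r);
//   MPI_Group_incl (world_group, even_ranks.size(), even_ranks.data(), &even_group);
//
//   MPI_Comm even_comm;
//   Utilities::MPI::create_group (MPI_COMM_WORLD, even_group, 55, &even_comm);
//
//   // even_comm is MPI_COMM_NULL on the odd ranks
//   if (even_comm != MPI_COMM_NULL)
//     MPI_Comm_free (&even_comm);
//   MPI_Group_free (&even_group);
//   MPI_Group_free (&world_group);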
170 
171 
172 
 173  std::vector<unsigned int>
 174  compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
 175  const std::vector<unsigned int> &destinations)
176  {
177  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
178  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
179 
180  for (unsigned int i=0; i<destinations.size(); ++i)
181  {
182  Assert (destinations[i] < n_procs,
183  ExcIndexRange (destinations[i], 0, n_procs));
184  Assert (destinations[i] != myid,
185  ExcMessage ("There is no point in communicating with ourselves."));
186  }
187 
188 
189  // let all processors communicate the maximal number of destinations
190  // they have
191  const unsigned int max_n_destinations
192  = Utilities::MPI::max (destinations.size(), mpi_comm);
193 
194  if (max_n_destinations==0)
195  // all processes have nothing to send/receive:
196  return std::vector<unsigned int>();
197 
198  // now that we know the number of data packets every processor wants to
199  // send, set up a buffer with the maximal size and copy our destinations
200  // in there, padded with -1's
 201  std::vector<unsigned int> my_destinations(max_n_destinations,
 202  numbers::invalid_unsigned_int);
203  std::copy (destinations.begin(), destinations.end(),
204  my_destinations.begin());
205 
206  // now exchange these (we could communicate less data if we used
207  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
208  // processors in this case, which is more expensive than the reduction
209  // operation above in MPI_Allreduce)
210  std::vector<unsigned int> all_destinations (max_n_destinations * n_procs);
211  const int ierr = MPI_Allgather (my_destinations.data(), max_n_destinations, MPI_UNSIGNED,
212  all_destinations.data(), max_n_destinations, MPI_UNSIGNED,
213  mpi_comm);
214  AssertThrowMPI(ierr);
215 
216  // now we know who is going to communicate with whom. collect who is
217  // going to communicate with us!
218  std::vector<unsigned int> origins;
219  for (unsigned int i=0; i<n_procs; ++i)
220  for (unsigned int j=0; j<max_n_destinations; ++j)
221  if (all_destinations[i*max_n_destinations + j] == myid)
222  origins.push_back (i);
 223  else if (all_destinations[i*max_n_destinations + j] ==
 224  numbers::invalid_unsigned_int)
 225  break;
226 
227  return origins;
228  }
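// A usage sketch (illustrative, not part of mpi.cc): each process knows only
// whom it wants to send to; the function tells it whom it will receive from,
// which is exactly what is needed to post the matching receives.
//
//   std::vector<unsigned int> destinations = ...; // ranks we will send to
//
//   const std::vector<unsigned int> origins
//     = Utilities::MPI::compute_point_to_point_communication_pattern
//       (MPI_COMM_WORLD, destinations);
//
//   for (const unsigned int o : origins)      { /* post MPI_Irecv from o */ }
//   for (const unsigned int d : destinations) { /* post MPI_Isend  to  d */ }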
229 
230 
231  namespace
232  {
 233  // custom MPI_Op for min_max_avg
234  void max_reduce ( const void *in_lhs_,
235  void *inout_rhs_,
236  int *len,
237  MPI_Datatype *)
238  {
239  (void)len;
240  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
241  MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
242 
243  Assert(*len==1, ExcInternalError());
244 
245  inout_rhs->sum += in_lhs->sum;
246  if (inout_rhs->min>in_lhs->min)
247  {
248  inout_rhs->min = in_lhs->min;
249  inout_rhs->min_index = in_lhs->min_index;
250  }
251  else if (inout_rhs->min == in_lhs->min)
252  {
253  // choose lower cpu index when tied to make operator commutative
254  if (inout_rhs->min_index > in_lhs->min_index)
255  inout_rhs->min_index = in_lhs->min_index;
256  }
257 
258  if (inout_rhs->max < in_lhs->max)
259  {
260  inout_rhs->max = in_lhs->max;
261  inout_rhs->max_index = in_lhs->max_index;
262  }
263  else if (inout_rhs->max == in_lhs->max)
264  {
265  // choose lower cpu index when tied to make operator commutative
266  if (inout_rhs->max_index > in_lhs->max_index)
267  inout_rhs->max_index = in_lhs->max_index;
268  }
269  }
270  }
271 
272 
273 
274  MinMaxAvg
275  min_max_avg(const double my_value,
276  const MPI_Comm &mpi_communicator)
277  {
278  // If MPI was not started, we have a serial computation and cannot run
279  // the other MPI commands
280  if (job_supports_mpi() == false)
281  {
282  MinMaxAvg result;
283  result.sum = my_value;
284  result.avg = my_value;
285  result.min = my_value;
286  result.max = my_value;
287  result.min_index = 0;
288  result.max_index = 0;
289 
290  return result;
291  }
292 
293  // To avoid uninitialized values on some MPI implementations, provide
294  // result with a default value already...
295  MinMaxAvg result = { 0., std::numeric_limits<double>::max(),
296  -std::numeric_limits<double>::max(), 0, 0, 0.
297  };
298 
 299  const unsigned int my_id
 300  = dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
 301  const unsigned int numproc
 302  = dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
303 
304  MPI_Op op;
305  int ierr = MPI_Op_create((MPI_User_function *)&max_reduce, true, &op);
306  AssertThrowMPI(ierr);
307 
308  MinMaxAvg in;
309  in.sum = in.min = in.max = my_value;
310  in.min_index = in.max_index = my_id;
311 
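// Describe the memory layout of MinMaxAvg to MPI: a block of three doubles
// (sum, min, max) followed by a block of two integers (min_index, max_index).
// The avg member is not part of the reduction; it is computed from the
// reduced sum below.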
312  MPI_Datatype type;
313  int lengths[]= {3,2};
314  MPI_Aint displacements[]= {0,offsetof(MinMaxAvg, min_index)};
315  MPI_Datatype types[]= {MPI_DOUBLE, MPI_INT};
316 
317  ierr = MPI_Type_struct(2, lengths, displacements, types, &type);
318  AssertThrowMPI(ierr);
319 
320  ierr = MPI_Type_commit(&type);
321  AssertThrowMPI(ierr);
322  ierr = MPI_Allreduce (&in, &result, 1, type, op, mpi_communicator);
323  AssertThrowMPI(ierr);
324 
325  ierr = MPI_Type_free (&type);
326  AssertThrowMPI(ierr);
327 
328  ierr = MPI_Op_free(&op);
329  AssertThrowMPI(ierr);
330 
331  result.avg = result.sum / numproc;
332 
333  return result;
334  }
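// A usage sketch (illustrative, not part of mpi.cc; 'local_value' stands for
// any per-process number): one reduction yields minimum, maximum, sum and
// average of the value, plus the ranks that hold the extrema.
//
//   const double local_value = ...;
//   const Utilities::MPI::MinMaxAvg stats
//     = Utilities::MPI::min_max_avg (local_value, MPI_COMM_WORLD);
//
//   if (Utilities::MPI::this_mpi_process (MPI_COMM_WORLD) == 0)
//     std::cout << "min=" << stats.min << " on rank " << stats.min_index
//               << ", max=" << stats.max << " on rank " << stats.max_index
//               << ", avg=" << stats.avg << std::endl;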
335 
336 #else
337 
338  unsigned int n_mpi_processes (const MPI_Comm &)
339  {
340  return 1;
341  }
342 
343 
344 
345  unsigned int this_mpi_process (const MPI_Comm &)
346  {
347  return 0;
348  }
349 
350 
351  MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator)
352  {
353  return mpi_communicator;
354  }
355 
356 
357 
358  MinMaxAvg
359  min_max_avg(const double my_value,
360  const MPI_Comm &)
361  {
362  MinMaxAvg result;
363 
364  result.sum = my_value;
365  result.avg = my_value;
366  result.min = my_value;
367  result.max = my_value;
368  result.min_index = 0;
369  result.max_index = 0;
370 
371  return result;
372  }
373 
374 #endif
375 
376 
377 
 378  MPI_InitFinalize::MPI_InitFinalize (int &argc,
 379  char ** &argv,
380  const unsigned int max_num_threads)
381  {
382  static bool constructor_has_already_run = false;
383  (void)constructor_has_already_run;
384  Assert (constructor_has_already_run == false,
385  ExcMessage ("You can only create a single object of this class "
386  "in a program since it initializes the MPI system."));
387 
388 
389  int ierr = 0;
390 #ifdef DEAL_II_WITH_MPI
391  // if we have PETSc, we will initialize it and let it handle MPI.
392  // Otherwise, we will do it.
393  int MPI_has_been_started = 0;
394  ierr = MPI_Initialized(&MPI_has_been_started);
395  AssertThrowMPI(ierr);
396  AssertThrow (MPI_has_been_started == 0,
397  ExcMessage ("MPI error. You can only start MPI once!"));
398 
399  int provided;
400  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
401  // we might use several threads but never call two MPI functions at the
 402  // same time. For an explanation of why we do this, see
403  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
404  int wanted = MPI_THREAD_SERIALIZED;
405  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
406  AssertThrowMPI(ierr);
407 
408  // disable for now because at least some implementations always return
409  // MPI_THREAD_SINGLE.
410  //Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
411  // ExcMessage("MPI reports that we are not allowed to use multiple threads."));
412 #else
413  // make sure the compiler doesn't warn about these variables
414  (void)argc;
415  (void)argv;
416  (void)ierr;
417 #endif
418 
419  // we are allowed to call MPI_Init ourselves and PETScInitialize will
420  // detect this. This allows us to use MPI_Init_thread instead.
421 #ifdef DEAL_II_WITH_PETSC
422 # ifdef DEAL_II_WITH_SLEPC
423  // Initialize SLEPc (with PETSc):
 424  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
 425  AssertThrow (ierr == 0, ExcSLEPcError(ierr));
 426 # else
427  // or just initialize PETSc alone:
428  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
429  AssertThrow (ierr == 0, ExcPETScError(ierr));
430 # endif
431 
432  // Disable PETSc exception handling. This just prints a large wall
433  // of text that is not particularly helpful for what we do:
434  PetscPopSignalHandler();
435 #endif
436 
437  //Initialize zoltan
438 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
439  float version;
440  Zoltan_Initialize(argc, argv, &version);
441 #endif
442 
443 #ifdef DEAL_II_WITH_P4EST
444  //Initialize p4est and libsc components
445 #if !(DEAL_II_P4EST_VERSION_GTE(2,0,0,0))
446  // This feature is broken in version 2.0.0 for calls to
447  // MPI_Comm_create_group (see cburstedde/p4est#30).
448  // Disabling it leads to more verbose p4est error messages
449  // which should be fine.
450  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
451 #endif
452  p4est_init (nullptr, SC_LP_SILENT);
453 #endif
454 
455  constructor_has_already_run = true;
456 
457 
458  // Now also see how many threads we'd like to run
459  if (max_num_threads != numbers::invalid_unsigned_int)
460  {
461  // set maximum number of threads (also respecting the environment
462  // variable that the called function evaluates) based on what the
463  // user asked
464  MultithreadInfo::set_thread_limit(max_num_threads);
465  }
466  else
467  // user wants automatic choice
468  {
469 #ifdef DEAL_II_WITH_MPI
470  // we need to figure out how many MPI processes there are on the
471  // current node, as well as how many CPU cores we have. for the
 472  // first task, check what get_hostname() returns and then do an
473  // allgather so each processor gets the answer
474  //
475  // in calculating the length of the string, don't forget the
476  // terminating \0 on C-style strings
477  const std::string hostname = Utilities::System::get_hostname();
478  const unsigned int max_hostname_size = Utilities::MPI::max (hostname.size()+1,
479  MPI_COMM_WORLD);
480  std::vector<char> hostname_array (max_hostname_size);
481  std::copy (hostname.c_str(), hostname.c_str()+hostname.size()+1,
482  hostname_array.begin());
483 
484  std::vector<char> all_hostnames(max_hostname_size *
485  MPI::n_mpi_processes(MPI_COMM_WORLD));
486  const int ierr = MPI_Allgather (hostname_array.data(), max_hostname_size, MPI_CHAR,
487  all_hostnames.data(), max_hostname_size, MPI_CHAR,
488  MPI_COMM_WORLD);
489  AssertThrowMPI(ierr);
490 
 491  // count how often our own hostname appears, and determine which
 492  // instance among these the current process is
493  unsigned int n_local_processes=0;
494  unsigned int nth_process_on_host = 0;
495  for (unsigned int i=0; i<MPI::n_mpi_processes(MPI_COMM_WORLD); ++i)
496  if (std::string (all_hostnames.data() + i*max_hostname_size) == hostname)
497  {
498  ++n_local_processes;
499  if (i <= MPI::this_mpi_process (MPI_COMM_WORLD))
500  ++nth_process_on_host;
501  }
502  Assert (nth_process_on_host > 0, ExcInternalError());
503 
504 
505  // compute how many cores each process gets. if the number does not
506  // divide evenly, then we get one more core if we are among the
507  // first few processes
508  //
509  // if the number would be zero, round up to one since every process
510  // needs to have at least one thread
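 // (for example, 14 cores shared by 4 processes on this host gives
 // 14/4 = 3 with remainder 2, so the first two of these processes
 // get 4 threads each and the other two get 3)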
511  const unsigned int n_threads
512  = std::max(MultithreadInfo::n_cores() / n_local_processes
513  +
514  (nth_process_on_host <= MultithreadInfo::n_cores() % n_local_processes
515  ?
516  1
517  :
518  0),
519  1U);
520 #else
521  const unsigned int n_threads = MultithreadInfo::n_cores();
522 #endif
523 
 524  // finally set this number of threads
 525  MultithreadInfo::set_thread_limit(n_threads);
 526  }
527  }
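// A typical usage sketch (illustrative, not part of mpi.cc): create exactly
// one MPI_InitFinalize object at the top of main(); MPI (and, if configured,
// PETSc/SLEPc, p4est/libsc and Zoltan) is initialized here and torn down
// again by the destructor below when the object goes out of scope.
//
//   int main (int argc, char **argv)
//   {
//     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
//
//     // ... parallel program ...
//   }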
528 
529 
 530  MPI_InitFinalize::~MPI_InitFinalize ()
 531  {
532  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
533  // are no longer used at this point. this is relevant because the static
534  // object destructors run for these vectors at the end of the program
535  // would run after MPI_Finalize is called, leading to errors
536 
537 #ifdef DEAL_II_WITH_MPI
538  // Start with the deal.II MPI vectors (need to do this before finalizing
539  // PETSc because it finalizes MPI). Delete vectors from the pools:
 540  GrowingVectorMemory<LinearAlgebra::distributed::Vector<double> >
 541  ::release_unused_memory ();
 542  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double> >
 543  ::release_unused_memory ();
 544  GrowingVectorMemory<LinearAlgebra::distributed::Vector<float> >
 545  ::release_unused_memory ();
 546  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float> >
 547  ::release_unused_memory ();
548 
549  // Next with Trilinos:
 550 # if defined(DEAL_II_WITH_TRILINOS)
 551  GrowingVectorMemory<TrilinosWrappers::MPI::Vector>
 552  ::release_unused_memory ();
 553  GrowingVectorMemory<TrilinosWrappers::MPI::BlockVector>
 554  ::release_unused_memory ();
 555 # endif
556 #endif
557 
558 
559  // Now deal with PETSc (with or without MPI). Only delete the vectors if
560  // finalize hasn't been called yet, otherwise this will lead to errors.
561 #ifdef DEAL_II_WITH_PETSC
562  if ((PetscInitializeCalled == PETSC_TRUE)
563  &&
564  (PetscFinalizeCalled == PETSC_FALSE))
 565  {
 566  GrowingVectorMemory<PETScWrappers::MPI::Vector>
 567  ::release_unused_memory ();
 568  GrowingVectorMemory<PETScWrappers::MPI::BlockVector>
 569  ::release_unused_memory ();
 570 
571 # ifdef DEAL_II_WITH_SLEPC
572  // and now end SLEPc (with PETSc)
573  SlepcFinalize();
574 # else
575  // or just end PETSc.
576  PetscFinalize();
577 # endif
578  }
579 #endif
580 
581 #ifdef DEAL_II_WITH_P4EST
582  // now end p4est and libsc
583  // Note: p4est has no finalize function
584  sc_finalize ();
585 #endif
586 
587 
588  // only MPI_Finalize if we are running with MPI. We also need to do this
589  // when running PETSc, because we initialize MPI ourselves before
590  // calling PetscInitialize
591 #ifdef DEAL_II_WITH_MPI
592  if (job_supports_mpi() == true)
593  {
594  if (std::uncaught_exception())
595  {
596  std::cerr << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
597  << this_mpi_process(MPI_COMM_WORLD)
598  << ". Skipping MPI_Finalize() to avoid a deadlock."
599  << std::endl;
600  }
601  else
602  {
603  const int ierr = MPI_Finalize();
604  (void) ierr;
 605  AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
606  }
607  }
608 #endif
609  }
610 
611 
612 
 613  bool job_supports_mpi ()
 614  {
615 #ifdef DEAL_II_WITH_MPI
616  int MPI_has_been_started = 0;
617  const int ierr = MPI_Initialized(&MPI_has_been_started);
618  AssertThrowMPI(ierr);
619 
620  return (MPI_has_been_started > 0);
621 #else
622  return false;
623 #endif
624  }
625 
626 
627 
628 #include "mpi.inst"
629  } // end of namespace MPI
630 } // end of namespace Utilities
631 
632 DEAL_II_NAMESPACE_CLOSE