Reference documentation for deal.II version 9.1.1
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/multithread_info.h>
22 #include <deal.II/base/utilities.h>
23 
24 #include <deal.II/lac/la_parallel_block_vector.h>
25 #include <deal.II/lac/la_parallel_vector.h>
26 #include <deal.II/lac/vector_memory.h>
27 
28 #include <iostream>
29 #include <numeric>
30 
31 #ifdef DEAL_II_WITH_TRILINOS
32 # ifdef DEAL_II_WITH_MPI
33 # include <deal.II/lac/trilinos_parallel_block_vector.h>
34 # include <deal.II/lac/trilinos_vector.h>
35 # include <deal.II/lac/vector_memory.h>
36 
37 # include <Epetra_MpiComm.h>
38 # endif
39 #endif
40 
41 #ifdef DEAL_II_WITH_PETSC
42 # include <deal.II/lac/petsc_block_vector.h>
43 # include <deal.II/lac/petsc_vector.h>
44 
45 # include <petscsys.h>
46 #endif
47 
48 #ifdef DEAL_II_WITH_SLEPC
49 # include <deal.II/lac/slepc_solver.h>
50 
51 # include <slepcsys.h>
52 #endif
53 
54 #ifdef DEAL_II_WITH_P4EST
55 # include <p4est_bits.h>
56 #endif
57 
58 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
59 # include <zoltan_cpp.h>
60 #endif
61 
62 DEAL_II_NAMESPACE_OPEN
63 
64 
65 namespace Utilities
66 {
67  namespace MPI
68  {
69 #ifdef DEAL_II_WITH_MPI
70  unsigned int
71  n_mpi_processes(const MPI_Comm &mpi_communicator)
72  {
73  int n_jobs = 1;
74  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
75  AssertThrowMPI(ierr);
76 
77  return n_jobs;
78  }
79 
80 
81  unsigned int
82  this_mpi_process(const MPI_Comm &mpi_communicator)
83  {
84  int rank = 0;
85  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
86  AssertThrowMPI(ierr);
87 
88  return rank;
89  }
90 
91 
92  MPI_Comm
93  duplicate_communicator(const MPI_Comm &mpi_communicator)
94  {
95  MPI_Comm new_communicator;
96  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
97  AssertThrowMPI(ierr);
98  return new_communicator;
99  }
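A minimal usage sketch, not part of mpi.cc (the choice of MPI_COMM_WORLD is illustrative only): the duplicate returned by duplicate_communicator() is owned by the caller and should eventually be released with MPI_Comm_free.

  // Give this code its own communicator so its messages cannot collide
  // with other traffic on MPI_COMM_WORLD.
  MPI_Comm my_comm =
    dealii::Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
  // ... communicate on my_comm ...
  MPI_Comm_free(&my_comm);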
100 
101 
102 
103  int
104  create_group(const MPI_Comm & comm,
105  const MPI_Group &group,
106  const int tag,
107  MPI_Comm * new_comm)
108  {
109 # if DEAL_II_MPI_VERSION_GTE(3, 0)
110  return MPI_Comm_create_group(comm, group, tag, new_comm);
111 # else
112  int rank;
113  int ierr = MPI_Comm_rank(comm, &rank);
114  AssertThrowMPI(ierr);
115 
116  int grp_rank;
117  ierr = MPI_Group_rank(group, &grp_rank);
118  AssertThrowMPI(ierr);
119  if (grp_rank == MPI_UNDEFINED)
120  {
121  *new_comm = MPI_COMM_NULL;
122  return MPI_SUCCESS;
123  }
124 
125  int grp_size;
126  ierr = MPI_Group_size(group, &grp_size);
127  AssertThrowMPI(ierr);
128 
129  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
130  AssertThrowMPI(ierr);
131 
132  MPI_Group parent_grp;
133  ierr = MPI_Comm_group(comm, &parent_grp);
134  AssertThrowMPI(ierr);
135 
136  std::vector<int> pids(grp_size);
137  std::vector<int> grp_pids(grp_size);
138  std::iota(grp_pids.begin(), grp_pids.end(), 0);
139  ierr = MPI_Group_translate_ranks(
140  group, grp_size, grp_pids.data(), parent_grp, pids.data());
141  AssertThrowMPI(ierr);
142  ierr = MPI_Group_free(&parent_grp);
143  AssertThrowMPI(ierr);
144 
145  MPI_Comm comm_old = *new_comm;
146  MPI_Comm ic;
147  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
148  {
149  const int gid = grp_rank / merge_sz;
150  comm_old = *new_comm;
151  if (gid % 2 == 0)
152  {
153  if ((gid + 1) * merge_sz < grp_size)
154  {
155  ierr = (MPI_Intercomm_create(
156  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
157  AssertThrowMPI(ierr);
158  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
159  AssertThrowMPI(ierr);
160  }
161  }
162  else
163  {
164  ierr = MPI_Intercomm_create(
165  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
166  AssertThrowMPI(ierr);
167  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
168  AssertThrowMPI(ierr);
169  }
170  if (*new_comm != comm_old)
171  {
172  ierr = MPI_Comm_free(&ic);
173  AssertThrowMPI(ierr);
174  ierr = MPI_Comm_free(&comm_old);
175  AssertThrowMPI(ierr);
176  }
177  }
178 
179  return MPI_SUCCESS;
180 # endif
181  }
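A usage sketch, not part of mpi.cc: create_group() mirrors the semantics of MPI_Comm_create_group, so in the sketch only the processes that are members of the group call it. The even-ranks split, the tag value, and the helper function name are illustrative assumptions.

#include <deal.II/base/mpi.h>
#include <vector>

void build_even_rank_communicator(const MPI_Comm &comm)
{
  using namespace dealii;

  MPI_Group world_group;
  MPI_Comm_group(comm, &world_group);

  // Collect the even ranks of `comm` into a group of their own.
  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(comm);
  std::vector<int>   even_ranks;
  for (unsigned int r = 0; r < n_procs; r += 2)
    even_ranks.push_back(r);

  MPI_Group even_group;
  MPI_Group_incl(world_group,
                 static_cast<int>(even_ranks.size()),
                 even_ranks.data(),
                 &even_group);

  // Only the members of `even_group` take part in creating the communicator.
  MPI_Comm even_comm = MPI_COMM_NULL;
  if (Utilities::MPI::this_mpi_process(comm) % 2 == 0)
    Utilities::MPI::create_group(comm, even_group, /*tag=*/55, &even_comm);

  // ... use even_comm, then clean up ...
  if (even_comm != MPI_COMM_NULL)
    MPI_Comm_free(&even_comm);
  MPI_Group_free(&even_group);
  MPI_Group_free(&world_group);
}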
182 
183 
184 
185  std::vector<IndexSet>
186  create_ascending_partitioning(const MPI_Comm & comm,
187  const IndexSet::size_type &local_size)
188  {
189  const unsigned int n_proc = n_mpi_processes(comm);
190  const std::vector<IndexSet::size_type> sizes =
191  all_gather(comm, local_size);
192  const auto total_size =
193  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
194 
195  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
196 
197  IndexSet::size_type begin = 0;
198  for (unsigned int i = 0; i < n_proc; ++i)
199  {
200  res[i].add_range(begin, begin + sizes[i]);
201  begin = begin + sizes[i];
202  }
203 
204  return res;
205  }
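A usage sketch, not part of mpi.cc; the count of 100 locally owned elements and the function name are arbitrary illustrations. Every process passes its own local size and receives the complete, contiguous, rank-ordered partition of [0, total_size).

#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <vector>

void partition_example(const MPI_Comm &comm)
{
  using namespace dealii;

  const IndexSet::size_type   n_locally_owned = 100;
  const std::vector<IndexSet> partition =
    Utilities::MPI::create_ascending_partitioning(comm, n_locally_owned);

  // partition[r] holds the contiguous index range owned by rank r; with
  // equal local sizes, rank r owns [100*r, 100*(r+1)).
  const IndexSet &my_indices =
    partition[Utilities::MPI::this_mpi_process(comm)];
  (void)my_indices;
}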
206 
207 
208 
 209  std::vector<unsigned int>
 210  compute_point_to_point_communication_pattern(
 211  const MPI_Comm & mpi_comm,
212  const std::vector<unsigned int> &destinations)
213  {
214  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
215  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
216 
217  for (const unsigned int destination : destinations)
218  {
219  (void)destination;
220  Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
221  Assert(destination != myid,
222  ExcMessage(
223  "There is no point in communicating with ourselves."));
224  }
225 
226 # if DEAL_II_MPI_VERSION_GTE(2, 2)
227  // Calculate the number of messages to send to each process
228  std::vector<unsigned int> dest_vector(n_procs);
229  for (const auto &el : destinations)
230  ++dest_vector[el];
231 
232  // Find how many processes will send to this one
233  // by reducing with sum and then scattering the
234  // results over all processes
235  unsigned int n_recv_from;
236  const int ierr = MPI_Reduce_scatter_block(
237  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
238 
239  AssertThrowMPI(ierr);
240 
241  // Send myid to every process in `destinations` vector...
242  std::vector<MPI_Request> send_requests(destinations.size());
243  for (const auto &el : destinations)
244  MPI_Isend(&myid,
245  1,
246  MPI_UNSIGNED,
247  el,
248  32766,
249  mpi_comm,
250  send_requests.data() + (&el - destinations.data()));
251 
252  // if no one to receive from, return an empty vector
253  if (n_recv_from == 0)
254  return std::vector<unsigned int>();
255 
256  // ...otherwise receive `n_recv_from` times from the processes
257  // who communicate with this one. Store the obtained id's
258  // in the resulting vector
259  std::vector<unsigned int> origins(n_recv_from);
260  for (auto &el : origins)
261  MPI_Recv(&el,
262  1,
263  MPI_UNSIGNED,
264  MPI_ANY_SOURCE,
265  32766,
266  mpi_comm,
267  MPI_STATUS_IGNORE);
268 
269  MPI_Waitall(destinations.size(),
270  send_requests.data(),
271  MPI_STATUSES_IGNORE);
272  return origins;
273 # else
274  // let all processors communicate the maximal number of destinations
275  // they have
276  const unsigned int max_n_destinations =
277  Utilities::MPI::max(destinations.size(), mpi_comm);
278 
279  if (max_n_destinations == 0)
280  // all processes have nothing to send/receive:
281  return std::vector<unsigned int>();
282 
283  // now that we know the number of data packets every processor wants to
284  // send, set up a buffer with the maximal size and copy our destinations
285  // in there, padded with -1's
 286  std::vector<unsigned int> my_destinations(max_n_destinations,
 287  numbers::invalid_unsigned_int);
 288  std::copy(destinations.begin(),
289  destinations.end(),
290  my_destinations.begin());
291 
292  // now exchange these (we could communicate less data if we used
293  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
294  // processors in this case, which is more expensive than the reduction
295  // operation above in MPI_Allreduce)
296  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
297  const int ierr = MPI_Allgather(my_destinations.data(),
298  max_n_destinations,
299  MPI_UNSIGNED,
300  all_destinations.data(),
301  max_n_destinations,
302  MPI_UNSIGNED,
303  mpi_comm);
304  AssertThrowMPI(ierr);
305 
306  // now we know who is going to communicate with whom. collect who is
307  // going to communicate with us!
308  std::vector<unsigned int> origins;
309  for (unsigned int i = 0; i < n_procs; ++i)
310  for (unsigned int j = 0; j < max_n_destinations; ++j)
311  if (all_destinations[i * max_n_destinations + j] == myid)
312  origins.push_back(i);
 313  else if (all_destinations[i * max_n_destinations + j] ==
 314  numbers::invalid_unsigned_int)
 315  break;
316 
317  return origins;
318 # endif
319  }
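A usage sketch, not part of mpi.cc (the destination list and function name are placeholders): each process names the ranks it will send to, and the call returns the ranks it will receive from, so that matching receives can be posted without a separate handshake.

#include <deal.II/base/mpi.h>
#include <vector>

void exchange_pattern_example(const MPI_Comm &                 comm,
                              const std::vector<unsigned int> &my_destinations)
{
  using namespace dealii;

  // Ranks that will send a message to this process:
  const std::vector<unsigned int> origins =
    Utilities::MPI::compute_point_to_point_communication_pattern(
      comm, my_destinations);

  for (const unsigned int origin : origins)
    {
      // post a receive for the message coming from process `origin`
      (void)origin;
    }
}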
320 
321 
322 
 323  unsigned int
 324  compute_n_point_to_point_communications(
 325  const MPI_Comm & mpi_comm,
326  const std::vector<unsigned int> &destinations)
327  {
328  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
329 
330  for (const unsigned int destination : destinations)
331  {
332  (void)destination;
333  Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
334  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
335  ExcMessage(
336  "There is no point in communicating with ourselves."));
337  }
338 
339  // Calculate the number of messages to send to each process
340  std::vector<unsigned int> dest_vector(n_procs);
341  for (const auto &el : destinations)
342  ++dest_vector[el];
343 
344 # if DEAL_II_MPI_VERSION_GTE(2, 2)
345  // Find out how many processes will send to this one
346  // MPI_Reduce_scatter(_block) does exactly this
347  unsigned int n_recv_from = 0;
348 
349  const int ierr = MPI_Reduce_scatter_block(
350  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
351 
352  AssertThrowMPI(ierr);
353 
354  return n_recv_from;
355 # else
356  // Find out how many processes will send to this one
357  // by reducing with sum and then scattering the
358  // results over all processes
359  std::vector<unsigned int> buffer(dest_vector.size());
360  unsigned int n_recv_from = 0;
361 
362  MPI_Reduce(dest_vector.data(),
363  buffer.data(),
364  dest_vector.size(),
365  MPI_UNSIGNED,
366  MPI_SUM,
367  0,
368  mpi_comm);
369  MPI_Scatter(buffer.data(),
370  1,
371  MPI_UNSIGNED,
372  &n_recv_from,
373  1,
374  MPI_UNSIGNED,
375  0,
376  mpi_comm);
377 
378  return n_recv_from;
379 # endif
380  }
381 
382 
383 
384  namespace
385  {
 386  // custom MPI_Op for min_max_avg()
387  void
388  max_reduce(const void *in_lhs_,
389  void * inout_rhs_,
390  int * len,
391  MPI_Datatype *)
392  {
393  (void)len;
394  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
395  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
396 
397  Assert(*len == 1, ExcInternalError());
398 
399  inout_rhs->sum += in_lhs->sum;
400  if (inout_rhs->min > in_lhs->min)
401  {
402  inout_rhs->min = in_lhs->min;
403  inout_rhs->min_index = in_lhs->min_index;
404  }
405  else if (inout_rhs->min == in_lhs->min)
406  {
407  // choose lower cpu index when tied to make operator commutative
408  if (inout_rhs->min_index > in_lhs->min_index)
409  inout_rhs->min_index = in_lhs->min_index;
410  }
411 
412  if (inout_rhs->max < in_lhs->max)
413  {
414  inout_rhs->max = in_lhs->max;
415  inout_rhs->max_index = in_lhs->max_index;
416  }
417  else if (inout_rhs->max == in_lhs->max)
418  {
419  // choose lower cpu index when tied to make operator commutative
420  if (inout_rhs->max_index > in_lhs->max_index)
421  inout_rhs->max_index = in_lhs->max_index;
422  }
423  }
424  } // namespace
425 
426 
427 
428  MinMaxAvg
429  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
430  {
431  // If MPI was not started, we have a serial computation and cannot run
432  // the other MPI commands
433  if (job_supports_mpi() == false)
434  {
435  MinMaxAvg result;
436  result.sum = my_value;
437  result.avg = my_value;
438  result.min = my_value;
439  result.max = my_value;
440  result.min_index = 0;
441  result.max_index = 0;
442 
443  return result;
444  }
445 
446  // To avoid uninitialized values on some MPI implementations, provide
447  // result with a default value already...
448  MinMaxAvg result = {0.,
449  std::numeric_limits<double>::max(),
450  -std::numeric_limits<double>::max(),
451  0,
452  0,
453  0.};
454 
455  const unsigned int my_id =
456  ::Utilities::MPI::this_mpi_process(mpi_communicator);
457  const unsigned int numproc =
458  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
459 
460  MPI_Op op;
461  int ierr =
462  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
463  true,
464  &op);
465  AssertThrowMPI(ierr);
466 
467  MinMaxAvg in;
468  in.sum = in.min = in.max = my_value;
469  in.min_index = in.max_index = my_id;
470 
471  MPI_Datatype type;
472  int lengths[] = {3, 2};
473  MPI_Aint displacements[] = {0, offsetof(MinMaxAvg, min_index)};
474  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT};
475 
476  ierr = MPI_Type_create_struct(2, lengths, displacements, types, &type);
477  AssertThrowMPI(ierr);
478 
479  ierr = MPI_Type_commit(&type);
480  AssertThrowMPI(ierr);
481  ierr = MPI_Allreduce(&in, &result, 1, type, op, mpi_communicator);
482  AssertThrowMPI(ierr);
483 
484  ierr = MPI_Type_free(&type);
485  AssertThrowMPI(ierr);
486 
487  ierr = MPI_Op_free(&op);
488  AssertThrowMPI(ierr);
489 
490  result.avg = result.sum / numproc;
491 
492  return result;
493  }
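A usage sketch, not part of mpi.cc, e.g. for reporting per-rank timings; MPI_COMM_WORLD, the function name, and the printed format are illustrative assumptions.

#include <deal.II/base/mpi.h>
#include <iostream>

void report_timing(const double local_seconds)
{
  using namespace dealii;

  const Utilities::MPI::MinMaxAvg stats =
    Utilities::MPI::min_max_avg(local_seconds, MPI_COMM_WORLD);

  // min_index/max_index are the ranks on which the extremes occurred.
  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
    std::cout << "time: min " << stats.min << "s (rank " << stats.min_index
              << "), max " << stats.max << "s (rank " << stats.max_index
              << "), avg " << stats.avg << "s" << std::endl;
}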
494 
495 #else
496 
497  unsigned int
498  n_mpi_processes(const MPI_Comm &)
499  {
500  return 1;
501  }
502 
503 
504 
505  unsigned int
506  this_mpi_process(const MPI_Comm &)
507  {
508  return 0;
509  }
510 
511 
512  std::vector<IndexSet>
513  create_ascending_partitioning(const MPI_Comm & /*comm*/,
514  const IndexSet::size_type &local_size)
515  {
516  return std::vector<IndexSet>(1, complete_index_set(local_size));
517  }
518 
519 
520  MPI_Comm
521  duplicate_communicator(const MPI_Comm &mpi_communicator)
522  {
523  return mpi_communicator;
524  }
525 
526 
527 
528  MinMaxAvg
529  min_max_avg(const double my_value, const MPI_Comm &)
530  {
531  MinMaxAvg result;
532 
533  result.sum = my_value;
534  result.avg = my_value;
535  result.min = my_value;
536  result.max = my_value;
537  result.min_index = 0;
538  result.max_index = 0;
539 
540  return result;
541  }
542 
543 #endif
544 
545 
 546 
 547  MPI_InitFinalize::MPI_InitFinalize(int & argc,
 548  char **& argv,
549  const unsigned int max_num_threads)
550  {
551  static bool constructor_has_already_run = false;
552  (void)constructor_has_already_run;
553  Assert(constructor_has_already_run == false,
554  ExcMessage("You can only create a single object of this class "
555  "in a program since it initializes the MPI system."));
556 
557 
558  int ierr = 0;
559 #ifdef DEAL_II_WITH_MPI
560  // if we have PETSc, we will initialize it and let it handle MPI.
561  // Otherwise, we will do it.
562  int MPI_has_been_started = 0;
563  ierr = MPI_Initialized(&MPI_has_been_started);
564  AssertThrowMPI(ierr);
565  AssertThrow(MPI_has_been_started == 0,
566  ExcMessage("MPI error. You can only start MPI once!"));
567 
568  int provided;
569  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
570  // we might use several threads but never call two MPI functions at the
 571  // same time. For an explanation of why we do this, see
572  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
573  int wanted = MPI_THREAD_SERIALIZED;
574  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
575  AssertThrowMPI(ierr);
576 
577  // disable for now because at least some implementations always return
578  // MPI_THREAD_SINGLE.
579  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
580  // ExcMessage("MPI reports that we are not allowed to use multiple
581  // threads."));
582 #else
583  // make sure the compiler doesn't warn about these variables
584  (void)argc;
585  (void)argv;
586  (void)ierr;
587 #endif
588 
589  // we are allowed to call MPI_Init ourselves and PETScInitialize will
590  // detect this. This allows us to use MPI_Init_thread instead.
591 #ifdef DEAL_II_WITH_PETSC
592 # ifdef DEAL_II_WITH_SLEPC
593  // Initialize SLEPc (with PETSc):
 594  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
 595  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
 596 # else
597  // or just initialize PETSc alone:
598  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
599  AssertThrow(ierr == 0, ExcPETScError(ierr));
600 # endif
601 
602  // Disable PETSc exception handling. This just prints a large wall
603  // of text that is not particularly helpful for what we do:
604  PetscPopSignalHandler();
605 #endif
606 
607  // Initialize zoltan
608 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
609  float version;
610  Zoltan_Initialize(argc, argv, &version);
611 #endif
612 
613 #ifdef DEAL_II_WITH_P4EST
614  // Initialize p4est and libsc components
615 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
616 # else
617  // This feature is broken in version 2.0.0 for calls to
618  // MPI_Comm_create_group (see cburstedde/p4est#30).
619  // Disabling it leads to more verbose p4est error messages
620  // which should be fine.
621  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
622 # endif
623  p4est_init(nullptr, SC_LP_SILENT);
624 #endif
625 
626  constructor_has_already_run = true;
627 
628 
629  // Now also see how many threads we'd like to run
630  if (max_num_threads != numbers::invalid_unsigned_int)
631  {
632  // set maximum number of threads (also respecting the environment
633  // variable that the called function evaluates) based on what the
634  // user asked
635  MultithreadInfo::set_thread_limit(max_num_threads);
636  }
637  else
638  // user wants automatic choice
639  {
640 #ifdef DEAL_II_WITH_MPI
641  // we need to figure out how many MPI processes there are on the
642  // current node, as well as how many CPU cores we have. for the
 643  // first task, check what get_hostname() returns and then do an
644  // allgather so each processor gets the answer
645  //
646  // in calculating the length of the string, don't forget the
647  // terminating \0 on C-style strings
648  const std::string hostname = Utilities::System::get_hostname();
649  const unsigned int max_hostname_size =
650  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
651  std::vector<char> hostname_array(max_hostname_size);
652  std::copy(hostname.c_str(),
653  hostname.c_str() + hostname.size() + 1,
654  hostname_array.begin());
655 
656  std::vector<char> all_hostnames(max_hostname_size *
657  MPI::n_mpi_processes(MPI_COMM_WORLD));
658  const int ierr = MPI_Allgather(hostname_array.data(),
659  max_hostname_size,
660  MPI_CHAR,
661  all_hostnames.data(),
662  max_hostname_size,
663  MPI_CHAR,
664  MPI_COMM_WORLD);
665  AssertThrowMPI(ierr);
666 
 667  // search how often our own hostname appears and which instance
 668  // among these the current process represents
669  unsigned int n_local_processes = 0;
670  unsigned int nth_process_on_host = 0;
671  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
672  ++i)
673  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
674  hostname)
675  {
676  ++n_local_processes;
677  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
678  ++nth_process_on_host;
679  }
680  Assert(nth_process_on_host > 0, ExcInternalError());
681 
682 
683  // compute how many cores each process gets. if the number does not
684  // divide evenly, then we get one more core if we are among the
685  // first few processes
686  //
687  // if the number would be zero, round up to one since every process
688  // needs to have at least one thread
689  const unsigned int n_threads =
690  std::max(MultithreadInfo::n_cores() / n_local_processes +
691  (nth_process_on_host <=
692  MultithreadInfo::n_cores() % n_local_processes ?
693  1 :
694  0),
695  1U);
696 #else
697  const unsigned int n_threads = MultithreadInfo::n_cores();
698 #endif
699 
 700  // finally set this number of threads
 701  MultithreadInfo::set_thread_limit(n_threads);
 702  }
703  }
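A minimal, self-contained usage sketch, not part of mpi.cc: the usual pattern is to create a single MPI_InitFinalize object at the top of main(); its constructor performs the initialization shown above, and its destructor (below) tears everything down again when main() returns. Limiting each process to one thread, as done here, is a common but not required choice.

#include <deal.II/base/mpi.h>
#include <iostream>

int main(int argc, char *argv[])
{
  using namespace dealii;

  // Initializes MPI (and, if configured, PETSc/SLEPc, Zoltan, p4est) and
  // caps this process at one thread; everything is finalized automatically
  // when the object goes out of scope.
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
    std::cout << "Running on "
              << Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD)
              << " MPI processes." << std::endl;
}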
704 
 705 
 706  MPI_InitFinalize::~MPI_InitFinalize()
 707  {
708  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
709  // are no longer used at this point. this is relevant because the static
710  // object destructors run for these vectors at the end of the program
711  // would run after MPI_Finalize is called, leading to errors
712 
713 #ifdef DEAL_II_WITH_MPI
714  // Start with the deal.II MPI vectors (need to do this before finalizing
 715  // PETSc because it finalizes MPI). Delete vectors from the pools:
 716  GrowingVectorMemory<
 717  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
 718  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
 719  release_unused_memory();
 720  GrowingVectorMemory<
 721  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
 722  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
 723  release_unused_memory();
724 
725  // Next with Trilinos:
 726 # if defined(DEAL_II_WITH_TRILINOS)
 727  GrowingVectorMemory<
 728  TrilinosWrappers::MPI::Vector>::release_unused_memory();
 729  GrowingVectorMemory<
 730  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
731 # endif
732 #endif
733 
734 
735  // Now deal with PETSc (with or without MPI). Only delete the vectors if
736  // finalize hasn't been called yet, otherwise this will lead to errors.
737 #ifdef DEAL_II_WITH_PETSC
738  if ((PetscInitializeCalled == PETSC_TRUE) &&
739  (PetscFinalizeCalled == PETSC_FALSE))
 740  {
 741  GrowingVectorMemory<
 742  PETScWrappers::MPI::Vector>::release_unused_memory();
 743  GrowingVectorMemory<
 744  PETScWrappers::MPI::BlockVector>::release_unused_memory();
745 
746 # ifdef DEAL_II_WITH_SLEPC
747  // and now end SLEPc (with PETSc)
748  SlepcFinalize();
749 # else
750  // or just end PETSc.
751  PetscFinalize();
752 # endif
753  }
754 #endif
755 
756 // There is a similar issue with CUDA: The destructor of static objects might
757 // run after the CUDA driver is unloaded. Hence, also release all memory
758 // related to CUDA vectors.
 759 #ifdef DEAL_II_WITH_CUDA
 760  GrowingVectorMemory<
 761  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
 762  release_unused_memory();
 763  GrowingVectorMemory<
 764  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
 765  release_unused_memory();
766 #endif
767 
768 #ifdef DEAL_II_WITH_P4EST
769  // now end p4est and libsc
770  // Note: p4est has no finalize function
771  sc_finalize();
772 #endif
773 
774 
775  // only MPI_Finalize if we are running with MPI. We also need to do this
776  // when running PETSc, because we initialize MPI ourselves before
777  // calling PetscInitialize
778 #ifdef DEAL_II_WITH_MPI
779  if (job_supports_mpi() == true)
780  {
781  if (std::uncaught_exception())
782  {
783  std::cerr
784  << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
785  << this_mpi_process(MPI_COMM_WORLD)
786  << ". Skipping MPI_Finalize() to avoid a deadlock."
787  << std::endl;
788  }
789  else
790  {
791  const int ierr = MPI_Finalize();
792  (void)ierr;
793  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
794  }
795  }
796 #endif
797  }
798 
799 
800 
 801  bool
 802  job_supports_mpi()
 803  {
804 #ifdef DEAL_II_WITH_MPI
805  int MPI_has_been_started = 0;
806  const int ierr = MPI_Initialized(&MPI_has_been_started);
807  AssertThrowMPI(ierr);
808 
809  return (MPI_has_been_started > 0);
810 #else
811  return false;
812 #endif
813  }
814 
815 
816 
817 #include "mpi.inst"
818  } // end of namespace MPI
819 } // end of namespace Utilities
820 
821 DEAL_II_NAMESPACE_CLOSE