Reference documentation for deal.II version Git da67ed5bf7 2020-02-15 08:03:21 -0800
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/utilities.h>
25 
26 #include <deal.II/lac/la_parallel_block_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/vector_memory.h>
29 
30 #include <iostream>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #ifdef DEAL_II_WITH_TRILINOS
36 # ifdef DEAL_II_WITH_MPI
37 # include <deal.II/lac/trilinos_parallel_block_vector.h>
38 # include <deal.II/lac/trilinos_vector.h>
39 # include <deal.II/lac/vector_memory.h>
40 
41 # include <Epetra_MpiComm.h>
42 # endif
43 #endif
44 
45 #ifdef DEAL_II_WITH_PETSC
46 # include <deal.II/lac/petsc_block_vector.h>
47 # include <deal.II/lac/petsc_vector.h>
48 
49 # include <petscsys.h>
50 #endif
51 
52 #ifdef DEAL_II_WITH_SLEPC
53 # include <deal.II/lac/slepc_solver.h>
54 
55 # include <slepcsys.h>
56 #endif
57 
58 #ifdef DEAL_II_WITH_P4EST
59 # include <p4est_bits.h>
60 #endif
61 
62 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
63 # include <zoltan_cpp.h>
64 #endif
65 
66 DEAL_II_NAMESPACE_OPEN
67 
68 
69 namespace Utilities
70 {
71  namespace MPI
72  {
73  MinMaxAvg
74  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
75  {
76  MinMaxAvg result;
77  min_max_avg(ArrayView<const double>(my_value),
78  ArrayView<MinMaxAvg>(result),
79  mpi_communicator);
80 
81  return result;
82  }
83 
84 
85 
86  std::vector<MinMaxAvg>
87  min_max_avg(const std::vector<double> &my_values,
88  const MPI_Comm & mpi_communicator)
89  {
90  std::vector<MinMaxAvg> results(my_values.size());
91  min_max_avg(my_values, results, mpi_communicator);
92 
93  return results;
94  }
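 // A minimal usage sketch for the scalar overload above, assuming MPI has
 // been initialized; compute_local_error() stands in for any per-rank value:
 //
 //   const double local_error = compute_local_error();
 //   const Utilities::MPI::MinMaxAvg stats =
 //     Utilities::MPI::min_max_avg(local_error, MPI_COMM_WORLD);
 //   // stats.min, stats.max, stats.sum, and stats.avg are identical on all
 //   // ranks; stats.min_index and stats.max_index name the owning ranks.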
95 
96 
97 
98 #ifdef DEAL_II_WITH_MPI
99  unsigned int
100  n_mpi_processes(const MPI_Comm &mpi_communicator)
101  {
102  int n_jobs = 1;
103  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
104  AssertThrowMPI(ierr);
105 
106  return n_jobs;
107  }
108 
109 
110  unsigned int
111  this_mpi_process(const MPI_Comm &mpi_communicator)
112  {
113  int rank = 0;
114  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
115  AssertThrowMPI(ierr);
116 
117  return rank;
118  }
119 
120 
121  MPI_Comm
122  duplicate_communicator(const MPI_Comm &mpi_communicator)
123  {
124  MPI_Comm new_communicator;
125  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
126  AssertThrowMPI(ierr);
127  return new_communicator;
128  }
129 
130 
131 
132  void
133  free_communicator(MPI_Comm &mpi_communicator)
134  {
135  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
136  const int ierr = MPI_Comm_free(&mpi_communicator);
137  AssertThrowMPI(ierr);
138  }
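 // A minimal sketch of how the two functions above are meant to be paired,
 // assuming MPI has been initialized:
 //
 //   MPI_Comm dup = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
 //   // ... communicate on `dup` without interfering with other users of
 //   // MPI_COMM_WORLD ...
 //   Utilities::MPI::free_communicator(dup); // dup becomes MPI_COMM_NULL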
139 
140 
141 
142  int
143  create_group(const MPI_Comm & comm,
144  const MPI_Group &group,
145  const int tag,
146  MPI_Comm * new_comm)
147  {
148 # if DEAL_II_MPI_VERSION_GTE(3, 0)
149  return MPI_Comm_create_group(comm, group, tag, new_comm);
150 # else
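 // Fallback for MPI libraries older than 3.0 that lack
 // MPI_Comm_create_group: every member of `group` starts from a duplicate
 // of MPI_COMM_SELF, and neighboring blocks of ranks are then merged
 // pairwise, with the block size doubling in every round, using
 // MPI_Intercomm_create() and MPI_Intercomm_merge(), until a single
 // communicator covers the whole group.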
151  int rank;
152  int ierr = MPI_Comm_rank(comm, &rank);
153  AssertThrowMPI(ierr);
154 
155  int grp_rank;
156  ierr = MPI_Group_rank(group, &grp_rank);
157  AssertThrowMPI(ierr);
158  if (grp_rank == MPI_UNDEFINED)
159  {
160  *new_comm = MPI_COMM_NULL;
161  return MPI_SUCCESS;
162  }
163 
164  int grp_size;
165  ierr = MPI_Group_size(group, &grp_size);
166  AssertThrowMPI(ierr);
167 
168  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
169  AssertThrowMPI(ierr);
170 
171  MPI_Group parent_grp;
172  ierr = MPI_Comm_group(comm, &parent_grp);
173  AssertThrowMPI(ierr);
174 
175  std::vector<int> pids(grp_size);
176  std::vector<int> grp_pids(grp_size);
177  std::iota(grp_pids.begin(), grp_pids.end(), 0);
178  ierr = MPI_Group_translate_ranks(
179  group, grp_size, grp_pids.data(), parent_grp, pids.data());
180  AssertThrowMPI(ierr);
181  ierr = MPI_Group_free(&parent_grp);
182  AssertThrowMPI(ierr);
183 
184  MPI_Comm comm_old = *new_comm;
185  MPI_Comm ic;
186  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
187  {
188  const int gid = grp_rank / merge_sz;
189  comm_old = *new_comm;
190  if (gid % 2 == 0)
191  {
192  if ((gid + 1) * merge_sz < grp_size)
193  {
194  ierr = (MPI_Intercomm_create(
195  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
196  AssertThrowMPI(ierr);
197  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
198  AssertThrowMPI(ierr);
199  }
200  }
201  else
202  {
203  ierr = MPI_Intercomm_create(
204  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
205  AssertThrowMPI(ierr);
206  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
207  AssertThrowMPI(ierr);
208  }
209  if (*new_comm != comm_old)
210  {
211  ierr = MPI_Comm_free(&ic);
212  AssertThrowMPI(ierr);
213  ierr = MPI_Comm_free(&comm_old);
214  AssertThrowMPI(ierr);
215  }
216  }
217 
218  return MPI_SUCCESS;
219 # endif
220  }
221 
222 
223 
224  std::vector<IndexSet>
225  create_ascending_partitioning(const MPI_Comm & comm,
226  const IndexSet::size_type &local_size)
227  {
228  const unsigned int n_proc = n_mpi_processes(comm);
229  const std::vector<IndexSet::size_type> sizes =
230  all_gather(comm, local_size);
231  const auto total_size =
232  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
233 
234  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
235 
236  IndexSet::size_type begin = 0;
237  for (unsigned int i = 0; i < n_proc; ++i)
238  {
239  res[i].add_range(begin, begin + sizes[i]);
240  begin = begin + sizes[i];
241  }
242 
243  return res;
244  }
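 // A worked example for the function above: if three ranks pass local_size
 // values of 4, 5, and 3, every rank receives the same three IndexSets of
 // global size 12, containing the ranges [0,4), [4,9), and [9,12),
 // respectively.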
245 
246 
247 
252  class ConsensusAlgorithmProcessTargets
253  : public ConsensusAlgorithmProcess<int, int>
254  {
255  public:
256  ConsensusAlgorithmProcessTargets(const std::vector<unsigned int> &target)
257  : target(target)
258  {}
259 
260  using T1 = int;
261  using T2 = int;
262 
263  virtual void
264  answer_request(const unsigned int other_rank,
265  const std::vector<T1> &,
266  std::vector<T2> &) override
267  {
268  this->sources.push_back(other_rank);
269  }
270 
276  virtual std::vector<unsigned int>
277  compute_targets() override
278  {
279  return target;
280  }
281 
287  std::vector<unsigned int>
288  get_result()
289  {
290  std::sort(sources.begin(), sources.end());
291  return sources;
292  }
293 
294  private:
298  const std::vector<unsigned int> &target;
299 
303  std::vector<unsigned int> sources;
304  };
305 
306 
307 
308  std::vector<unsigned int>
309  compute_point_to_point_communication_pattern(
310  const MPI_Comm & mpi_comm,
311  const std::vector<unsigned int> &destinations)
312  {
313  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
314  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
315  (void)myid;
316  (void)n_procs;
317 
318  for (const unsigned int destination : destinations)
319  {
320  (void)destination;
321  AssertIndexRange(destination, n_procs);
322  Assert(destination != myid,
323  ExcMessage(
324  "There is no point in communicating with ourselves."));
325  }
326 
327 # if DEAL_II_MPI_VERSION_GTE(3, 0)
328 
329  ConsensusAlgorithmProcessTargets process(destinations);
330  ConsensusAlgorithm_NBX<ConsensusAlgorithmProcessTargets::T1,
331  ConsensusAlgorithmProcessTargets::T2>
332  consensus_algorithm(process, mpi_comm);
333  consensus_algorithm.run();
334  return process.get_result();
335 
336 # elif DEAL_II_MPI_VERSION_GTE(2, 2)
337 
338  static CollectiveMutex mutex;
339  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
340 
341  const int mpi_tag =
342  internal::Tags::compute_point_to_point_communication_pattern;
343 
344  // Calculate the number of messages to send to each process
345  std::vector<unsigned int> dest_vector(n_procs);
346  for (const auto &el : destinations)
347  ++dest_vector[el];
348 
349  // Find how many processes will send to this one
350  // by reducing with sum and then scattering the
351  // results over all processes
352  unsigned int n_recv_from;
353  const int ierr = MPI_Reduce_scatter_block(
354  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
355 
356  AssertThrowMPI(ierr);
357 
358  // Send myid to every process in `destinations` vector...
359  std::vector<MPI_Request> send_requests(destinations.size());
360  for (const auto &el : destinations)
361  {
362  const int ierr =
363  MPI_Isend(&myid,
364  1,
365  MPI_UNSIGNED,
366  el,
367  mpi_tag,
368  mpi_comm,
369  send_requests.data() + (&el - destinations.data()));
370  AssertThrowMPI(ierr);
371  }
372 
373 
374  // Receive `n_recv_from` times from the processes
375  // who communicate with this one. Store the obtained id's
376  // in the resulting vector
377  std::vector<unsigned int> origins(n_recv_from);
378  for (auto &el : origins)
379  {
380  const int ierr = MPI_Recv(&el,
381  1,
382  MPI_UNSIGNED,
383  MPI_ANY_SOURCE,
384  mpi_tag,
385  mpi_comm,
386  MPI_STATUS_IGNORE);
387  AssertThrowMPI(ierr);
388  }
389 
390  if (destinations.size() > 0)
391  {
392  const int ierr = MPI_Waitall(destinations.size(),
393  send_requests.data(),
394  MPI_STATUSES_IGNORE);
395  AssertThrowMPI(ierr);
396  }
397 
398  return origins;
399 # else
400  // let all processors communicate the maximal number of destinations
401  // they have
402  const unsigned int max_n_destinations =
403  Utilities::MPI::max(destinations.size(), mpi_comm);
404 
405  if (max_n_destinations == 0)
406  // all processes have nothing to send/receive:
407  return std::vector<unsigned int>();
408 
409  // now that we know the number of data packets every processor wants to
410  // send, set up a buffer with the maximal size and copy our destinations
411  // in there, padded with -1's
412  std::vector<unsigned int> my_destinations(max_n_destinations,
413  numbers::invalid_unsigned_int);
414  std::copy(destinations.begin(),
415  destinations.end(),
416  my_destinations.begin());
417 
418  // now exchange these (we could communicate less data if we used
419  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
420  // processors in this case, which is more expensive than the reduction
421  // operation above in MPI_Allreduce)
422  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
423  const int ierr = MPI_Allgather(my_destinations.data(),
424  max_n_destinations,
425  MPI_UNSIGNED,
426  all_destinations.data(),
427  max_n_destinations,
428  MPI_UNSIGNED,
429  mpi_comm);
430  AssertThrowMPI(ierr);
431 
432  // now we know who is going to communicate with whom. collect who is
433  // going to communicate with us!
434  std::vector<unsigned int> origins;
435  for (unsigned int i = 0; i < n_procs; ++i)
436  for (unsigned int j = 0; j < max_n_destinations; ++j)
437  if (all_destinations[i * max_n_destinations + j] == myid)
438  origins.push_back(i);
439  else if (all_destinations[i * max_n_destinations + j] ==
440  numbers::invalid_unsigned_int)
441  break;
442 
443  return origins;
444 # endif
445  }
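 // A sketch of the contract of the function above, assuming two ranks:
 // if rank 0 passes destinations = {1} and rank 1 passes destinations = {0},
 // then rank 0 obtains {1} and rank 1 obtains {0}, i.e. each process learns
 // which ranks intend to send messages to it.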
446 
447 
448 
449  unsigned int
450  compute_n_point_to_point_communications(
451  const MPI_Comm & mpi_comm,
452  const std::vector<unsigned int> &destinations)
453  {
454  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
455 
456  for (const unsigned int destination : destinations)
457  {
458  (void)destination;
459  AssertIndexRange(destination, n_procs);
460  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
461  ExcMessage(
462  "There is no point in communicating with ourselves."));
463  }
464 
465  // Calculate the number of messages to send to each process
466  std::vector<unsigned int> dest_vector(n_procs);
467  for (const auto &el : destinations)
468  ++dest_vector[el];
469 
470 # if DEAL_II_MPI_VERSION_GTE(2, 2)
471  // Find out how many processes will send to this one
472  // MPI_Reduce_scatter(_block) does exactly this
473  unsigned int n_recv_from = 0;
474 
475  const int ierr = MPI_Reduce_scatter_block(
476  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
477 
478  AssertThrowMPI(ierr);
479 
480  return n_recv_from;
481 # else
482  // Find out how many processes will send to this one
483  // by reducing with sum and then scattering the
484  // results over all processes
485  std::vector<unsigned int> buffer(dest_vector.size());
486  unsigned int n_recv_from = 0;
487 
488  MPI_Reduce(dest_vector.data(),
489  buffer.data(),
490  dest_vector.size(),
491  MPI_UNSIGNED,
492  MPI_SUM,
493  0,
494  mpi_comm);
495  MPI_Scatter(buffer.data(),
496  1,
497  MPI_UNSIGNED,
498  &n_recv_from,
499  1,
500  MPI_UNSIGNED,
501  0,
502  mpi_comm);
503 
504  return n_recv_from;
505 # endif
506  }
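 // In contrast to compute_point_to_point_communication_pattern() above, the
 // function above only returns how many messages will arrive at the calling
 // process, not who sends them; a single reduce-scatter of the
 // per-destination counts is sufficient for that.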
507 
508 
509 
510  namespace
511  {
512  // custom MPI_Op for min_max_avg()
513  void
514  max_reduce(const void *in_lhs_,
515  void * inout_rhs_,
516  int * len,
517  MPI_Datatype *)
518  {
519  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
520  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
521 
522  for (int i = 0; i < *len; i++)
523  {
524  inout_rhs[i].sum += in_lhs[i].sum;
525  if (inout_rhs[i].min > in_lhs[i].min)
526  {
527  inout_rhs[i].min = in_lhs[i].min;
528  inout_rhs[i].min_index = in_lhs[i].min_index;
529  }
530  else if (inout_rhs[i].min == in_lhs[i].min)
531  {
532  // choose lower cpu index when tied to make operator commutative
533  if (inout_rhs[i].min_index > in_lhs[i].min_index)
534  inout_rhs[i].min_index = in_lhs[i].min_index;
535  }
536 
537  if (inout_rhs[i].max < in_lhs[i].max)
538  {
539  inout_rhs[i].max = in_lhs[i].max;
540  inout_rhs[i].max_index = in_lhs[i].max_index;
541  }
542  else if (inout_rhs[i].max == in_lhs[i].max)
543  {
544  // choose lower cpu index when tied to make operator commutative
545  if (inout_rhs[i].max_index > in_lhs[i].max_index)
546  inout_rhs[i].max_index = in_lhs[i].max_index;
547  }
548  }
549  }
550  } // namespace
551 
552 
553 
554  void
555  min_max_avg(const ArrayView<const double> &my_values,
556  const ArrayView<MinMaxAvg> & result,
557  const MPI_Comm & mpi_communicator)
558  {
559  // If MPI was not started, we have a serial computation and cannot run
560  // the other MPI commands
561  if (job_supports_mpi() == false ||
562  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
563  {
564  for (unsigned int i = 0; i < my_values.size(); i++)
565  {
566  result[i].sum = my_values[i];
567  result[i].avg = my_values[i];
568  result[i].min = my_values[i];
569  result[i].max = my_values[i];
570  result[i].min_index = 0;
571  result[i].max_index = 0;
572  }
573  return;
574  }
575 
576  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
577  Utilities::MPI::max(my_values.size(), mpi_communicator));
578 
579  AssertDimension(my_values.size(), result.size());
580 
581 
582 
583  // To avoid uninitialized values on some MPI implementations, provide
584  // result with a default value already...
585  MinMaxAvg dummy = {0.,
586  std::numeric_limits<double>::max(),
587  -std::numeric_limits<double>::max(),
588  0,
589  0,
590  0.};
591 
592  for (auto &i : result)
593  i = dummy;
594 
595  const unsigned int my_id =
596  dealii::Utilities::MPI::this_mpi_process(mpi_communicator);
597  const unsigned int numproc =
598  dealii::Utilities::MPI::n_mpi_processes(mpi_communicator);
599 
600  MPI_Op op;
601  int ierr =
602  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
603  true,
604  &op);
605  AssertThrowMPI(ierr);
606 
607  std::vector<MinMaxAvg> in(my_values.size());
608 
609  for (unsigned int i = 0; i < my_values.size(); i++)
610  {
611  in[i].sum = in[i].min = in[i].max = my_values[i];
612  in[i].min_index = in[i].max_index = my_id;
613  }
614 
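 // The MPI datatype built next mirrors the member layout of MinMaxAvg:
 // three consecutive doubles (sum, min, max), then two ints (min_index,
 // max_index), then one more double (avg). This is why the block lengths
 // are {3, 2, 1} and the displacements are taken at min_index and avg.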
615  MPI_Datatype type;
616  int lengths[] = {3, 2, 1};
617  MPI_Aint displacements[] = {0,
618  offsetof(MinMaxAvg, min_index),
619  offsetof(MinMaxAvg, avg)};
620  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
621 
622  ierr = MPI_Type_create_struct(3, lengths, displacements, types, &type);
623  AssertThrowMPI(ierr);
624 
625  ierr = MPI_Type_commit(&type);
626  AssertThrowMPI(ierr);
627  ierr = MPI_Allreduce(
628  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
629  AssertThrowMPI(ierr);
630 
631  ierr = MPI_Type_free(&type);
632  AssertThrowMPI(ierr);
633 
634  ierr = MPI_Op_free(&op);
635  AssertThrowMPI(ierr);
636 
637  for (auto &r : result)
638  r.avg = r.sum / numproc;
639  }
640 
641 
642 #else
643 
644  unsigned int
645  n_mpi_processes(const MPI_Comm &)
646  {
647  return 1;
648  }
649 
650 
651 
652  unsigned int
653  this_mpi_process(const MPI_Comm &)
654  {
655  return 0;
656  }
657 
658 
659 
660  std::vector<IndexSet>
661  create_ascending_partitioning(const MPI_Comm & /*comm*/,
662  const IndexSet::size_type &local_size)
663  {
664  return std::vector<IndexSet>(1, complete_index_set(local_size));
665  }
666 
667 
668 
669  MPI_Comm
670  duplicate_communicator(const MPI_Comm &mpi_communicator)
671  {
672  return mpi_communicator;
673  }
674 
675 
676 
677  void
678  free_communicator(MPI_Comm & /*mpi_communicator*/)
679  {}
680 
681 
682 
683  void
684  min_max_avg(const ArrayView<const double> &my_values,
685  const ArrayView<MinMaxAvg> & result,
686  const MPI_Comm &)
687  {
688  AssertDimension(my_values.size(), result.size());
689 
690  for (unsigned int i = 0; i < my_values.size(); i++)
691  {
692  result[i].sum = my_values[i];
693  result[i].avg = my_values[i];
694  result[i].min = my_values[i];
695  result[i].max = my_values[i];
696  result[i].min_index = 0;
697  result[i].max_index = 0;
698  }
699  }
700 
701 #endif
702 
703 
704 
705  MPI_InitFinalize::MPI_InitFinalize(int &argc,
706  char **& argv,
707  const unsigned int max_num_threads)
708  {
709  static bool constructor_has_already_run = false;
710  (void)constructor_has_already_run;
711  Assert(constructor_has_already_run == false,
712  ExcMessage("You can only create a single object of this class "
713  "in a program since it initializes the MPI system."));
714 
715 
716  int ierr = 0;
717 #ifdef DEAL_II_WITH_MPI
718  // if we have PETSc, we will initialize it and let it handle MPI.
719  // Otherwise, we will do it.
720  int MPI_has_been_started = 0;
721  ierr = MPI_Initialized(&MPI_has_been_started);
722  AssertThrowMPI(ierr);
723  AssertThrow(MPI_has_been_started == 0,
724  ExcMessage("MPI error. You can only start MPI once!"));
725 
726  int provided;
727  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
728  // we might use several threads but never call two MPI functions at the
729  // same time. For an explanation of why we do this, see
730  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
731  int wanted = MPI_THREAD_SERIALIZED;
732  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
733  AssertThrowMPI(ierr);
734 
735  // disable for now because at least some implementations always return
736  // MPI_THREAD_SINGLE.
737  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
738  // ExcMessage("MPI reports that we are not allowed to use multiple
739  // threads."));
740 #else
741  // make sure the compiler doesn't warn about these variables
742  (void)argc;
743  (void)argv;
744  (void)ierr;
745 #endif
746 
747  // we are allowed to call MPI_Init ourselves and PETScInitialize will
748  // detect this. This allows us to use MPI_Init_thread instead.
749 #ifdef DEAL_II_WITH_PETSC
750 # ifdef DEAL_II_WITH_SLEPC
751  // Initialize SLEPc (with PETSc):
752  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
753  AssertThrow(ierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(ierr));
754 # else
755  // or just initialize PETSc alone:
756  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
757  AssertThrow(ierr == 0, ExcPETScError(ierr));
758 # endif
759 
760  // Disable PETSc exception handling. This just prints a large wall
761  // of text that is not particularly helpful for what we do:
762  PetscPopSignalHandler();
763 #endif
764 
765  // Initialize zoltan
766 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
767  float version;
768  Zoltan_Initialize(argc, argv, &version);
769 #endif
770 
771 #ifdef DEAL_II_WITH_P4EST
772  // Initialize p4est and libsc components
773 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
774 # else
775  // This feature is broken in version 2.0.0 for calls to
776  // MPI_Comm_create_group (see cburstedde/p4est#30).
777  // Disabling it leads to more verbose p4est error messages
778  // which should be fine.
779  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
780 # endif
781  p4est_init(nullptr, SC_LP_SILENT);
782 #endif
783 
784  constructor_has_already_run = true;
785 
786 
787  // Now also see how many threads we'd like to run
788  if (max_num_threads != numbers::invalid_unsigned_int)
789  {
790  // set maximum number of threads (also respecting the environment
791  // variable that the called function evaluates) based on what the
792  // user asked
793  MultithreadInfo::set_thread_limit(max_num_threads);
794  }
795  else
796  // user wants automatic choice
797  {
798 #ifdef DEAL_II_WITH_MPI
799  // we need to figure out how many MPI processes there are on the
800  // current node, as well as how many CPU cores we have. for the
801  // first task, check what get_hostname() returns and then do an
802  // allgather so each processor gets the answer
803  //
804  // in calculating the length of the string, don't forget the
805  // terminating \0 on C-style strings
806  const std::string hostname = Utilities::System::get_hostname();
807  const unsigned int max_hostname_size =
808  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
809  std::vector<char> hostname_array(max_hostname_size);
810  std::copy(hostname.c_str(),
811  hostname.c_str() + hostname.size() + 1,
812  hostname_array.begin());
813 
814  std::vector<char> all_hostnames(max_hostname_size *
815  MPI::n_mpi_processes(MPI_COMM_WORLD));
816  const int ierr = MPI_Allgather(hostname_array.data(),
817  max_hostname_size,
818  MPI_CHAR,
819  all_hostnames.data(),
820  max_hostname_size,
821  MPI_CHAR,
822  MPI_COMM_WORLD);
823  AssertThrowMPI(ierr);
824 
825  // count how often our own hostname appears and determine which
826  // instance of it the current process represents
827  unsigned int n_local_processes = 0;
828  unsigned int nth_process_on_host = 0;
829  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
830  ++i)
831  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
832  hostname)
833  {
834  ++n_local_processes;
835  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
836  ++nth_process_on_host;
837  }
838  Assert(nth_process_on_host > 0, ExcInternalError());
839 
840 
841  // compute how many cores each process gets. if the number does not
842  // divide evenly, then we get one more core if we are among the
843  // first few processes
844  //
845  // if the number would be zero, round up to one since every process
846  // needs to have at least one thread
847  const unsigned int n_threads =
848  std::max(MultithreadInfo::n_cores() / n_local_processes +
849  (nth_process_on_host <=
850  MultithreadInfo::n_cores() % n_local_processes ?
851  1 :
852  0),
853  1U);
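 // For example, three MPI processes sharing an 8-core node end up with
 // 3, 3, and 2 threads, respectively.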
854 #else
855  const unsigned int n_threads = MultithreadInfo::n_cores();
856 #endif
857 
858  // finally set this number of threads
859  MultithreadInfo::set_thread_limit(n_threads);
860  }
861  }
862 
863 
864 
865  void
866  MPI_InitFinalize::register_request(MPI_Request &request)
867  {
868  // insert if it is not in the set already:
869  requests.insert(&request);
870  }
871 
872 
873 
874  void
875  MPI_InitFinalize::unregister_request(MPI_Request &request)
876  {
877  Assert(
878  requests.find(&request) != requests.end(),
879  ExcMessage(
880  "You tried to call unregister_request() with an invalid request."));
881 
882  requests.erase(&request);
883  }
884 
885 
886 
887  std::set<MPI_Request *> MPI_InitFinalize::requests;
888 
889 
890 
891  MPI_InitFinalize::~MPI_InitFinalize()
892  {
893  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
894  // are no longer used at this point. this is relevant because the static
895  // object destructors run for these vectors at the end of the program
896  // would run after MPI_Finalize is called, leading to errors
897 
898 #ifdef DEAL_II_WITH_MPI
899  // Before exiting, wait for nonblocking communication to complete:
900  for (auto request : requests)
901  {
902  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
903  AssertThrowMPI(ierr);
904  }
905 
906  // Start with deal.II MPI vectors and delete vectors from the pools:
907  GrowingVectorMemory<
908  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
909  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
910  release_unused_memory();
911  GrowingVectorMemory<
912  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
913  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
914  release_unused_memory();
915 
916  // Next with Trilinos:
917 # if defined(DEAL_II_WITH_TRILINOS)
918  GrowingVectorMemory<
919  TrilinosWrappers::MPI::Vector>::release_unused_memory();
920  GrowingVectorMemory<
921  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
922 # endif
923 #endif
924 
925 
926  // Now deal with PETSc (with or without MPI). Only delete the vectors if
927  // finalize hasn't been called yet, otherwise this will lead to errors.
928 #ifdef DEAL_II_WITH_PETSC
929  if ((PetscInitializeCalled == PETSC_TRUE) &&
930  (PetscFinalizeCalled == PETSC_FALSE))
931  {
932  GrowingVectorMemory<
933  PETScWrappers::MPI::Vector>::release_unused_memory();
934  GrowingVectorMemory<
935  PETScWrappers::MPI::BlockVector>::release_unused_memory();
936 
937 # ifdef DEAL_II_WITH_SLEPC
938  // and now end SLEPc (with PETSc)
939  SlepcFinalize();
940 # else
941  // or just end PETSc.
942  PetscFinalize();
943 # endif
944  }
945 #endif
946 
947 // There is a similar issue with CUDA: The destructor of static objects might
948 // run after the CUDA driver is unloaded. Hence, also release all memory
949 // related to CUDA vectors.
950 #ifdef DEAL_II_WITH_CUDA
951  GrowingVectorMemory<
952  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
953  release_unused_memory();
954  GrowingVectorMemory<
955  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
956  release_unused_memory();
957 #endif
958 
959 #ifdef DEAL_II_WITH_P4EST
960  // now end p4est and libsc
961  // Note: p4est has no finalize function
962  sc_finalize();
963 #endif
964 
965 
966  // only MPI_Finalize if we are running with MPI. We also need to do this
967  // when running PETSc, because we initialize MPI ourselves before
968  // calling PetscInitialize
969 #ifdef DEAL_II_WITH_MPI
970  if (job_supports_mpi() == true)
971  {
972 # if __cpp_lib_uncaught_exceptions >= 201411
973  // std::uncaught_exception() is deprecated in c++17
974  if (std::uncaught_exceptions() > 0)
975 # else
976  if (std::uncaught_exception() == true)
977 # endif
978  {
979  std::cerr
980  << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
981  << this_mpi_process(MPI_COMM_WORLD)
982  << ". Skipping MPI_Finalize() to avoid a deadlock."
983  << std::endl;
984  }
985  else
986  {
987  const int ierr = MPI_Finalize();
988  (void)ierr;
989  AssertNothrow(ierr == MPI_SUCCESS, dealii::ExcMPI(ierr));
990  }
991  }
992 #endif
993  }
994 
995 
996 
997  bool
998  job_supports_mpi()
999  {
1000 #ifdef DEAL_II_WITH_MPI
1001  int MPI_has_been_started = 0;
1002  const int ierr = MPI_Initialized(&MPI_has_been_started);
1003  AssertThrowMPI(ierr);
1004 
1005  return (MPI_has_been_started > 0);
1006 #else
1007  return false;
1008 #endif
1009  }
1010 
1011 
1012 
1013  std::vector<unsigned int>
1014  compute_index_owner(const IndexSet &owned_indices,
1015  const IndexSet &indices_to_look_up,
1016  const MPI_Comm &comm)
1017  {
1018  Assert(owned_indices.size() == indices_to_look_up.size(),
1019  ExcMessage("IndexSets have to have the same sizes."));
1020 
1021  Assert(
1022  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1023  ExcMessage("IndexSets have to have the same size on all processes."));
1024 
1025  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1026 
1027  // Step 1: setup dictionary
1028  // The input owned_indices can be partitioned arbitrarily. In the
1029  // dictionary, the index set is statically repartitioned among the
1030  // processes again and extended with information about the actual
1031  // owner of that index.
1032  internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
1033  owned_indices, indices_to_look_up, comm, owning_ranks);
1034 
1035  // Step 2: read dictionary
1036  // Communicate with the process who owns the index in the static
1037  // partition (i.e. in the dictionary). This process returns the actual
1038  // owner of the index.
1039  ConsensusAlgorithmSelector<
1040  std::pair<types::global_dof_index, types::global_dof_index>,
1041  unsigned int>
1042  consensus_algorithm(process, comm);
1043  consensus_algorithm.run();
1044 
1045  return owning_ranks;
1046  }
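 // A sketch of the contract of the function above, assuming two ranks and a
 // global index space of size 4: if rank 0 owns {0,1} and looks up {2},
 // while rank 1 owns {2,3} and looks up {1}, then rank 0 obtains {1} and
 // rank 1 obtains {0}, i.e. one owning rank per requested index, in the
 // order in which indices_to_look_up is traversed.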
1047 
1048  template class ConsensusAlgorithmSelector<
1049  std::pair<types::global_dof_index, types::global_dof_index>,
1050  unsigned int>;
1051 
1052 
1053 
1054  CollectiveMutex::CollectiveMutex()
1055  : locked(false)
1056  , request(MPI_REQUEST_NULL)
1057  {
1058  Utilities::MPI::MPI_InitFinalize::register_request(request);
1059  }
1060 
1061 
1062 
1063  CollectiveMutex::~CollectiveMutex()
1064  {
1065  Assert(
1066  !locked,
1067  ExcMessage(
1068  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1069 
1070  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
1071  }
1072 
1073 
1074 
1075  void
1076  CollectiveMutex::lock(MPI_Comm comm)
1077  {
1078  (void)comm;
1079 
1080  Assert(
1081  !locked,
1082  ExcMessage(
1083  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1084 
1085 #ifdef DEAL_II_WITH_MPI
1086 
1087  // TODO: For now, we implement this mutex with a blocking barrier
1088  // in the lock and unlock. It remains to be tested whether we can
1089  // move to a non-blocking barrier (code disabled below).
1090 
1091  const int ierr = MPI_Barrier(comm);
1092  AssertThrowMPI(ierr);
1093 
1094 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1095  // wait for non-blocking barrier to finish. This is a noop the
1096  // first time we lock().
1097  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1098  AssertThrowMPI(ierr);
1099 # else
1100  // nothing to do as blocking barrier already completed
1101 # endif
1102 #endif
1103 
1104  locked = true;
1105  }
1106 
1107 
1108 
1109  void
1110  CollectiveMutex::unlock(MPI_Comm comm)
1111  {
1112  (void)comm;
1113 
1114  Assert(
1115  locked,
1116  ExcMessage(
1117  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1118 
1119 #ifdef DEAL_II_WITH_MPI
1120 
1121  // TODO: For now, we implement this mutex with a blocking barrier
1122  // in the lock and unlock. It remains to be tested whether we can
1123  // move to a non-blocking barrier (code disabled below):
1124 
1125 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1126  const int ierr = MPI_Ibarrier(comm, &request);
1127  AssertThrowMPI(ierr);
1128 # else
1129  const int ierr = MPI_Barrier(comm);
1130  AssertThrowMPI(ierr);
1131 # endif
1132 #endif
1133 
1134  locked = false;
1135  }
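 // A usage sketch for the mutex, mirroring its use further up in this file:
 // keep one static mutex per critical communication phase and protect the
 // phase with a scoped lock, so that concurrent phases on the same
 // communicator cannot interleave their messages.
 //
 //   static CollectiveMutex mutex;
 //   CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
 //   // ... point-to-point communication using a tag reserved for this
 //   // phase ...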
1136 
1137 #include "mpi.inst"
1138  } // end of namespace MPI
1139 } // end of namespace Utilities
1140 
1141 DEAL_II_NAMESPACE_CLOSE