Reference documentation for deal.II version 9.2.0
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2020 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_tags.h>
23 #include <deal.II/base/multithread_info.h>
24 #include <deal.II/base/utilities.h>
25 
26 #include <deal.II/lac/la_parallel_block_vector.h>
27 #include <deal.II/lac/la_parallel_vector.h>
28 #include <deal.II/lac/vector_memory.h>
29 
30 #include <iostream>
31 #include <numeric>
32 #include <set>
33 #include <vector>
34 
35 #ifdef DEAL_II_WITH_TRILINOS
36 # ifdef DEAL_II_WITH_MPI
37 # include <deal.II/lac/trilinos_parallel_block_vector.h>
38 # include <deal.II/lac/trilinos_vector.h>
39 
40 # include <Epetra_MpiComm.h>
41 # endif
42 #endif
43 
44 #ifdef DEAL_II_WITH_PETSC
45 # include <deal.II/lac/petsc_block_vector.h>
46 # include <deal.II/lac/petsc_vector.h>
47 
48 # include <petscsys.h>
49 #endif
50 
51 #ifdef DEAL_II_WITH_SLEPC
52 # include <deal.II/lac/slepc_solver.h>
53 
54 # include <slepcsys.h>
55 #endif
56 
57 #ifdef DEAL_II_WITH_P4EST
58 # include <p4est_bits.h>
59 #endif
60 
61 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
62 # include <zoltan_cpp.h>
63 #endif
64 
65 DEAL_II_NAMESPACE_OPEN
66 
67 
68 namespace Utilities
69 {
70  IndexSet
71  create_evenly_distributed_partitioning(const unsigned int my_partition_id,
72  const unsigned int n_partitions,
73  const IndexSet::size_type total_size)
74  {
75  const unsigned int remain = total_size % n_partitions;
76 
77  const IndexSet::size_type min_size = total_size / n_partitions;
78 
 79  const IndexSet::size_type begin =
 80  min_size * my_partition_id + std::min(my_partition_id, remain);
81  const IndexSet::size_type end =
82  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
83  IndexSet result(total_size);
84  result.add_range(begin, end);
85  return result;
86  }
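A minimal worked sketch of the splitting rule above (an editorial illustration, not part of mpi.cc): distributing 10 indices over 3 partitions leaves a remainder of 1, which is handed to the first partition, so the ranges have sizes 4, 3 and 3.

  // Illustrative only; the numbers are made up.
  const IndexSet p0 = Utilities::create_evenly_distributed_partitioning(0, 3, 10); // [0,4)
  const IndexSet p1 = Utilities::create_evenly_distributed_partitioning(1, 3, 10); // [4,7)
  const IndexSet p2 = Utilities::create_evenly_distributed_partitioning(2, 3, 10); // [7,10)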
87 
88  namespace MPI
89  {
90  MinMaxAvg
91  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
92  {
93  MinMaxAvg result;
 94  min_max_avg(ArrayView<const double>(my_value),
 95  ArrayView<MinMaxAvg>(result),
96  mpi_communicator);
97 
98  return result;
99  }
100 
101 
102 
103  std::vector<MinMaxAvg>
104  min_max_avg(const std::vector<double> &my_values,
105  const MPI_Comm & mpi_communicator)
106  {
107  std::vector<MinMaxAvg> results(my_values.size());
108  min_max_avg(my_values, results, mpi_communicator);
109 
110  return results;
111  }
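A hedged usage sketch for the two overloads above (not part of this file; the local value and the helper that computes it are hypothetical):

  const double local_value = compute_local_quantity(); // hypothetical helper
  const Utilities::MPI::MinMaxAvg stats =
    Utilities::MPI::min_max_avg(local_value, MPI_COMM_WORLD);
  // stats.min, stats.max, stats.avg hold the statistics over all ranks;
  // stats.min_index and stats.max_index the ranks attaining min and max.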
112 
113 
114 
115 #ifdef DEAL_II_WITH_MPI
116  unsigned int
117  n_mpi_processes(const MPI_Comm &mpi_communicator)
118  {
119  int n_jobs = 1;
120  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
121  AssertThrowMPI(ierr);
122 
123  return n_jobs;
124  }
125 
126 
127  unsigned int
128  this_mpi_process(const MPI_Comm &mpi_communicator)
129  {
130  int rank = 0;
131  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
132  AssertThrowMPI(ierr);
133 
134  return rank;
135  }
136 
137 
138  MPI_Comm
139  duplicate_communicator(const MPI_Comm &mpi_communicator)
140  {
141  MPI_Comm new_communicator;
142  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
143  AssertThrowMPI(ierr);
144  return new_communicator;
145  }
146 
147 
148 
149  void
150  free_communicator(MPI_Comm &mpi_communicator)
151  {
152  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
153  const int ierr = MPI_Comm_free(&mpi_communicator);
154  AssertThrowMPI(ierr);
155  }
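A sketch of how the two functions above are meant to be paired (illustrative only): duplicate a communicator to obtain a private copy whose messages cannot collide with the caller's, and free it when done.

  MPI_Comm my_comm = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
  // ... communicate on my_comm with tags of our own choosing ...
  Utilities::MPI::free_communicator(my_comm); // my_comm is now MPI_COMM_NULL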
156 
157 
158 
159  int
160  create_group(const MPI_Comm & comm,
161  const MPI_Group &group,
162  const int tag,
163  MPI_Comm * new_comm)
164  {
165 # if DEAL_II_MPI_VERSION_GTE(3, 0)
166  return MPI_Comm_create_group(comm, group, tag, new_comm);
167 # else
168  int rank;
169  int ierr = MPI_Comm_rank(comm, &rank);
170  AssertThrowMPI(ierr);
171 
172  int grp_rank;
173  ierr = MPI_Group_rank(group, &grp_rank);
174  AssertThrowMPI(ierr);
175  if (grp_rank == MPI_UNDEFINED)
176  {
177  *new_comm = MPI_COMM_NULL;
178  return MPI_SUCCESS;
179  }
180 
181  int grp_size;
182  ierr = MPI_Group_size(group, &grp_size);
183  AssertThrowMPI(ierr);
184 
185  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
186  AssertThrowMPI(ierr);
187 
188  MPI_Group parent_grp;
189  ierr = MPI_Comm_group(comm, &parent_grp);
190  AssertThrowMPI(ierr);
191 
192  std::vector<int> pids(grp_size);
193  std::vector<int> grp_pids(grp_size);
194  std::iota(grp_pids.begin(), grp_pids.end(), 0);
195  ierr = MPI_Group_translate_ranks(
196  group, grp_size, grp_pids.data(), parent_grp, pids.data());
197  AssertThrowMPI(ierr);
198  ierr = MPI_Group_free(&parent_grp);
199  AssertThrowMPI(ierr);
200 
201  MPI_Comm comm_old = *new_comm;
202  MPI_Comm ic;
203  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
204  {
205  const int gid = grp_rank / merge_sz;
206  comm_old = *new_comm;
207  if (gid % 2 == 0)
208  {
209  if ((gid + 1) * merge_sz < grp_size)
210  {
211  ierr = (MPI_Intercomm_create(
212  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
213  AssertThrowMPI(ierr);
214  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
215  AssertThrowMPI(ierr);
216  }
217  }
218  else
219  {
220  ierr = MPI_Intercomm_create(
221  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
222  AssertThrowMPI(ierr);
223  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
224  AssertThrowMPI(ierr);
225  }
226  if (*new_comm != comm_old)
227  {
228  ierr = MPI_Comm_free(&ic);
229  AssertThrowMPI(ierr);
230  ierr = MPI_Comm_free(&comm_old);
231  AssertThrowMPI(ierr);
232  }
233  }
234 
235  return MPI_SUCCESS;
236 # endif
237  }
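An illustrative call of create_group() (not from this file); `sub_group` is a hypothetical MPI_Group previously extracted from the communicator's group, e.g. via MPI_Group_incl:

  MPI_Comm  sub_comm;
  const int ierr = Utilities::MPI::create_group(MPI_COMM_WORLD,
                                                sub_group,
                                                /*tag=*/55,
                                                &sub_comm);
  AssertThrowMPI(ierr);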
238 
239 
240 
241  std::vector<IndexSet>
 242  create_ascending_partitioning(const MPI_Comm &comm,
 243  const IndexSet::size_type local_size)
244  {
245  const unsigned int n_proc = n_mpi_processes(comm);
246  const std::vector<IndexSet::size_type> sizes =
247  all_gather(comm, local_size);
248  const auto total_size =
249  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
250 
251  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
252 
 253  IndexSet::size_type begin = 0;
 254  for (unsigned int i = 0; i < n_proc; ++i)
255  {
256  res[i].add_range(begin, begin + sizes[i]);
257  begin = begin + sizes[i];
258  }
259 
260  return res;
261  }
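A worked sketch of the function above (editorial; the local sizes are made up): if two ranks contribute local_size values of 3 and 5, every rank obtains the same vector of index sets { [0,3), [3,8) }.

  const std::vector<IndexSet> partitions =
    Utilities::MPI::create_ascending_partitioning(MPI_COMM_WORLD, my_local_size);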
262 
263  IndexSet
 264  create_evenly_distributed_partitioning(const MPI_Comm &comm,
 265  const IndexSet::size_type total_size)
266  {
267  const unsigned int this_proc = this_mpi_process(comm);
268  const unsigned int n_proc = n_mpi_processes(comm);
269 
 270  return Utilities::create_evenly_distributed_partitioning(this_proc,
 271  n_proc,
272  total_size);
273  }
274 
275 
276 
 281  class ConsensusAlgorithmsProcessTargets
 282  : public ConsensusAlgorithms::Process<unsigned int, unsigned int>
283  {
284  public:
285  ConsensusAlgorithmsProcessTargets(const std::vector<unsigned int> &target)
286  : target(target)
287  {}
288 
289  using T1 = unsigned int;
290  using T2 = unsigned int;
291 
292  virtual void
293  answer_request(const unsigned int other_rank,
294  const std::vector<T1> &,
295  std::vector<T2> &) override
296  {
297  this->sources.push_back(other_rank);
298  }
299 
305  virtual std::vector<unsigned int>
306  compute_targets() override
307  {
308  return target;
309  }
310 
316  std::vector<unsigned int>
 317  get_result()
 318  {
319  std::sort(sources.begin(), sources.end());
320  return sources;
321  }
322 
323  private:
327  const std::vector<unsigned int> &target;
328 
332  std::vector<unsigned int> sources;
333  };
334 
335 
336 
337  std::vector<unsigned int>
 338  compute_point_to_point_communication_pattern(
 339  const MPI_Comm & mpi_comm,
340  const std::vector<unsigned int> &destinations)
341  {
342  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
343  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
344  (void)myid;
345  (void)n_procs;
346 
347  for (const unsigned int destination : destinations)
348  {
349  (void)destination;
350  AssertIndexRange(destination, n_procs);
351  Assert(destination != myid,
352  ExcMessage(
353  "There is no point in communicating with ourselves."));
354  }
355 
356 # if DEAL_II_MPI_VERSION_GTE(3, 0)
357 
358  ConsensusAlgorithmsProcessTargets process(destinations);
 359  ConsensusAlgorithms::NBX<ConsensusAlgorithmsProcessTargets::T1,
 360  ConsensusAlgorithmsProcessTargets::T2>
 361  consensus_algorithm(process, mpi_comm);
362  consensus_algorithm.run();
363  return process.get_result();
364 
365 # elif DEAL_II_MPI_VERSION_GTE(2, 2)
366 
367  static CollectiveMutex mutex;
368  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
369 
370  const int mpi_tag =
 371  internal::Tags::compute_point_to_point_communication_pattern;
 372 
373  // Calculate the number of messages to send to each process
374  std::vector<unsigned int> dest_vector(n_procs);
375  for (const auto &el : destinations)
376  ++dest_vector[el];
377 
378  // Find how many processes will send to this one
379  // by reducing with sum and then scattering the
380  // results over all processes
381  unsigned int n_recv_from;
382  const int ierr = MPI_Reduce_scatter_block(
383  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
384 
385  AssertThrowMPI(ierr);
386 
387  // Send myid to every process in `destinations` vector...
388  std::vector<MPI_Request> send_requests(destinations.size());
389  for (const auto &el : destinations)
390  {
391  const int ierr =
392  MPI_Isend(&myid,
393  1,
394  MPI_UNSIGNED,
395  el,
396  mpi_tag,
397  mpi_comm,
398  send_requests.data() + (&el - destinations.data()));
399  AssertThrowMPI(ierr);
400  }
401 
402 
403  // Receive `n_recv_from` times from the processes
404  // who communicate with this one. Store the obtained id's
405  // in the resulting vector
406  std::vector<unsigned int> origins(n_recv_from);
407  for (auto &el : origins)
408  {
409  const int ierr = MPI_Recv(&el,
410  1,
411  MPI_UNSIGNED,
412  MPI_ANY_SOURCE,
413  mpi_tag,
414  mpi_comm,
415  MPI_STATUS_IGNORE);
416  AssertThrowMPI(ierr);
417  }
418 
419  if (destinations.size() > 0)
420  {
421  const int ierr = MPI_Waitall(destinations.size(),
422  send_requests.data(),
423  MPI_STATUSES_IGNORE);
424  AssertThrowMPI(ierr);
425  }
426 
427  return origins;
428 # else
429  // let all processors communicate the maximal number of destinations
430  // they have
431  const unsigned int max_n_destinations =
432  Utilities::MPI::max(destinations.size(), mpi_comm);
433 
434  if (max_n_destinations == 0)
435  // all processes have nothing to send/receive:
436  return std::vector<unsigned int>();
437 
438  // now that we know the number of data packets every processor wants to
439  // send, set up a buffer with the maximal size and copy our destinations
440  // in there, padded with -1's
441  std::vector<unsigned int> my_destinations(max_n_destinations,
 442  numbers::invalid_unsigned_int);
 443  std::copy(destinations.begin(),
444  destinations.end(),
445  my_destinations.begin());
446 
447  // now exchange these (we could communicate less data if we used
448  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
449  // processors in this case, which is more expensive than the reduction
450  // operation above in MPI_Allreduce)
451  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
452  const int ierr = MPI_Allgather(my_destinations.data(),
453  max_n_destinations,
454  MPI_UNSIGNED,
455  all_destinations.data(),
456  max_n_destinations,
457  MPI_UNSIGNED,
458  mpi_comm);
459  AssertThrowMPI(ierr);
460 
461  // now we know who is going to communicate with whom. collect who is
462  // going to communicate with us!
463  std::vector<unsigned int> origins;
464  for (unsigned int i = 0; i < n_procs; ++i)
465  for (unsigned int j = 0; j < max_n_destinations; ++j)
466  if (all_destinations[i * max_n_destinations + j] == myid)
467  origins.push_back(i);
468  else if (all_destinations[i * max_n_destinations + j] ==
 469  numbers::invalid_unsigned_int)
 470  break;
471 
472  return origins;
473 # endif
474  }
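Illustrative use of the function above (not part of this file; `my_destinations` is a hypothetical list of ranks this process will send data to):

  const std::vector<unsigned int> senders =
    Utilities::MPI::compute_point_to_point_communication_pattern(
      MPI_COMM_WORLD, my_destinations);
  // `senders` lists the ranks that are going to send to this process.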
475 
476 
477 
478  unsigned int
 479  compute_n_point_to_point_communications(
 480  const MPI_Comm & mpi_comm,
481  const std::vector<unsigned int> &destinations)
482  {
483  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
484 
485  for (const unsigned int destination : destinations)
486  {
487  (void)destination;
488  AssertIndexRange(destination, n_procs);
489  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
490  ExcMessage(
491  "There is no point in communicating with ourselves."));
492  }
493 
494  // Calculate the number of messages to send to each process
495  std::vector<unsigned int> dest_vector(n_procs);
496  for (const auto &el : destinations)
497  ++dest_vector[el];
498 
499 # if DEAL_II_MPI_VERSION_GTE(2, 2)
500  // Find out how many processes will send to this one
501  // MPI_Reduce_scatter(_block) does exactly this
502  unsigned int n_recv_from = 0;
503 
504  const int ierr = MPI_Reduce_scatter_block(
505  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
506 
507  AssertThrowMPI(ierr);
508 
509  return n_recv_from;
510 # else
511  // Find out how many processes will send to this one
512  // by reducing with sum and then scattering the
513  // results over all processes
514  std::vector<unsigned int> buffer(dest_vector.size());
515  unsigned int n_recv_from = 0;
516 
517  MPI_Reduce(dest_vector.data(),
518  buffer.data(),
519  dest_vector.size(),
520  MPI_UNSIGNED,
521  MPI_SUM,
522  0,
523  mpi_comm);
524  MPI_Scatter(buffer.data(),
525  1,
526  MPI_UNSIGNED,
527  &n_recv_from,
528  1,
529  MPI_UNSIGNED,
530  0,
531  mpi_comm);
532 
533  return n_recv_from;
534 # endif
535  }
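Illustrative counterpart to the previous sketch (same hypothetical `my_destinations`): when only the number of incoming messages is needed, for example to bound a receive loop, the cheaper call is

  const unsigned int n_incoming =
    Utilities::MPI::compute_n_point_to_point_communications(MPI_COMM_WORLD,
                                                            my_destinations);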
536 
537 
538 
539  namespace
540  {
 541  // custom MPI_Op for the min_max_avg() reduction below
542  void
543  max_reduce(const void *in_lhs_,
544  void * inout_rhs_,
545  int * len,
546  MPI_Datatype *)
547  {
548  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
549  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
550 
551  for (int i = 0; i < *len; i++)
552  {
553  inout_rhs[i].sum += in_lhs[i].sum;
554  if (inout_rhs[i].min > in_lhs[i].min)
555  {
556  inout_rhs[i].min = in_lhs[i].min;
557  inout_rhs[i].min_index = in_lhs[i].min_index;
558  }
559  else if (inout_rhs[i].min == in_lhs[i].min)
560  {
561  // choose lower cpu index when tied to make operator commutative
562  if (inout_rhs[i].min_index > in_lhs[i].min_index)
563  inout_rhs[i].min_index = in_lhs[i].min_index;
564  }
565 
566  if (inout_rhs[i].max < in_lhs[i].max)
567  {
568  inout_rhs[i].max = in_lhs[i].max;
569  inout_rhs[i].max_index = in_lhs[i].max_index;
570  }
571  else if (inout_rhs[i].max == in_lhs[i].max)
572  {
573  // choose lower cpu index when tied to make operator commutative
574  if (inout_rhs[i].max_index > in_lhs[i].max_index)
575  inout_rhs[i].max_index = in_lhs[i].max_index;
576  }
577  }
578  }
579  } // namespace
580 
581 
582 
583  void
 584  min_max_avg(const ArrayView<const double> &my_values,
 585  const ArrayView<MinMaxAvg> & result,
586  const MPI_Comm & mpi_communicator)
587  {
588  // If MPI was not started, we have a serial computation and cannot run
589  // the other MPI commands
590  if (job_supports_mpi() == false ||
591  Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
592  {
593  for (unsigned int i = 0; i < my_values.size(); i++)
594  {
595  result[i].sum = my_values[i];
596  result[i].avg = my_values[i];
597  result[i].min = my_values[i];
598  result[i].max = my_values[i];
599  result[i].min_index = 0;
600  result[i].max_index = 0;
601  }
602  return;
603  }
604 
605  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
606  Utilities::MPI::max(my_values.size(), mpi_communicator));
607 
608  AssertDimension(my_values.size(), result.size());
609 
610 
611 
612  // To avoid uninitialized values on some MPI implementations, provide
613  // result with a default value already...
614  MinMaxAvg dummy = {0.,
 615  std::numeric_limits<double>::max(),
 616  -std::numeric_limits<double>::max(),
 617  0,
618  0,
619  0.};
620 
621  for (auto &i : result)
622  i = dummy;
623 
624  const unsigned int my_id =
625  ::Utilities::MPI::this_mpi_process(mpi_communicator);
626  const unsigned int numproc =
627  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
628 
629  MPI_Op op;
630  int ierr =
631  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
632  true,
633  &op);
634  AssertThrowMPI(ierr);
635 
636  std::vector<MinMaxAvg> in(my_values.size());
637 
638  for (unsigned int i = 0; i < my_values.size(); i++)
639  {
640  in[i].sum = in[i].min = in[i].max = my_values[i];
641  in[i].min_index = in[i].max_index = my_id;
642  }
643 
644  MPI_Datatype type;
645  int lengths[] = {3, 2, 1};
646  MPI_Aint displacements[] = {0,
647  offsetof(MinMaxAvg, min_index),
648  offsetof(MinMaxAvg, avg)};
649  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
650 
651  ierr = MPI_Type_create_struct(3, lengths, displacements, types, &type);
652  AssertThrowMPI(ierr);
653 
654  ierr = MPI_Type_commit(&type);
655  AssertThrowMPI(ierr);
656  ierr = MPI_Allreduce(
657  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
658  AssertThrowMPI(ierr);
659 
660  ierr = MPI_Type_free(&type);
661  AssertThrowMPI(ierr);
662 
663  ierr = MPI_Op_free(&op);
664  AssertThrowMPI(ierr);
665 
666  for (auto &r : result)
667  r.avg = r.sum / numproc;
668  }
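A hedged sketch of the ArrayView interface above (illustrative; the two local values are made up):

  const std::vector<double> local_data = {residual_norm, assembly_time}; // hypothetical
  std::vector<Utilities::MPI::MinMaxAvg> stats(local_data.size());
  Utilities::MPI::min_max_avg(make_array_view(local_data),
                              make_array_view(stats),
                              MPI_COMM_WORLD);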
669 
670 
671 #else
672 
673  unsigned int
674  n_mpi_processes(const MPI_Comm &)
675  {
676  return 1;
677  }
678 
679 
680 
681  unsigned int
682  this_mpi_process(const MPI_Comm &)
683  {
684  return 0;
685  }
686 
687 
688 
689  std::vector<IndexSet>
690  create_ascending_partitioning(const MPI_Comm & /*comm*/,
691  const IndexSet::size_type local_size)
692  {
693  return std::vector<IndexSet>(1, complete_index_set(local_size));
694  }
695 
696  IndexSet
 697  create_evenly_distributed_partitioning(const MPI_Comm & /*comm*/,
 698  const IndexSet::size_type total_size)
699  {
700  return complete_index_set(total_size);
701  }
702 
703 
704 
705  MPI_Comm
706  duplicate_communicator(const MPI_Comm &mpi_communicator)
707  {
708  return mpi_communicator;
709  }
710 
711 
712 
713  void
714  free_communicator(MPI_Comm & /*mpi_communicator*/)
715  {}
716 
717 
718 
719  void
720  min_max_avg(const ArrayView<const double> &my_values,
721  const ArrayView<MinMaxAvg> & result,
722  const MPI_Comm &)
723  {
724  AssertDimension(my_values.size(), result.size());
725 
726  for (unsigned int i = 0; i < my_values.size(); i++)
727  {
728  result[i].sum = my_values[i];
729  result[i].avg = my_values[i];
730  result[i].min = my_values[i];
731  result[i].max = my_values[i];
732  result[i].min_index = 0;
733  result[i].max_index = 0;
734  }
735  }
736 
737 #endif
738 
739 
740 
 741  MPI_InitFinalize::MPI_InitFinalize(int &argc,
 742  char **& argv,
743  const unsigned int max_num_threads)
744  {
745  static bool constructor_has_already_run = false;
746  (void)constructor_has_already_run;
747  Assert(constructor_has_already_run == false,
748  ExcMessage("You can only create a single object of this class "
749  "in a program since it initializes the MPI system."));
750 
751 
752  int ierr = 0;
753 #ifdef DEAL_II_WITH_MPI
754  // if we have PETSc, we will initialize it and let it handle MPI.
755  // Otherwise, we will do it.
756  int MPI_has_been_started = 0;
757  ierr = MPI_Initialized(&MPI_has_been_started);
758  AssertThrowMPI(ierr);
759  AssertThrow(MPI_has_been_started == 0,
760  ExcMessage("MPI error. You can only start MPI once!"));
761 
762  int provided;
763  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
764  // we might use several threads but never call two MPI functions at the
 765  // same time. For an explanation of why we do this, see
766  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
767  int wanted = MPI_THREAD_SERIALIZED;
768  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
769  AssertThrowMPI(ierr);
770 
771  // disable for now because at least some implementations always return
772  // MPI_THREAD_SINGLE.
773  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
774  // ExcMessage("MPI reports that we are not allowed to use multiple
775  // threads."));
776 #else
777  // make sure the compiler doesn't warn about these variables
778  (void)argc;
779  (void)argv;
780  (void)ierr;
781 #endif
782 
783  // we are allowed to call MPI_Init ourselves and PETScInitialize will
784  // detect this. This allows us to use MPI_Init_thread instead.
785 #ifdef DEAL_II_WITH_PETSC
786 # ifdef DEAL_II_WITH_SLEPC
787  // Initialize SLEPc (with PETSc):
788  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
 789  AssertThrow(ierr == 0, SLEPcWrappers::SolverBase::ExcSLEPcError(ierr));
 790 # else
791  // or just initialize PETSc alone:
792  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
793  AssertThrow(ierr == 0, ExcPETScError(ierr));
794 # endif
795 
796  // Disable PETSc exception handling. This just prints a large wall
797  // of text that is not particularly helpful for what we do:
798  PetscPopSignalHandler();
799 #endif
800 
801  // Initialize zoltan
802 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
803  float version;
804  Zoltan_Initialize(argc, argv, &version);
805 #endif
806 
807 #ifdef DEAL_II_WITH_P4EST
808  // Initialize p4est and libsc components
809 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
810 # else
811  // This feature is broken in version 2.0.0 for calls to
812  // MPI_Comm_create_group (see cburstedde/p4est#30).
813  // Disabling it leads to more verbose p4est error messages
814  // which should be fine.
815  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
816 # endif
817  p4est_init(nullptr, SC_LP_SILENT);
818 #endif
819 
820  constructor_has_already_run = true;
821 
822 
823  // Now also see how many threads we'd like to run
824  if (max_num_threads != numbers::invalid_unsigned_int)
825  {
826  // set maximum number of threads (also respecting the environment
827  // variable that the called function evaluates) based on what the
828  // user asked
829  MultithreadInfo::set_thread_limit(max_num_threads);
830  }
831  else
832  // user wants automatic choice
833  {
834 #ifdef DEAL_II_WITH_MPI
835  // we need to figure out how many MPI processes there are on the
836  // current node, as well as how many CPU cores we have. for the
837  // first task, check what get_hostname() returns and then do an
838  // allgather so each processor gets the answer
839  //
840  // in calculating the length of the string, don't forget the
841  // terminating \0 on C-style strings
842  const std::string hostname = Utilities::System::get_hostname();
843  const unsigned int max_hostname_size =
844  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
845  std::vector<char> hostname_array(max_hostname_size);
846  std::copy(hostname.c_str(),
847  hostname.c_str() + hostname.size() + 1,
848  hostname_array.begin());
849 
850  std::vector<char> all_hostnames(max_hostname_size *
851  MPI::n_mpi_processes(MPI_COMM_WORLD));
852  const int ierr = MPI_Allgather(hostname_array.data(),
853  max_hostname_size,
854  MPI_CHAR,
855  all_hostnames.data(),
856  max_hostname_size,
857  MPI_CHAR,
858  MPI_COMM_WORLD);
859  AssertThrowMPI(ierr);
860 
 861  // count how often our own hostname appears and determine which
 862  // instance among these the current process is
863  unsigned int n_local_processes = 0;
864  unsigned int nth_process_on_host = 0;
865  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
866  ++i)
867  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
868  hostname)
869  {
870  ++n_local_processes;
871  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
872  ++nth_process_on_host;
873  }
874  Assert(nth_process_on_host > 0, ExcInternalError());
875 
876 
877  // compute how many cores each process gets. if the number does not
878  // divide evenly, then we get one more core if we are among the
879  // first few processes
880  //
881  // if the number would be zero, round up to one since every process
882  // needs to have at least one thread
883  const unsigned int n_threads =
884  std::max(MultithreadInfo::n_cores() / n_local_processes +
885  (nth_process_on_host <=
886  MultithreadInfo::n_cores() % n_local_processes ?
887  1 :
888  0),
889  1U);
890 #else
891  const unsigned int n_threads = MultithreadInfo::n_cores();
892 #endif
893 
894  // finally set this number of threads
 895  MultithreadInfo::set_thread_limit(n_threads);
 896  }
897  }
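The canonical way this constructor is used in application code (a sketch, not part of this file):

  int main(int argc, char **argv)
  {
    using namespace dealii;
    Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
    // ... run the parallel program; MPI (and PETSc/SLEPc/p4est, where
    // configured) is finalized automatically when this object goes out
    // of scope ...
  }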
898 
899 
900 
901  void
 902  MPI_InitFinalize::register_request(MPI_Request &request)
 903  {
904  // insert if it is not in the set already:
905  requests.insert(&request);
906  }
907 
908 
909 
910  void
 911  MPI_InitFinalize::unregister_request(MPI_Request &request)
 912  {
913  Assert(
914  requests.find(&request) != requests.end(),
915  ExcMessage(
916  "You tried to call unregister_request() with an invalid request."));
917 
918  requests.erase(&request);
919  }
920 
921 
922 
923  std::set<MPI_Request *> MPI_InitFinalize::requests;
924 
925 
926 
 927  MPI_InitFinalize::~MPI_InitFinalize()
 928  {
929  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
930  // are no longer used at this point. this is relevant because the static
931  // object destructors run for these vectors at the end of the program
932  // would run after MPI_Finalize is called, leading to errors
933 
934 #ifdef DEAL_II_WITH_MPI
935  // Before exiting, wait for nonblocking communication to complete:
936  for (auto request : requests)
937  {
938  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
939  AssertThrowMPI(ierr);
940  }
941 
942  // Start with deal.II MPI vectors and delete vectors from the pools:
 943  GrowingVectorMemory<
 944  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
 945  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
 946  release_unused_memory();
 947  GrowingVectorMemory<
 948  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
 949  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
 950  release_unused_memory();
951 
952  // Next with Trilinos:
953 # if defined(DEAL_II_WITH_TRILINOS)
 954  GrowingVectorMemory<
 955  TrilinosWrappers::MPI::Vector>::release_unused_memory();
 956  GrowingVectorMemory<
 957  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
958 # endif
959 #endif
960 
961 
962  // Now deal with PETSc (with or without MPI). Only delete the vectors if
963  // finalize hasn't been called yet, otherwise this will lead to errors.
964 #ifdef DEAL_II_WITH_PETSC
965  if ((PetscInitializeCalled == PETSC_TRUE) &&
966  (PetscFinalizeCalled == PETSC_FALSE))
967  {
 968  GrowingVectorMemory<
 969  PETScWrappers::MPI::Vector>::release_unused_memory();
 970  GrowingVectorMemory<
 971  PETScWrappers::MPI::BlockVector>::release_unused_memory();
972 
973 # ifdef DEAL_II_WITH_SLEPC
974  // and now end SLEPc (with PETSc)
975  SlepcFinalize();
976 # else
977  // or just end PETSc.
978  PetscFinalize();
979 # endif
980  }
981 #endif
982 
983 // There is a similar issue with CUDA: The destructor of static objects might
984 // run after the CUDA driver is unloaded. Hence, also release all memory
985 // related to CUDA vectors.
986 #ifdef DEAL_II_WITH_CUDA
 987  GrowingVectorMemory<
 988  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
 989  release_unused_memory();
 990  GrowingVectorMemory<
 991  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
 992  release_unused_memory();
993 #endif
994 
995 #ifdef DEAL_II_WITH_P4EST
996  // now end p4est and libsc
997  // Note: p4est has no finalize function
998  sc_finalize();
999 #endif
1000 
1001 
1002  // only MPI_Finalize if we are running with MPI. We also need to do this
1003  // when running PETSc, because we initialize MPI ourselves before
1004  // calling PetscInitialize
1005 #ifdef DEAL_II_WITH_MPI
1006  if (job_supports_mpi() == true)
1007  {
1008 # if __cpp_lib_uncaught_exceptions >= 201411
1009  // std::uncaught_exception() is deprecated in c++17
1010  if (std::uncaught_exceptions() > 0)
1011 # else
1012  if (std::uncaught_exception() == true)
1013 # endif
1014  {
1015  // do not try to call MPI_Finalize to avoid a deadlock.
1016  }
1017  else
1018  {
1019  const int ierr = MPI_Finalize();
1020  (void)ierr;
1021  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1022  }
1023  }
1024 #endif
1025  }
1026 
1027 
1028 
1029  bool
 1030  job_supports_mpi()
 1031  {
1032 #ifdef DEAL_II_WITH_MPI
1033  int MPI_has_been_started = 0;
1034  const int ierr = MPI_Initialized(&MPI_has_been_started);
1035  AssertThrowMPI(ierr);
1036 
1037  return (MPI_has_been_started > 0);
1038 #else
1039  return false;
1040 #endif
1041  }
1042 
1043 
1044 
1045  std::vector<unsigned int>
1046  compute_index_owner(const IndexSet &owned_indices,
1047  const IndexSet &indices_to_look_up,
1048  const MPI_Comm &comm)
1049  {
1050  Assert(owned_indices.size() == indices_to_look_up.size(),
1051  ExcMessage("IndexSets have to have the same sizes."));
1052 
1053  Assert(
1054  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1055  ExcMessage("IndexSets have to have the same size on all processes."));
1056 
1057  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1058 
1059  // Step 1: setup dictionary
1060  // The input owned_indices can be partitioned arbitrarily. In the
1061  // dictionary, the index set is statically repartitioned among the
 1062  // processes again and extended with information about the actual
 1063  // owner of each index.
 1064  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
 1065  owned_indices, indices_to_look_up, comm, owning_ranks);
1066 
1067  // Step 2: read dictionary
1068  // Communicate with the process who owns the index in the static
1069  // partition (i.e. in the dictionary). This process returns the actual
1070  // owner of the index.
 1071  ConsensusAlgorithms::Selector<
 1072  std::pair<types::global_dof_index, types::global_dof_index>,
1073  unsigned int>
1074  consensus_algorithm(process, comm);
1075  consensus_algorithm.run();
1076 
1077  return owning_ranks;
1078  }
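An illustrative call (not from this file); `locally_owned` and `ghost_indices` are hypothetical IndexSets of identical global size:

  const std::vector<unsigned int> owners =
    Utilities::MPI::compute_index_owner(locally_owned, ghost_indices, MPI_COMM_WORLD);
  // owners[k] is the rank that owns the k-th element of ghost_indices.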
1079 
1080 
1081 
 1082  CollectiveMutex::CollectiveMutex()
 1083  : locked(false)
1084  , request(MPI_REQUEST_NULL)
1085  {
 1086  Utilities::MPI::MPI_InitFinalize::register_request(request);
 1087  }
1088 
1089 
1090 
 1091  CollectiveMutex::~CollectiveMutex()
 1092  {
1093  Assert(
1094  !locked,
1095  ExcMessage(
1096  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1097 
 1098  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
 1099  }
1100 
1101 
1102 
1103  void
 1104  CollectiveMutex::lock(MPI_Comm comm)
 1105  {
1106  (void)comm;
1107 
1108  Assert(
1109  !locked,
1110  ExcMessage(
1111  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1112 
1113 #ifdef DEAL_II_WITH_MPI
1114 
1115  // TODO: For now, we implement this mutex with a blocking barrier
 1116  // in the lock and unlock. It remains to be tested whether we can move
 1117  // to a nonblocking barrier (code disabled below).
1118 
1119  const int ierr = MPI_Barrier(comm);
1120  AssertThrowMPI(ierr);
1121 
1122 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1123  // wait for non-blocking barrier to finish. This is a noop the
1124  // first time we lock().
1125  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1126  AssertThrowMPI(ierr);
1127 # else
1128  // nothing to do as blocking barrier already completed
1129 # endif
1130 #endif
1131 
1132  locked = true;
1133  }
1134 
1135 
1136 
1137  void
 1138  CollectiveMutex::unlock(MPI_Comm comm)
 1139  {
1140  (void)comm;
1141 
1142  Assert(
1143  locked,
1144  ExcMessage(
1145  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1146 
1147 #ifdef DEAL_II_WITH_MPI
1148 
1149  // TODO: For now, we implement this mutex with a blocking barrier
 1150  // in the lock and unlock. It remains to be tested whether we can move
 1151  // to a nonblocking barrier (code disabled below):
1152 
1153 # if 0 && DEAL_II_MPI_VERSION_GTE(3, 0)
1154  const int ierr = MPI_Ibarrier(comm, &request);
1155  AssertThrowMPI(ierr);
1156 # else
1157  const int ierr = MPI_Barrier(comm);
1158  AssertThrowMPI(ierr);
1159 # endif
1160 #endif
1161 
1162  locked = false;
1163  }
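A sketch of the locking pattern these two functions support (the same pattern is used further up in this file through CollectiveMutex::ScopedLock):

  static Utilities::MPI::CollectiveMutex      mutex;
  Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
  // ... point-to-point communication that must not interleave with other
  // users of the same tags; unlock() runs when `lock` goes out of scope.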
1164 
1165 
1166  template std::vector<unsigned int>
1167  compute_set_union(const std::vector<unsigned int> &vec,
1168  const MPI_Comm & comm);
1169 
1170 
1171  template std::set<unsigned int>
1172  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm &comm);
1173 
1174 #include "mpi.inst"
1175  } // end of namespace MPI
1176 } // end of namespace Utilities
 1177 
 1178 DEAL_II_NAMESPACE_CLOSE