Reference documentation for deal.II version GIT 0b65fff18a 2023-09-27 19:30:02+00:00
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2023 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/mpi_large_count.h>
23 #include <deal.II/base/mpi_tags.h>
24 #include <deal.II/base/multithread_info.h>
25 #include <deal.II/base/utilities.h>
26 
32 
33 #include <boost/serialization/utility.hpp>
34 
35 #include <Kokkos_Core.hpp>
36 
37 #include <iostream>
38 #include <limits>
39 #include <numeric>
40 #include <set>
41 #include <vector>
42 
43 #ifdef DEAL_II_WITH_TRILINOS
44 # ifdef DEAL_II_WITH_MPI
45 # include <deal.II/lac/trilinos_parallel_block_vector.h>
46 # include <deal.II/lac/trilinos_vector.h>
47 
48 # include <Epetra_MpiComm.h>
49 # endif
50 #endif
51 
52 #ifdef DEAL_II_WITH_PETSC
53 # include <deal.II/lac/petsc_block_vector.h>
54 # include <deal.II/lac/petsc_vector.h>
55 
56 # include <petscsys.h>
57 #endif
58 
59 #ifdef DEAL_II_WITH_SLEPC
60 # include <deal.II/lac/slepc_solver.h>
61 
62 # include <slepcsys.h>
63 #endif
64 
65 #ifdef DEAL_II_WITH_P4EST
66 # include <p4est_bits.h>
67 #endif
68 
69 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
70 # include <zoltan_cpp.h>
71 #endif
72 
73 DEAL_II_NAMESPACE_OPEN
74 
75 
76 namespace Utilities
77 {
78  IndexSet
79  create_evenly_distributed_partitioning(
80  const unsigned int my_partition_id,
81  const unsigned int n_partitions,
82  const types::global_dof_index total_size)
83  {
84  static_assert(std::is_same_v<types::global_dof_index, IndexSet::size_type>,
85  "IndexSet::size_type must match types::global_dof_index for "
86  "using this function");
87  const unsigned int remain = total_size % n_partitions;
88 
89  const IndexSet::size_type min_size = total_size / n_partitions;
90 
91  const IndexSet::size_type begin =
92  min_size * my_partition_id + std::min(my_partition_id, remain);
93  const IndexSet::size_type end =
94  min_size * (my_partition_id + 1) + std::min(my_partition_id + 1, remain);
95  IndexSet result(total_size);
96  result.add_range(begin, end);
97  return result;
98  }
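  // Worked example (illustrative sketch, not part of mpi.cc): distributing
  // total_size = 10 indices over n_partitions = 3 gives min_size = 3 and
  // remain = 1, so the first partition receives one extra index:
  //
  //   const IndexSet is0 = Utilities::create_evenly_distributed_partitioning(0, 3, 10); // [0,4)
  //   const IndexSet is1 = Utilities::create_evenly_distributed_partitioning(1, 3, 10); // [4,7)
  //   const IndexSet is2 = Utilities::create_evenly_distributed_partitioning(2, 3, 10); // [7,10)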
99 
100  namespace MPI
101  {
102 #ifdef DEAL_II_WITH_MPI
103  // Provide definitions of template variables for all valid instantiations.
104  template const MPI_Datatype mpi_type_id_for_type<bool>;
105  template const MPI_Datatype mpi_type_id_for_type<char>;
106  template const MPI_Datatype mpi_type_id_for_type<signed char>;
107  template const MPI_Datatype mpi_type_id_for_type<short>;
108  template const MPI_Datatype mpi_type_id_for_type<int>;
109  template const MPI_Datatype mpi_type_id_for_type<long int>;
110  template const MPI_Datatype mpi_type_id_for_type<unsigned char>;
111  template const MPI_Datatype mpi_type_id_for_type<unsigned short>;
112  template const MPI_Datatype mpi_type_id_for_type<unsigned long int>;
113  template const MPI_Datatype mpi_type_id_for_type<unsigned long long int>;
114  template const MPI_Datatype mpi_type_id_for_type<float>;
115  template const MPI_Datatype mpi_type_id_for_type<double>;
116  template const MPI_Datatype mpi_type_id_for_type<long double>;
117  template const MPI_Datatype mpi_type_id_for_type<std::complex<float>>;
118  template const MPI_Datatype mpi_type_id_for_type<std::complex<double>>;
119 #endif
120 
121 
122  MinMaxAvg
123  min_max_avg(const double my_value, const MPI_Comm mpi_communicator)
124  {
125  MinMaxAvg result;
126  min_max_avg(ArrayView<const double>(my_value),
127  ArrayView<MinMaxAvg>(result),
128  mpi_communicator);
129 
130  return result;
131  }
132 
133 
134 
135  std::vector<MinMaxAvg>
136  min_max_avg(const std::vector<double> &my_values,
137  const MPI_Comm mpi_communicator)
138  {
139  std::vector<MinMaxAvg> results(my_values.size());
140  min_max_avg(my_values, results, mpi_communicator);
141 
142  return results;
143  }
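  // Usage sketch (illustrative only; the communicator `comm` and the local
  // value are assumptions, not taken from this file):
  //
  //   const double local_residual = ...; // some per-rank quantity
  //   const Utilities::MPI::MinMaxAvg stats =
  //     Utilities::MPI::min_max_avg(local_residual, comm);
  //   // stats.min/.max/.avg/.sum summarize the value over all ranks;
  //   // stats.min_index and stats.max_index are the ranks holding the
  //   // extremal values.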
144 
145 
146 
147 #ifdef DEAL_II_WITH_MPI
148  unsigned int
149  n_mpi_processes(const MPI_Comm mpi_communicator)
150  {
151  if (job_supports_mpi())
152  {
153  int n_jobs = 1;
154  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
155  AssertThrowMPI(ierr);
156  return n_jobs;
157  }
158  else
159  return 1;
160  }
161 
162 
163  unsigned int
164  this_mpi_process(const MPI_Comm mpi_communicator)
165  {
166  if (job_supports_mpi())
167  {
168  int rank = 0;
169  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
170  AssertThrowMPI(ierr);
171  return rank;
172  }
173  else
174  return 0;
175  }
176 
177 
178 
179  std::vector<unsigned int>
180  mpi_processes_within_communicator(const MPI_Comm comm_large,
181  const MPI_Comm comm_small)
182  {
183  if (Utilities::MPI::job_supports_mpi() == false)
184  return std::vector<unsigned int>{0};
185 
186  const unsigned int rank = Utilities::MPI::this_mpi_process(comm_large);
187  const unsigned int size = Utilities::MPI::n_mpi_processes(comm_small);
188 
189  std::vector<unsigned int> ranks(size);
190  const int ierr = MPI_Allgather(
191  &rank, 1, MPI_UNSIGNED, ranks.data(), 1, MPI_UNSIGNED, comm_small);
192  AssertThrowMPI(ierr);
193 
194  return ranks;
195  }
196 
197 
198 
199  MPI_Comm
200  duplicate_communicator(const MPI_Comm mpi_communicator)
201  {
202  MPI_Comm new_communicator;
203  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
204  AssertThrowMPI(ierr);
205  return new_communicator;
206  }
207 
208 
209 
210  void
211  free_communicator(MPI_Comm mpi_communicator)
212  {
213  // MPI_Comm_free will set the argument to MPI_COMM_NULL automatically.
214  const int ierr = MPI_Comm_free(&mpi_communicator);
215  AssertThrowMPI(ierr);
216  }
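  // Typical pairing (sketch, not from this file): a communicator obtained
  // from duplicate_communicator() should eventually be handed back to
  // free_communicator():
  //
  //   MPI_Comm dup = Utilities::MPI::duplicate_communicator(MPI_COMM_WORLD);
  //   ... // use `dup` for library-internal communication
  //   Utilities::MPI::free_communicator(dup);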
217 
218 
219 
220  int
221  create_group(const MPI_Comm comm,
222  const MPI_Group &group,
223  const int tag,
224  MPI_Comm *new_comm)
225  {
226  const int ierr = MPI_Comm_create_group(comm, group, tag, new_comm);
227  AssertThrowMPI(ierr);
228  return ierr;
229  }
230 
231 
232 
233  std::vector<IndexSet>
234  create_ascending_partitioning(
235  const MPI_Comm comm,
236  const types::global_dof_index locally_owned_size)
237  {
238  static_assert(
239  std::is_same_v<types::global_dof_index, IndexSet::size_type>,
240  "IndexSet::size_type must match types::global_dof_index for "
241  "using this function");
242  const unsigned int n_proc = n_mpi_processes(comm);
243  const std::vector<IndexSet::size_type> sizes =
244  all_gather(comm, locally_owned_size);
245  const auto total_size =
246  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
247 
248  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
249 
250  IndexSet::size_type begin = 0;
251  for (unsigned int i = 0; i < n_proc; ++i)
252  {
253  res[i].add_range(begin, begin + sizes[i]);
254  begin = begin + sizes[i];
255  }
256 
257  return res;
258  }
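  // Illustrative sketch (assumed sizes, not from this file): with three
  // ranks owning 4, 2, and 3 elements respectively,
  //
  //   const std::vector<IndexSet> res =
  //     Utilities::MPI::create_ascending_partitioning(comm, my_local_size);
  //
  // returns the same vector {[0,4), [4,6), [6,9)} on every rank, i.e.
  // res[r] describes what rank r owns in an ascending, contiguous global
  // numbering of total size 9.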
259 
260 
261 
262  IndexSet
263  create_evenly_distributed_partitioning(
264  const MPI_Comm comm,
265  const types::global_dof_index total_size)
266  {
267  const unsigned int this_proc = this_mpi_process(comm);
268  const unsigned int n_proc = n_mpi_processes(comm);
269 
270  return Utilities::create_evenly_distributed_partitioning(this_proc,
271  n_proc,
272  total_size);
273  }
274 
275 
276 
277  std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>
278  create_mpi_data_type_n_bytes(const std::size_t n_bytes)
279  {
280  MPI_Datatype result;
281  int ierr = LargeCount::Type_contiguous_c(n_bytes, MPI_BYTE, &result);
282  AssertThrowMPI(ierr);
283  ierr = MPI_Type_commit(&result);
284  AssertThrowMPI(ierr);
285 
286 # ifdef DEBUG
287  MPI_Count size64;
288  ierr = MPI_Type_size_x(result, &size64);
289  AssertThrowMPI(ierr);
290 
291  Assert(size64 == static_cast<MPI_Count>(n_bytes), ExcInternalError());
292 # endif
293 
294  // Now put the new data type into a std::unique_ptr with a custom
295  // deleter. We call the std::unique_ptr constructor that as first
296  // argument takes a pointer (here, a pointer to a copy of the `result`
297  // object, and as second argument a pointer-to-function, for which
298  // we here use a lambda function without captures that acts as the
299  // 'deleter' object: it calls `MPI_Type_free` and then deletes the
300  // pointer. To avoid a compiler warning about a null this pointer
301  // in the lambda (which don't make sense: the lambda doesn't store
302  // anything), we create the deleter first.
303  auto deleter = [](MPI_Datatype *p) {
304  if (p != nullptr)
305  {
306  const int ierr = MPI_Type_free(p);
307  (void)ierr;
308  AssertNothrow(ierr == MPI_SUCCESS, ExcMPI(ierr));
309 
310  delete p;
311  }
312  };
313 
314  return std::unique_ptr<MPI_Datatype, void (*)(MPI_Datatype *)>(
315  new MPI_Datatype(result), deleter);
316  }
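  // Usage sketch (illustrative only; buffer, size, destination rank, and tag
  // are assumptions): the returned handle describes `n_bytes` contiguous
  // bytes as a single MPI datatype, which sidesteps the 32-bit `int` count
  // limit of plain MPI calls:
  //
  //   std::vector<char> buffer(large_size);
  //   const auto bigtype =
  //     Utilities::MPI::create_mpi_data_type_n_bytes(buffer.size());
  //   MPI_Send(buffer.data(), 1, *bigtype, dest_rank, tag, comm);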
317 
318 
319 
320  std::vector<unsigned int>
321  compute_point_to_point_communication_pattern(
322  const MPI_Comm mpi_comm,
323  const std::vector<unsigned int> &destinations)
324  {
325  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
326  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
327  (void)myid;
328  (void)n_procs;
329 
330  for (const unsigned int destination : destinations)
331  {
332  (void)destination;
333  AssertIndexRange(destination, n_procs);
334  }
335 
336 
337  // Have a little function that checks if destinations provided
338  // to the current process are unique. The way it does this is
339  // to create a sorted list of destinations and then walk through
340  // the list and look at successive elements -- if we find the
341  // same number twice, we know that the destinations were not
342  // unique
343  const bool my_destinations_are_unique = [destinations]() {
344  if (destinations.empty())
345  return true;
346  else
347  {
348  std::vector<unsigned int> my_destinations = destinations;
349  std::sort(my_destinations.begin(), my_destinations.end());
350  return (std::adjacent_find(my_destinations.begin(),
351  my_destinations.end()) ==
352  my_destinations.end());
353  }
354  }();
355 
356  // If all processes report that they have unique destinations,
357  // then we can short-cut the process using a consensus algorithm (which
358  // is implemented only for the case of unique destinations):
359  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
360  1)
361  {
362  return ConsensusAlgorithms::nbx<char, char>(
363  destinations, {}, {}, {}, mpi_comm);
364  }
365 
366  // So we need to run a different algorithm, specifically one that
367  // requires more memory -- MPI_Reduce_scatter_block will require memory
368  // proportional to the number of processes involved; that function is
369  // available for MPI 2.2 or later:
370  static CollectiveMutex mutex;
371  CollectiveMutex::ScopedLock lock(mutex, mpi_comm);
372 
373  const int mpi_tag =
374  internal::Tags::compute_point_to_point_communication_pattern;
375 
376  // Calculate the number of messages to send to each process
377  std::vector<unsigned int> dest_vector(n_procs);
378  for (const auto &el : destinations)
379  ++dest_vector[el];
380 
381  // Find how many processes will send to this one
382  // by reducing with sum and then scattering the
383  // results over all processes
384  unsigned int n_recv_from;
385  const int ierr = MPI_Reduce_scatter_block(
386  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
387 
388  AssertThrowMPI(ierr);
389 
390  // Send myid to every process in `destinations` vector...
391  std::vector<MPI_Request> send_requests(destinations.size());
392  for (const auto &el : destinations)
393  {
394  const int ierr =
395  MPI_Isend(&myid,
396  1,
397  MPI_UNSIGNED,
398  el,
399  mpi_tag,
400  mpi_comm,
401  send_requests.data() + (&el - destinations.data()));
402  AssertThrowMPI(ierr);
403  }
404 
405 
406  // Receive `n_recv_from` times from the processes
407  // who communicate with this one. Store the obtained id's
408  // in the resulting vector
409  std::vector<unsigned int> origins(n_recv_from);
410  for (auto &el : origins)
411  {
412  const int ierr = MPI_Recv(&el,
413  1,
414  MPI_UNSIGNED,
415  MPI_ANY_SOURCE,
416  mpi_tag,
417  mpi_comm,
418  MPI_STATUS_IGNORE);
419  AssertThrowMPI(ierr);
420  }
421 
422  if (destinations.size() > 0)
423  {
424  const int ierr = MPI_Waitall(destinations.size(),
425  send_requests.data(),
426  MPI_STATUSES_IGNORE);
427  AssertThrowMPI(ierr);
428  }
429 
430  return origins;
431  }
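  // Usage sketch (illustrative only; the destination list is an assumption):
  // every rank states whom it wants to send to, and the function answers the
  // inverse question, namely who will send to the current rank:
  //
  //   const std::vector<unsigned int> destinations = {...}; // ranks I send to
  //   const std::vector<unsigned int> origins =
  //     Utilities::MPI::compute_point_to_point_communication_pattern(
  //       comm, destinations);
  //   // `origins` lists the ranks that will send a message to this process.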
432 
433 
434 
435  unsigned int
436  compute_n_point_to_point_communications(
437  const MPI_Comm mpi_comm,
438  const std::vector<unsigned int> &destinations)
439  {
440  // Have a little function that checks if destinations provided
441  // to the current process are unique:
442  const bool my_destinations_are_unique = [destinations]() {
443  std::vector<unsigned int> my_destinations = destinations;
444  const unsigned int n_destinations = my_destinations.size();
445  std::sort(my_destinations.begin(), my_destinations.end());
446  my_destinations.erase(std::unique(my_destinations.begin(),
447  my_destinations.end()),
448  my_destinations.end());
449  return (my_destinations.size() == n_destinations);
450  }();
451 
452  // If all processes report that they have unique destinations,
453  // then we can short-cut the process using a consensus algorithm:
454 
455  if (Utilities::MPI::min((my_destinations_are_unique ? 1 : 0), mpi_comm) ==
456  1)
457  {
458  return ConsensusAlgorithms::nbx<char, char>(
459  destinations, {}, {}, {}, mpi_comm)
460  .size();
461  }
462  else
463  {
464  const unsigned int n_procs =
465  Utilities::MPI::n_mpi_processes(mpi_comm);
466 
467  for (const unsigned int destination : destinations)
468  {
469  (void)destination;
470  AssertIndexRange(destination, n_procs);
471  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
472  ExcMessage(
473  "There is no point in communicating with ourselves."));
474  }
475 
476  // Calculate the number of messages to send to each process
477  std::vector<unsigned int> dest_vector(n_procs);
478  for (const auto &el : destinations)
479  ++dest_vector[el];
480 
481  // Find out how many processes will send to this one
482  // MPI_Reduce_scatter(_block) does exactly this
483  unsigned int n_recv_from = 0;
484 
485  const int ierr = MPI_Reduce_scatter_block(dest_vector.data(),
486  &n_recv_from,
487  1,
488  MPI_UNSIGNED,
489  MPI_SUM,
490  mpi_comm);
491 
492  AssertThrowMPI(ierr);
493 
494  return n_recv_from;
495  }
496  }
497 
498 
499 
500  namespace
501  {
502  // custom MPI_Op for the min_max_avg() reduction below
503  void
504  max_reduce(const void *in_lhs_,
505  void *inout_rhs_,
506  int *len,
507  MPI_Datatype *)
508  {
509  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
510  MinMaxAvg *inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
511 
512  for (int i = 0; i < *len; ++i)
513  {
514  inout_rhs[i].sum += in_lhs[i].sum;
515  if (inout_rhs[i].min > in_lhs[i].min)
516  {
517  inout_rhs[i].min = in_lhs[i].min;
518  inout_rhs[i].min_index = in_lhs[i].min_index;
519  }
520  else if (inout_rhs[i].min == in_lhs[i].min)
521  {
522  // choose lower cpu index when tied to make operator commutative
523  if (inout_rhs[i].min_index > in_lhs[i].min_index)
524  inout_rhs[i].min_index = in_lhs[i].min_index;
525  }
526 
527  if (inout_rhs[i].max < in_lhs[i].max)
528  {
529  inout_rhs[i].max = in_lhs[i].max;
530  inout_rhs[i].max_index = in_lhs[i].max_index;
531  }
532  else if (inout_rhs[i].max == in_lhs[i].max)
533  {
534  // choose lower cpu index when tied to make operator commutative
535  if (inout_rhs[i].max_index > in_lhs[i].max_index)
536  inout_rhs[i].max_index = in_lhs[i].max_index;
537  }
538  }
539  }
540  } // namespace
541 
542 
543 
544  void
545  min_max_avg(const ArrayView<const double> &my_values,
546  const ArrayView<MinMaxAvg> &result,
547  const MPI_Comm mpi_communicator)
548  {
549  // If MPI was not started, we have a serial computation and cannot run
550  // the other MPI commands
551  if (Utilities::MPI::n_mpi_processes(mpi_communicator) <= 1)
552  {
553  for (unsigned int i = 0; i < my_values.size(); ++i)
554  {
555  result[i].sum = my_values[i];
556  result[i].avg = my_values[i];
557  result[i].min = my_values[i];
558  result[i].max = my_values[i];
559  result[i].min_index = 0;
560  result[i].max_index = 0;
561  }
562  return;
563  }
564 
565  /*
566  * A custom MPI datatype handle describing the memory layout of the
567  * MinMaxAvg struct. Initialized the first time control reaches the
568  * static variable, so hopefully not initialized too early.
569  */
570  static MPI_Datatype type = []() {
571  MPI_Datatype type;
572 
573  int lengths[] = {3, 2, 1};
574 
575  MPI_Aint displacements[] = {0,
576  offsetof(MinMaxAvg, min_index),
577  offsetof(MinMaxAvg, avg)};
578 
579  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT, MPI_DOUBLE};
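  // The three blocks above mirror the member layout of MinMaxAvg: a run of
  // three doubles (sum, min, max), then the two integer index fields
  // (min_index, max_index), then the trailing double (avg).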
580 
581  int ierr =
582  MPI_Type_create_struct(3, lengths, displacements, types, &type);
583  AssertThrowMPI(ierr);
584 
585  ierr = MPI_Type_commit(&type);
586  AssertThrowMPI(ierr);
587 
588  /* Ensure that we free the allocated datatype again at the end of
589  * the program run just before we call MPI_Finalize():*/
590  MPI_InitFinalize::signals.at_mpi_finalize.connect([type]() mutable {
591  int ierr = MPI_Type_free(&type);
592  AssertThrowMPI(ierr);
593  });
594 
595  return type;
596  }();
597 
598  /*
599  * A custom MPI op handle for our max_reduce function.
600  * Initialized the first time control reaches the static variable, so
601  * hopefully not initialized too early.
602  */
603  static MPI_Op op = []() {
604  MPI_Op op;
605 
606  int ierr =
607  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
608  static_cast<int>(true),
609  &op);
610  AssertThrowMPI(ierr);
611 
612  /* Ensure that we free the allocated op again at the end of the
613  * program run just before we call MPI_Finalize():*/
614  MPI_InitFinalize::signals.at_mpi_finalize.connect([op]() mutable {
615  int ierr = MPI_Op_free(&op);
616  AssertThrowMPI(ierr);
617  });
618 
619  return op;
620  }();
621 
622  AssertDimension(Utilities::MPI::min(my_values.size(), mpi_communicator),
623  Utilities::MPI::max(my_values.size(), mpi_communicator));
624 
625  AssertDimension(my_values.size(), result.size());
626 
627  // To avoid uninitialized values on some MPI implementations, provide
628  // result with a default value already...
629  MinMaxAvg dummy = {0.,
630  std::numeric_limits<double>::max(),
631  std::numeric_limits<double>::lowest(),
632  0,
633  0,
634  0.};
635 
636  for (auto &i : result)
637  i = dummy;
638 
639  const unsigned int my_id =
640  ::Utilities::MPI::this_mpi_process(mpi_communicator);
641  const unsigned int numproc =
642  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
643 
644  std::vector<MinMaxAvg> in(my_values.size());
645 
646  for (unsigned int i = 0; i < my_values.size(); ++i)
647  {
648  in[i].sum = in[i].min = in[i].max = my_values[i];
649  in[i].min_index = in[i].max_index = my_id;
650  }
651 
652  int ierr = MPI_Allreduce(
653  in.data(), result.data(), my_values.size(), type, op, mpi_communicator);
654  AssertThrowMPI(ierr);
655 
656  for (auto &r : result)
657  r.avg = r.sum / numproc;
658  }
659 
660 
661 #else
662 
663  unsigned int
664  n_mpi_processes(const MPI_Comm)
665  {
666  return 1;
667  }
668 
669 
670 
671  unsigned int
672  this_mpi_process(const MPI_Comm)
673  {
674  return 0;
675  }
676 
677 
678 
679  std::vector<unsigned int>
680  mpi_processes_within_communicator(const MPI_Comm, const MPI_Comm)
681  {
682  return std::vector<unsigned int>{0};
683  }
684 
685 
686 
687  std::vector<IndexSet>
688  create_ascending_partitioning(
689  const MPI_Comm /*comm*/,
690  const types::global_dof_index locally_owned_size)
691  {
692  return std::vector<IndexSet>(1, complete_index_set(locally_owned_size));
693  }
694 
695  IndexSet
696  create_evenly_distributed_partitioning(
697  const MPI_Comm /*comm*/,
698  const types::global_dof_index total_size)
699  {
700  return complete_index_set(total_size);
701  }
702 
703 
704 
705  MPI_Comm
706  duplicate_communicator(const MPI_Comm mpi_communicator)
707  {
708  return mpi_communicator;
709  }
710 
711 
712 
713  void
714  free_communicator(MPI_Comm /*mpi_communicator*/)
715  {}
716 
717 
718 
719  void
720  min_max_avg(const ArrayView<const double> &my_values,
721  const ArrayView<MinMaxAvg> &result,
722  const MPI_Comm)
723  {
724  AssertDimension(my_values.size(), result.size());
725 
726  for (unsigned int i = 0; i < my_values.size(); ++i)
727  {
728  result[i].sum = my_values[i];
729  result[i].avg = my_values[i];
730  result[i].min = my_values[i];
731  result[i].max = my_values[i];
732  result[i].min_index = 0;
733  result[i].max_index = 0;
734  }
735  }
736 
737 #endif
738 
739  /* Force initialization of static struct: */
740  MPI_InitFinalize::Signals MPI_InitFinalize::signals =
741  MPI_InitFinalize::Signals();
742 
743 
744  MPI_InitFinalize::MPI_InitFinalize(int &argc,
745  char **&argv,
746  const unsigned int max_num_threads)
747  {
748  static bool constructor_has_already_run = false;
749  (void)constructor_has_already_run;
750  Assert(constructor_has_already_run == false,
751  ExcMessage("You can only create a single object of this class "
752  "in a program since it initializes the MPI system."));
753 
754 
755  int ierr = 0;
756 #ifdef DEAL_II_WITH_MPI
757  // if we have PETSc, we will initialize it and let it handle MPI.
758  // Otherwise, we will do it.
759  int MPI_has_been_started = 0;
760  ierr = MPI_Initialized(&MPI_has_been_started);
761  AssertThrowMPI(ierr);
762  AssertThrow(MPI_has_been_started == 0,
763  ExcMessage("MPI error. You can only start MPI once!"));
764 
765  int provided;
766  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
767  // we might use several threads but never call two MPI functions at the
768  // same time. For an explanation of why we do this, see
769  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
770  int wanted = MPI_THREAD_SERIALIZED;
771  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
772  AssertThrowMPI(ierr);
773 
774  // disable for now because at least some implementations always return
775  // MPI_THREAD_SINGLE.
776  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
777  // ExcMessage("MPI reports that we are not allowed to use multiple
778  // threads."));
779 #else
780  // make sure the compiler doesn't warn about these variables
781  (void)argc;
782  (void)argv;
783  (void)ierr;
784 #endif
785 
786  // Initialize Kokkos
787  {
788  // argv has argc+1 elements and the last one is a nullptr. For appending
789  // one element we thus create a new argv by copying the first argc
790  // elements, append the new option, and then a nullptr.
791  //
792  // We do get in trouble, though, if a user program is called with
793  // '--help' as a command line argument. This '--help' gets passed on to
794  // Kokkos, which promptly responds with a lengthy message that the user
795  // likely did not intend. As a consequence, filter out this specific
796  // flag.
797  std::vector<char *> argv_new;
798  for (auto *const arg : make_array_view(&argv[0], &argv[0] + argc))
799  if (strcmp(arg, "--help") != 0)
800  argv_new.push_back(arg);
801 
802  std::stringstream threads_flag;
803 #if KOKKOS_VERSION >= 30700
804  threads_flag << "--kokkos-num-threads=" << MultithreadInfo::n_threads();
805 #else
806  threads_flag << "--kokkos-threads=" << MultithreadInfo::n_threads();
807 #endif
808  const std::string threads_flag_string = threads_flag.str();
809  argv_new.push_back(const_cast<char *>(threads_flag_string.c_str()));
810  argv_new.push_back(nullptr);
811 
812  // The first argument in Kokkos::initialize is of type int&. Hence, we
813  // need to define a new variable to pass to it (instead of using argc+1
814  // inline).
815  int argc_new = argv_new.size() - 1;
816  Kokkos::initialize(argc_new, argv_new.data());
817  }
818 
819  // we are allowed to call MPI_Init ourselves and PETScInitialize will
820  // detect this. This allows us to use MPI_Init_thread instead.
821 #ifdef DEAL_II_WITH_PETSC
822  PetscErrorCode pierr;
823 # ifdef DEAL_II_WITH_SLEPC
824  // Initialize SLEPc (with PETSc):
825  finalize_petscslepc = SlepcInitializeCalled ? false : true;
826  pierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
827  AssertThrow(pierr == 0, ExcSLEPcError(pierr));
828 # else
829  // or just initialize PETSc alone:
830  finalize_petscslepc = PetscInitializeCalled ? false : true;
831  pierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
832  AssertThrow(pierr == 0, ExcPETScError(pierr));
833 # endif
834 
835  // Disable PETSc exception handling. This just prints a large wall
836  // of text that is not particularly helpful for what we do:
837  pierr = PetscPopSignalHandler();
838  AssertThrow(pierr == 0, ExcPETScError(pierr));
839 #endif
840 
841  // Initialize zoltan
842 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
843  float version;
844  Zoltan_Initialize(argc, argv, &version);
845 #endif
846 
847 #ifdef DEAL_II_WITH_P4EST
848  // Initialize p4est and libsc components
849 # if DEAL_II_P4EST_VERSION_GTE(2, 5, 0, 0)
850  // This feature is broken in version 2.0.0 for calls to
851  // MPI_Comm_create_group (see cburstedde/p4est#30).
852  // Disabling it leads to more verbose p4est error messages
853  // which should be fine.
854  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
855 # endif
856  p4est_init(nullptr, SC_LP_SILENT);
857 #endif
858 
859  constructor_has_already_run = true;
860 
861 
862  // Now also see how many threads we'd like to run
863  if (max_num_threads != numbers::invalid_unsigned_int)
864  {
865  // set maximum number of threads (also respecting the environment
866  // variable that the called function evaluates) based on what the
867  // user asked
868  MultithreadInfo::set_thread_limit(max_num_threads);
869  }
870  else
871  // user wants automatic choice
872  {
873 #ifdef DEAL_II_WITH_MPI
874  // we need to figure out how many MPI processes there are on the
875  // current node, as well as how many CPU cores we have. for the
876  // first task, check what get_hostname() returns and then do an
877  // allgather so each processor gets the answer
878  //
879  // in calculating the length of the string, don't forget the
880  // terminating \0 on C-style strings
881  const std::string hostname = Utilities::System::get_hostname();
882  const unsigned int max_hostname_size =
883  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
884  std::vector<char> hostname_array(max_hostname_size);
885  std::copy(hostname.c_str(),
886  hostname.c_str() + hostname.size() + 1,
887  hostname_array.begin());
888 
889  std::vector<char> all_hostnames(max_hostname_size *
890  MPI::n_mpi_processes(MPI_COMM_WORLD));
891  const int ierr = MPI_Allgather(hostname_array.data(),
892  max_hostname_size,
893  MPI_CHAR,
894  all_hostnames.data(),
895  max_hostname_size,
896  MPI_CHAR,
897  MPI_COMM_WORLD);
898  AssertThrowMPI(ierr);
899 
900  // count how often our own hostname appears and which of these
901  // instances the current process represents
902  unsigned int n_local_processes = 0;
903  unsigned int nth_process_on_host = 0;
904  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
905  ++i)
906  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
907  hostname)
908  {
909  ++n_local_processes;
910  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
911  ++nth_process_on_host;
912  }
913  Assert(nth_process_on_host > 0, ExcInternalError());
914 
915 
916  // compute how many cores each process gets. if the number does not
917  // divide evenly, then we get one more core if we are among the
918  // first few processes
919  //
920  // if the number would be zero, round up to one since every process
921  // needs to have at least one thread
922  const unsigned int n_threads =
923  std::max(MultithreadInfo::n_cores() / n_local_processes +
924  (nth_process_on_host <=
925  MultithreadInfo::n_cores() % n_local_processes ?
926  1 :
927  0),
928  1U);
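  // Worked example (illustrative): with 10 cores and 3 MPI processes on this
  // host, 10 / 3 = 3 and 10 % 3 = 1, so the first process on the host gets
  // 3 + 1 = 4 threads and the other two get 3 each, using all 10 cores.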
929 #else
930  const unsigned int n_threads = MultithreadInfo::n_cores();
931 #endif
932 
933  // finally set this number of threads
934  MultithreadInfo::set_thread_limit(n_threads);
935  }
936 
937  // As a final step call the at_mpi_init() signal handler.
938  signals.at_mpi_init();
939  }
940 
941 
942 
943  void
944  MPI_InitFinalize::register_request(MPI_Request &request)
945  {
946  // insert if it is not in the set already:
947  requests.insert(&request);
948  }
949 
950 
951 
952  void
953  MPI_InitFinalize::unregister_request(MPI_Request &request)
954  {
955  Assert(
956  requests.find(&request) != requests.end(),
957  ExcMessage(
958  "You tried to call unregister_request() with an invalid request."));
959 
960  requests.erase(&request);
961  }
962 
963 
964 
965  std::set<MPI_Request *> MPI_InitFinalize::requests;
966 
967 
968 
969  MPI_InitFinalize::~MPI_InitFinalize()
970  {
971  // First, call the at_mpi_finalize() signal handler.
972  signals.at_mpi_finalize();
973 
974  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
975  // are no longer used at this point. this is relevant because the static
976  // object destructors run for these vectors at the end of the program
977  // would run after MPI_Finalize is called, leading to errors
978 
979 #ifdef DEAL_II_WITH_MPI
980  // Before exiting, wait for nonblocking communication to complete:
981  for (auto *request : requests)
982  {
983  const int ierr = MPI_Wait(request, MPI_STATUS_IGNORE);
984  AssertThrowMPI(ierr);
985  }
986 
987  // Start with deal.II MPI vectors and delete vectors from the pools:
988  GrowingVectorMemory<
989  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
990  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
991  release_unused_memory();
992  GrowingVectorMemory<
993  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
994  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
995  release_unused_memory();
996 
997  // Next with Trilinos:
998 # ifdef DEAL_II_WITH_TRILINOS
999  GrowingVectorMemory<
1000  TrilinosWrappers::MPI::Vector>::release_unused_memory();
1001  GrowingVectorMemory<
1002  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
1003 # endif
1004 #endif
1005 
1006 
1007  // Now deal with PETSc (with or without MPI). Only delete the vectors if
1008  // finalize hasn't been called yet, otherwise this will lead to errors.
1009 #ifdef DEAL_II_WITH_PETSC
1010  if (!PetscFinalizeCalled)
1011  {
1012  GrowingVectorMemory<
1013  PETScWrappers::MPI::Vector>::release_unused_memory();
1014  GrowingVectorMemory<
1015  PETScWrappers::MPI::BlockVector>::release_unused_memory();
1016  }
1017 # ifdef DEAL_II_WITH_SLEPC
1018  // and now end SLEPc with PETSc if we did so
1019  if (finalize_petscslepc)
1020  {
1021  PetscErrorCode ierr = SlepcFinalize();
1022  AssertThrow(ierr == 0,
1023  ExcSLEPcError(ierr));
1024  }
1025 # else
1026  // or just end PETSc if we did so
1027  if (finalize_petscslepc)
1028  {
1029  PetscErrorCode ierr = PetscFinalize();
1030  AssertThrow(ierr == 0, ExcPETScError(ierr));
1031  }
1032 # endif
1033 #endif
1034 
1035 #ifdef DEAL_II_WITH_P4EST
1036  // now end p4est and libsc
1037  // Note: p4est has no finalize function
1038  sc_finalize();
1039 #endif
1040 
1041 
1042  // Finalize Kokkos
1043  Kokkos::finalize();
1044 
1045  // only MPI_Finalize if we are running with MPI. We also need to do this
1046  // when running PETSc, because we initialize MPI ourselves before
1047  // calling PetscInitialize
1048 #ifdef DEAL_II_WITH_MPI
1049  if (job_supports_mpi() == true)
1050  {
1051 # if __cpp_lib_uncaught_exceptions >= 201411
1052  // std::uncaught_exception() is deprecated in c++17
1053  if (std::uncaught_exceptions() > 0)
1054 # else
1055  if (std::uncaught_exception() == true)
1056 # endif
1057  {
1058  // do not try to call MPI_Finalize to avoid a deadlock.
1059  }
1060  else
1061  {
1062  const int ierr = MPI_Finalize();
1063  (void)ierr;
1064  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
1065  }
1066  }
1067 #endif
1068  }
1069 
1070 
1071 
1072  bool
1073  job_supports_mpi()
1074  {
1075 #ifdef DEAL_II_WITH_MPI
1076  int MPI_has_been_started = 0;
1077  const int ierr = MPI_Initialized(&MPI_has_been_started);
1078  AssertThrowMPI(ierr);
1079 
1080  return (MPI_has_been_started > 0);
1081 #else
1082  return false;
1083 #endif
1084  }
1085 
1086 
1087 
1088  std::vector<unsigned int>
1089  compute_index_owner(const IndexSet &owned_indices,
1090  const IndexSet &indices_to_look_up,
1091  const MPI_Comm comm)
1092  {
1093  Assert(owned_indices.size() == indices_to_look_up.size(),
1094  ExcMessage("IndexSets have to have the same sizes."));
1095 
1096  Assert(
1097  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1098  ExcMessage("IndexSets have to have the same size on all processes."));
1099 
1100  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1101 
1102  // Step 1: setup dictionary
1103  // The input owned_indices can be partitioned arbitrarily. In the
1104  // dictionary, the index set is statically repartitioned among the
1105  // processes again and extended with information about the actual owner
1106  // of that index.
1107  internal::ComputeIndexOwner::ConsensusAlgorithmsPayload process(
1108  owned_indices, indices_to_look_up, comm, owning_ranks);
1109 
1110  // Step 2: read dictionary
1111  // Communicate with the process who owns the index in the static
1112  // partition (i.e. in the dictionary). This process returns the actual
1113  // owner of the index.
1114  ConsensusAlgorithms::Selector<
1115  std::vector<
1116  std::pair<types::global_dof_index, types::global_dof_index>>,
1117  std::vector<unsigned int>>
1118  consensus_algorithm;
1119  consensus_algorithm.run(process, comm);
1120 
1121  return owning_ranks;
1122  }
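  // Usage sketch (illustrative only; the index sets are assumptions): given
  // the locally owned indices and a set of ghost indices to look up, the
  // call returns one owning rank per ghost index, in the order in which the
  // ghost indices are traversed:
  //
  //   const std::vector<unsigned int> owners =
  //     Utilities::MPI::compute_index_owner(locally_owned, ghost_indices, comm);
  //   // owners.size() == ghost_indices.n_elements()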
1123 
1124 
1125 
1126  namespace internal
1127  {
1128  namespace CollectiveMutexImplementation
1129  {
1130  /**
1131  * Abort, should there be an exception being processed (see the error
1132  * message).
1133  */
1134  void
1135  check_exception()
1136  {
1137 #ifdef DEAL_II_WITH_MPI
1138 # if __cpp_lib_uncaught_exceptions >= 201411
1139  // std::uncaught_exception() is deprecated in c++17
1140  if (std::uncaught_exceptions() != 0)
1141 # else
1142  if (std::uncaught_exception() == true)
1143 # endif
1144  {
1145  std::cerr
1146  << "---------------------------------------------------------\n"
1147  << "An exception was thrown inside a section of the program\n"
1148  << "guarded by a CollectiveMutex.\n"
1149  << "Because a CollectiveMutex guards critical communication\n"
1150  << "handling the exception would likely\n"
1151  << "deadlock because only the current process is aware of the\n"
1152  << "exception. To prevent this deadlock, the program will be\n"
1153  << "aborted.\n"
1154  << "---------------------------------------------------------"
1155  << std::endl;
1156 
1157  MPI_Abort(MPI_COMM_WORLD, 1);
1158  }
1159 #endif
1160  }
1161  } // namespace CollectiveMutexImplementation
1162  } // namespace internal
1163 
1164 
1165 
1166  CollectiveMutex::CollectiveMutex()
1167  : locked(false)
1168  , request(MPI_REQUEST_NULL)
1169  {
1170  Utilities::MPI::MPI_InitFinalize::register_request(request);
1171  }
1172 
1173 
1174 
1175  CollectiveMutex::~CollectiveMutex()
1176  {
1177  // First check if this destructor is called during exception handling
1178  // if so, abort.
1179  internal::CollectiveMutexImplementation::check_exception();
1180 
1181  Assert(
1182  !locked,
1183  ExcMessage(
1184  "Error: MPI::CollectiveMutex is still locked while being destroyed!"));
1185 
1186  Utilities::MPI::MPI_InitFinalize::unregister_request(request);
1187  }
1188 
1189 
1190 
1191  void
1192  CollectiveMutex::lock(const MPI_Comm comm)
1193  {
1194  (void)comm;
1195 
1196  Assert(
1197  !locked,
1198  ExcMessage(
1199  "Error: MPI::CollectiveMutex needs to be unlocked before lock()"));
1200 
1201 #ifdef DEAL_II_WITH_MPI
1202 
1203  if (job_supports_mpi())
1204  {
1205  // TODO: For now, we implement this mutex with a blocking barrier in
1206  // the lock and unlock. It needs to be tested, if we can move to a
1207  // nonblocking barrier (code disabled below).
1208 
1209  const int ierr = MPI_Barrier(comm);
1210  AssertThrowMPI(ierr);
1211 
1212 # if 0
1213  // wait for non-blocking barrier to finish. This is a noop the
1214  // first time we lock().
1215  const int ierr = MPI_Wait(&request, MPI_STATUS_IGNORE);
1216  AssertThrowMPI(ierr);
1217 # else
1218  // nothing to do as blocking barrier already completed
1219 # endif
1220  }
1221 #endif
1222 
1223  locked = true;
1224  }
1225 
1226 
1227 
1228  void
1229  CollectiveMutex::unlock(const MPI_Comm comm)
1230  {
1231  (void)comm;
1232 
1233  // First check if this function is called during exception handling
1234  // if so, abort. This can happen if a ScopedLock is destroyed.
1235  internal::CollectiveMutexImplementation::check_exception();
1236 
1237  Assert(
1238  locked,
1239  ExcMessage(
1240  "Error: MPI::CollectiveMutex needs to be locked before unlock()"));
1241 
1242 #ifdef DEAL_II_WITH_MPI
1243 
1244  if (job_supports_mpi())
1245  {
1246  // TODO: For now, we implement this mutex with a blocking barrier
1247  // in the lock and unlock. It needs to be tested, if we can move
1248  // to a nonblocking barrier (code disabled below):
1249 # if 0
1250  const int ierr = MPI_Ibarrier(comm, &request);
1251  AssertThrowMPI(ierr);
1252 # else
1253  const int ierr = MPI_Barrier(comm);
1254  AssertThrowMPI(ierr);
1255 # endif
1256  }
1257 #endif
1258 
1259  locked = false;
1260  }
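  // Usage sketch (illustrative only): rather than calling lock()/unlock()
  // by hand, client code typically uses the RAII helper, as done further up
  // in this file for compute_point_to_point_communication_pattern():
  //
  //   static Utilities::MPI::CollectiveMutex mutex;
  //   {
  //     Utilities::MPI::CollectiveMutex::ScopedLock lock(mutex, comm);
  //     ... // critical, communicator-wide communication
  //   } // unlock() is called automatically here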
1261 
1262 
1263 #ifndef DOXYGEN
1264  // explicit instantiations
1265 
1266  // booleans aren't in MPI_SCALARS
1267  template bool
1268  reduce(const bool &,
1269  const MPI_Comm,
1270  const std::function<bool(const bool &, const bool &)> &,
1271  const unsigned int);
1272 
1273  template std::vector<bool>
1274  reduce(const std::vector<bool> &,
1275  const MPI_Comm,
1276  const std::function<std::vector<bool>(const std::vector<bool> &,
1277  const std::vector<bool> &)> &,
1278  const unsigned int);
1279 
1280  template bool
1281  all_reduce(const bool &,
1282  const MPI_Comm,
1283  const std::function<bool(const bool &, const bool &)> &);
1284 
1285  template std::vector<bool>
1286  all_reduce(
1287  const std::vector<bool> &,
1288  const MPI_Comm,
1289  const std::function<std::vector<bool>(const std::vector<bool> &,
1290  const std::vector<bool> &)> &);
1291 
1292  // We need an explicit instantiation of this for the same reason as the
1293  // other types described in mpi.inst.in
1294  template void
1295  internal::all_reduce<bool>(const MPI_Op &,
1296  const ArrayView<const bool> &,
1297  const MPI_Comm,
1298  const ArrayView<bool> &);
1299 
1300 
1301  template bool
1302  logical_or<bool>(const bool &, const MPI_Comm);
1303 
1304 
1305  template void
1306  logical_or<bool>(const ArrayView<const bool> &,
1307  const MPI_Comm,
1308  const ArrayView<bool> &);
1309 
1310 
1311  template std::vector<unsigned int>
1312  compute_set_union(const std::vector<unsigned int> &vec,
1313  const MPI_Comm comm);
1314 
1315 
1316  template std::set<unsigned int>
1317  compute_set_union(const std::set<unsigned int> &set, const MPI_Comm comm);
1318 #endif
1319 
1320 #include "mpi.inst"
1321  } // end of namespace MPI
1322 } // end of namespace Utilities
1323 
1324 DEAL_II_NAMESPACE_CLOSE