Reference documentation for deal.II version Git a5ed68a04a 2019-09-22 06:50:58 -0600
mpi.cc
1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2005 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 
17 #include <deal.II/base/exceptions.h>
18 #include <deal.II/base/index_set.h>
19 #include <deal.II/base/mpi.h>
20 #include <deal.II/base/mpi.templates.h>
21 #include <deal.II/base/mpi_compute_index_owner_internal.h>
22 #include <deal.II/base/multithread_info.h>
23 #include <deal.II/base/utilities.h>
24 
25 #include <deal.II/lac/la_parallel_block_vector.h>
26 #include <deal.II/lac/la_parallel_vector.h>
27 #include <deal.II/lac/vector_memory.h>
28 
29 #include <iostream>
30 #include <numeric>
31 #include <set>
32 #include <vector>
33 
34 #ifdef DEAL_II_WITH_TRILINOS
35 # ifdef DEAL_II_WITH_MPI
36 # include <deal.II/lac/trilinos_parallel_block_vector.h>
37 # include <deal.II/lac/trilinos_vector.h>
38 # include <deal.II/lac/vector_memory.h>
39 
40 # include <Epetra_MpiComm.h>
41 # endif
42 #endif
43 
44 #ifdef DEAL_II_WITH_PETSC
45 # include <deal.II/lac/petsc_block_vector.h>
46 # include <deal.II/lac/petsc_vector.h>
47 
48 # include <petscsys.h>
49 #endif
50 
51 #ifdef DEAL_II_WITH_SLEPC
52 # include <deal.II/lac/slepc_solver.h>
53 
54 # include <slepcsys.h>
55 #endif
56 
57 #ifdef DEAL_II_WITH_P4EST
58 # include <p4est_bits.h>
59 #endif
60 
61 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
62 # include <zoltan_cpp.h>
63 #endif
64 
65 DEAL_II_NAMESPACE_OPEN
66 
67 
68 namespace Utilities
69 {
70  namespace MPI
71  {
72 #ifdef DEAL_II_WITH_MPI
73  unsigned int
74  n_mpi_processes(const MPI_Comm &mpi_communicator)
75  {
76  int n_jobs = 1;
77  const int ierr = MPI_Comm_size(mpi_communicator, &n_jobs);
78  AssertThrowMPI(ierr);
79 
80  return n_jobs;
81  }
82 
83 
84  unsigned int
85  this_mpi_process(const MPI_Comm &mpi_communicator)
86  {
87  int rank = 0;
88  const int ierr = MPI_Comm_rank(mpi_communicator, &rank);
89  AssertThrowMPI(ierr);
90 
91  return rank;
92  }
93 
94 
95  MPI_Comm
96  duplicate_communicator(const MPI_Comm &mpi_communicator)
97  {
98  MPI_Comm new_communicator;
99  const int ierr = MPI_Comm_dup(mpi_communicator, &new_communicator);
100  AssertThrowMPI(ierr);
101  return new_communicator;
102  }
103 
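// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// The three helpers above are usually the first deal.II MPI utilities a
// program touches. A minimal example; the function name `report` is made up
// for illustration and assumes an initialized MPI run plus
// <deal.II/base/mpi.h> and <iostream>:

void report(const MPI_Comm &comm)
{
  const unsigned int rank    = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  // work on a private copy of the communicator so that messages sent here
  // cannot collide with messages sent on `comm` elsewhere
  MPI_Comm work_comm = dealii::Utilities::MPI::duplicate_communicator(comm);

  if (rank == 0)
    std::cout << "running on " << n_ranks << " ranks" << std::endl;

  MPI_Comm_free(&work_comm); // the caller owns (and frees) the duplicate
}
// -------------------------------------------------------------------------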
104 
105 
106  int
107  create_group(const MPI_Comm & comm,
108  const MPI_Group &group,
109  const int tag,
110  MPI_Comm * new_comm)
111  {
112 # if DEAL_II_MPI_VERSION_GTE(3, 0)
113  return MPI_Comm_create_group(comm, group, tag, new_comm);
114 # else
115  int rank;
116  int ierr = MPI_Comm_rank(comm, &rank);
117  AssertThrowMPI(ierr);
118 
119  int grp_rank;
120  ierr = MPI_Group_rank(group, &grp_rank);
121  AssertThrowMPI(ierr);
122  if (grp_rank == MPI_UNDEFINED)
123  {
124  *new_comm = MPI_COMM_NULL;
125  return MPI_SUCCESS;
126  }
127 
128  int grp_size;
129  ierr = MPI_Group_size(group, &grp_size);
130  AssertThrowMPI(ierr);
131 
132  ierr = MPI_Comm_dup(MPI_COMM_SELF, new_comm);
133  AssertThrowMPI(ierr);
134 
135  MPI_Group parent_grp;
136  ierr = MPI_Comm_group(comm, &parent_grp);
137  AssertThrowMPI(ierr);
138 
139  std::vector<int> pids(grp_size);
140  std::vector<int> grp_pids(grp_size);
141  std::iota(grp_pids.begin(), grp_pids.end(), 0);
142  ierr = MPI_Group_translate_ranks(
143  group, grp_size, grp_pids.data(), parent_grp, pids.data());
144  AssertThrowMPI(ierr);
145  ierr = MPI_Group_free(&parent_grp);
146  AssertThrowMPI(ierr);
147 
148  MPI_Comm comm_old = *new_comm;
149  MPI_Comm ic;
150  for (int merge_sz = 1; merge_sz < grp_size; merge_sz *= 2)
151  {
152  const int gid = grp_rank / merge_sz;
153  comm_old = *new_comm;
154  if (gid % 2 == 0)
155  {
156  if ((gid + 1) * merge_sz < grp_size)
157  {
158  ierr = (MPI_Intercomm_create(
159  *new_comm, 0, comm, pids[(gid + 1) * merge_sz], tag, &ic));
160  AssertThrowMPI(ierr);
161  ierr = MPI_Intercomm_merge(ic, 0 /* LOW */, new_comm);
162  AssertThrowMPI(ierr);
163  }
164  }
165  else
166  {
167  ierr = MPI_Intercomm_create(
168  *new_comm, 0, comm, pids[(gid - 1) * merge_sz], tag, &ic);
169  AssertThrowMPI(ierr);
170  ierr = MPI_Intercomm_merge(ic, 1 /* HIGH */, new_comm);
171  AssertThrowMPI(ierr);
172  }
173  if (*new_comm != comm_old)
174  {
175  ierr = MPI_Comm_free(&ic);
176  AssertThrowMPI(ierr);
177  ierr = MPI_Comm_free(&comm_old);
178  AssertThrowMPI(ierr);
179  }
180  }
181 
182  return MPI_SUCCESS;
183 # endif
184  }
185 
186 
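// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// create_group() builds a communicator that contains only the ranks of a
// given MPI_Group, forwarding to MPI_Comm_create_group where available and
// using the hand-rolled intercommunicator merge above otherwise. A sketch
// that puts the even ranks of `comm` into their own communicator; the name
// `split_even` and the tag value are made up for illustration:

MPI_Comm split_even(const MPI_Comm &comm)
{
  MPI_Group world_group;
  MPI_Comm_group(comm, &world_group);

  // collect the even ranks of comm
  std::vector<int> even_ranks;
  for (unsigned int r = 0; r < dealii::Utilities::MPI::n_mpi_processes(comm); r += 2)
    even_ranks.push_back(r);

  MPI_Group even_group;
  MPI_Group_incl(world_group,
                 static_cast<int>(even_ranks.size()),
                 even_ranks.data(),
                 &even_group);

  // only the members of the group call create_group(); everyone else keeps
  // MPI_COMM_NULL
  MPI_Comm even_comm = MPI_COMM_NULL;
  if (dealii::Utilities::MPI::this_mpi_process(comm) % 2 == 0)
    dealii::Utilities::MPI::create_group(comm, even_group, /*tag=*/55, &even_comm);

  MPI_Group_free(&even_group);
  MPI_Group_free(&world_group);
  return even_comm;
}
// -------------------------------------------------------------------------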
187 
188  std::vector<IndexSet>
189  create_ascending_partitioning(const MPI_Comm & comm,
190  const IndexSet::size_type &local_size)
191  {
192  const unsigned int n_proc = n_mpi_processes(comm);
193  const std::vector<IndexSet::size_type> sizes =
194  all_gather(comm, local_size);
195  const auto total_size =
196  std::accumulate(sizes.begin(), sizes.end(), IndexSet::size_type(0));
197 
198  std::vector<IndexSet> res(n_proc, IndexSet(total_size));
199 
200  IndexSet::size_type begin = 0;
201  for (unsigned int i = 0; i < n_proc; ++i)
202  {
203  res[i].add_range(begin, begin + sizes[i]);
204  begin = begin + sizes[i];
205  }
206 
207  return res;
208  }
209 
210 
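// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// create_ascending_partitioning() turns a per-process local size into one
// contiguous, globally consistent IndexSet per rank. A minimal sketch; the
// function name `example_partition` is made up for illustration:

std::vector<dealii::IndexSet> example_partition(const MPI_Comm &comm)
{
  // every process contributes 10 consecutive indices
  const dealii::IndexSet::size_type n_local = 10;

  // partition[r] is the locally owned range of rank r; the ranges are
  // contiguous and ordered by rank, i.e. rank 0 owns [0,10), rank 1 owns
  // [10,20), and so on for equal local sizes
  return dealii::Utilities::MPI::create_ascending_partitioning(comm, n_local);
}
// -------------------------------------------------------------------------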
211 
 212  std::vector<unsigned int>
 213  compute_point_to_point_communication_pattern(
 214  const MPI_Comm & mpi_comm,
215  const std::vector<unsigned int> &destinations)
216  {
217  const unsigned int myid = Utilities::MPI::this_mpi_process(mpi_comm);
218  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
219 
220  for (const unsigned int destination : destinations)
221  {
222  (void)destination;
223  Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
224  Assert(destination != myid,
225  ExcMessage(
226  "There is no point in communicating with ourselves."));
227  }
228 
229 # if DEAL_II_MPI_VERSION_GTE(2, 2)
230  // Calculate the number of messages to send to each process
231  std::vector<unsigned int> dest_vector(n_procs);
232  for (const auto &el : destinations)
233  ++dest_vector[el];
234 
235  // Find how many processes will send to this one
236  // by reducing with sum and then scattering the
237  // results over all processes
238  unsigned int n_recv_from;
239  const int ierr = MPI_Reduce_scatter_block(
240  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
241 
242  AssertThrowMPI(ierr);
243 
244  // Send myid to every process in `destinations` vector...
245  std::vector<MPI_Request> send_requests(destinations.size());
246  for (const auto &el : destinations)
247  MPI_Isend(&myid,
248  1,
249  MPI_UNSIGNED,
250  el,
251  32766,
252  mpi_comm,
253  send_requests.data() + (&el - destinations.data()));
254 
255  // if no one to receive from, return an empty vector
256  if (n_recv_from == 0)
257  return std::vector<unsigned int>();
258 
259  // ...otherwise receive `n_recv_from` times from the processes
260  // who communicate with this one. Store the obtained id's
261  // in the resulting vector
262  std::vector<unsigned int> origins(n_recv_from);
263  for (auto &el : origins)
264  MPI_Recv(&el,
265  1,
266  MPI_UNSIGNED,
267  MPI_ANY_SOURCE,
268  32766,
269  mpi_comm,
270  MPI_STATUS_IGNORE);
271 
272  MPI_Waitall(destinations.size(),
273  send_requests.data(),
274  MPI_STATUSES_IGNORE);
275  return origins;
276 # else
277  // let all processors communicate the maximal number of destinations
278  // they have
279  const unsigned int max_n_destinations =
280  Utilities::MPI::max(destinations.size(), mpi_comm);
281 
282  if (max_n_destinations == 0)
283  // all processes have nothing to send/receive:
284  return std::vector<unsigned int>();
285 
286  // now that we know the number of data packets every processor wants to
287  // send, set up a buffer with the maximal size and copy our destinations
288  // in there, padded with -1's
 289  std::vector<unsigned int> my_destinations(max_n_destinations,
 290  numbers::invalid_unsigned_int);
291  std::copy(destinations.begin(),
292  destinations.end(),
293  my_destinations.begin());
294 
295  // now exchange these (we could communicate less data if we used
296  // MPI_Allgatherv, but we'd have to communicate my_n_destinations to all
297  // processors in this case, which is more expensive than the reduction
298  // operation above in MPI_Allreduce)
299  std::vector<unsigned int> all_destinations(max_n_destinations * n_procs);
300  const int ierr = MPI_Allgather(my_destinations.data(),
301  max_n_destinations,
302  MPI_UNSIGNED,
303  all_destinations.data(),
304  max_n_destinations,
305  MPI_UNSIGNED,
306  mpi_comm);
307  AssertThrowMPI(ierr);
308 
309  // now we know who is going to communicate with whom. collect who is
310  // going to communicate with us!
311  std::vector<unsigned int> origins;
312  for (unsigned int i = 0; i < n_procs; ++i)
313  for (unsigned int j = 0; j < max_n_destinations; ++j)
314  if (all_destinations[i * max_n_destinations + j] == myid)
315  origins.push_back(i);
 316  else if (all_destinations[i * max_n_destinations + j] ==
 317  numbers::invalid_unsigned_int)
 318  break;
319 
320  return origins;
321 # endif
322  }
323 
324 
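// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// compute_point_to_point_communication_pattern() answers the question
// "who is going to send to me?" given only the local list of destinations.
// A minimal sketch; `find_senders` is a made-up name:

std::vector<unsigned int> find_senders(const MPI_Comm &comm)
{
  const unsigned int rank    = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);

  // every rank wants to send something to its right neighbor (cyclically)
  std::vector<unsigned int> destinations;
  if (n_ranks > 1)
    destinations.push_back((rank + 1) % n_ranks);

  // collective call: returns the ranks that listed *us* as a destination,
  // here exactly the left neighbor
  return dealii::Utilities::MPI::compute_point_to_point_communication_pattern(
    comm, destinations);
}
// -------------------------------------------------------------------------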
325 
 326  unsigned int
 327  compute_n_point_to_point_communications(
 328  const MPI_Comm & mpi_comm,
329  const std::vector<unsigned int> &destinations)
330  {
331  const unsigned int n_procs = Utilities::MPI::n_mpi_processes(mpi_comm);
332 
333  for (const unsigned int destination : destinations)
334  {
335  (void)destination;
336  Assert(destination < n_procs, ExcIndexRange(destination, 0, n_procs));
337  Assert(destination != Utilities::MPI::this_mpi_process(mpi_comm),
338  ExcMessage(
339  "There is no point in communicating with ourselves."));
340  }
341 
342  // Calculate the number of messages to send to each process
343  std::vector<unsigned int> dest_vector(n_procs);
344  for (const auto &el : destinations)
345  ++dest_vector[el];
346 
347 # if DEAL_II_MPI_VERSION_GTE(2, 2)
348  // Find out how many processes will send to this one
349  // MPI_Reduce_scatter(_block) does exactly this
350  unsigned int n_recv_from = 0;
351 
352  const int ierr = MPI_Reduce_scatter_block(
353  dest_vector.data(), &n_recv_from, 1, MPI_UNSIGNED, MPI_SUM, mpi_comm);
354 
355  AssertThrowMPI(ierr);
356 
357  return n_recv_from;
358 # else
359  // Find out how many processes will send to this one
360  // by reducing with sum and then scattering the
361  // results over all processes
362  std::vector<unsigned int> buffer(dest_vector.size());
363  unsigned int n_recv_from = 0;
364 
365  MPI_Reduce(dest_vector.data(),
366  buffer.data(),
367  dest_vector.size(),
368  MPI_UNSIGNED,
369  MPI_SUM,
370  0,
371  mpi_comm);
372  MPI_Scatter(buffer.data(),
373  1,
374  MPI_UNSIGNED,
375  &n_recv_from,
376  1,
377  MPI_UNSIGNED,
378  0,
379  mpi_comm);
380 
381  return n_recv_from;
382 # endif
383  }
384 
385 
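// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// compute_n_point_to_point_communications() only returns how many messages
// will arrive, not from whom; it pairs naturally with MPI_ANY_SOURCE
// receives. Sketch; `count_incoming` is a made-up name:

unsigned int count_incoming(const MPI_Comm &                 comm,
                            const std::vector<unsigned int> &destinations)
{
  // collective over `comm`: every rank passes the list of ranks it will
  // send to and learns in how many of those lists it appears
  return dealii::Utilities::MPI::compute_n_point_to_point_communications(
    comm, destinations);
}
// -------------------------------------------------------------------------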
386 
387  namespace
388  {
 389  // custom MPI_Op for min_max_avg()
390  void
391  max_reduce(const void *in_lhs_,
392  void * inout_rhs_,
393  int * len,
394  MPI_Datatype *)
395  {
396  (void)len;
397  const MinMaxAvg *in_lhs = static_cast<const MinMaxAvg *>(in_lhs_);
398  MinMaxAvg * inout_rhs = static_cast<MinMaxAvg *>(inout_rhs_);
399 
400  Assert(*len == 1, ExcInternalError());
401 
402  inout_rhs->sum += in_lhs->sum;
403  if (inout_rhs->min > in_lhs->min)
404  {
405  inout_rhs->min = in_lhs->min;
406  inout_rhs->min_index = in_lhs->min_index;
407  }
408  else if (inout_rhs->min == in_lhs->min)
409  {
410  // choose lower cpu index when tied to make operator commutative
411  if (inout_rhs->min_index > in_lhs->min_index)
412  inout_rhs->min_index = in_lhs->min_index;
413  }
414 
415  if (inout_rhs->max < in_lhs->max)
416  {
417  inout_rhs->max = in_lhs->max;
418  inout_rhs->max_index = in_lhs->max_index;
419  }
420  else if (inout_rhs->max == in_lhs->max)
421  {
422  // choose lower cpu index when tied to make operator commutative
423  if (inout_rhs->max_index > in_lhs->max_index)
424  inout_rhs->max_index = in_lhs->max_index;
425  }
426  }
427  } // namespace
428 
429 
430 
431  MinMaxAvg
432  min_max_avg(const double my_value, const MPI_Comm &mpi_communicator)
433  {
434  // If MPI was not started, we have a serial computation and cannot run
435  // the other MPI commands
436  if (job_supports_mpi() == false)
437  {
438  MinMaxAvg result;
439  result.sum = my_value;
440  result.avg = my_value;
441  result.min = my_value;
442  result.max = my_value;
443  result.min_index = 0;
444  result.max_index = 0;
445 
446  return result;
447  }
448 
449  // To avoid uninitialized values on some MPI implementations, provide
450  // result with a default value already...
451  MinMaxAvg result = {0.,
452  std::numeric_limits<double>::max(),
453  -std::numeric_limits<double>::max(),
454  0,
455  0,
456  0.};
457 
458  const unsigned int my_id =
459  ::Utilities::MPI::this_mpi_process(mpi_communicator);
460  const unsigned int numproc =
461  ::Utilities::MPI::n_mpi_processes(mpi_communicator);
462 
463  MPI_Op op;
464  int ierr =
465  MPI_Op_create(reinterpret_cast<MPI_User_function *>(&max_reduce),
466  true,
467  &op);
468  AssertThrowMPI(ierr);
469 
470  MinMaxAvg in;
471  in.sum = in.min = in.max = my_value;
472  in.min_index = in.max_index = my_id;
473 
474  MPI_Datatype type;
475  int lengths[] = {3, 2};
476  MPI_Aint displacements[] = {0, offsetof(MinMaxAvg, min_index)};
477  MPI_Datatype types[] = {MPI_DOUBLE, MPI_INT};
478 
479  ierr = MPI_Type_create_struct(2, lengths, displacements, types, &type);
480  AssertThrowMPI(ierr);
481 
482  ierr = MPI_Type_commit(&type);
483  AssertThrowMPI(ierr);
484  ierr = MPI_Allreduce(&in, &result, 1, type, op, mpi_communicator);
485  AssertThrowMPI(ierr);
486 
487  ierr = MPI_Type_free(&type);
488  AssertThrowMPI(ierr);
489 
490  ierr = MPI_Op_free(&op);
491  AssertThrowMPI(ierr);
492 
493  result.avg = result.sum / numproc;
494 
495  return result;
496  }
497 
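// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// min_max_avg() reduces one double per rank into min/max/sum/average plus
// the ranks on which the extrema were attained, using a single MPI_Allreduce
// with the custom operation defined above. Sketch; `report_timing` is a
// made-up name and <iostream> is assumed to be included:

void report_timing(const double local_wall_time, const MPI_Comm &comm)
{
  const dealii::Utilities::MPI::MinMaxAvg stats =
    dealii::Utilities::MPI::min_max_avg(local_wall_time, comm);

  if (dealii::Utilities::MPI::this_mpi_process(comm) == 0)
    std::cout << "wall time: avg " << stats.avg << "s, max " << stats.max
              << "s on rank " << stats.max_index << std::endl;
}
// -------------------------------------------------------------------------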
498 #else
499 
500  unsigned int
501  n_mpi_processes(const MPI_Comm &)
502  {
503  return 1;
504  }
505 
506 
507 
508  unsigned int
509  this_mpi_process(const MPI_Comm &)
510  {
511  return 0;
512  }
513 
514 
515  std::vector<IndexSet>
516  create_ascending_partitioning(const MPI_Comm & /*comm*/,
517  const IndexSet::size_type &local_size)
518  {
519  return std::vector<IndexSet>(1, complete_index_set(local_size));
520  }
521 
522 
523  MPI_Comm
524  duplicate_communicator(const MPI_Comm &mpi_communicator)
525  {
526  return mpi_communicator;
527  }
528 
529 
530 
531  MinMaxAvg
532  min_max_avg(const double my_value, const MPI_Comm &)
533  {
534  MinMaxAvg result;
535 
536  result.sum = my_value;
537  result.avg = my_value;
538  result.min = my_value;
539  result.max = my_value;
540  result.min_index = 0;
541  result.max_index = 0;
542 
543  return result;
544  }
545 
546 #endif
547 
548 
549 
 550  MPI_InitFinalize::MPI_InitFinalize(int & argc,
 551  char **& argv,
552  const unsigned int max_num_threads)
553  {
554  static bool constructor_has_already_run = false;
555  (void)constructor_has_already_run;
556  Assert(constructor_has_already_run == false,
557  ExcMessage("You can only create a single object of this class "
558  "in a program since it initializes the MPI system."));
559 
560 
561  int ierr = 0;
562 #ifdef DEAL_II_WITH_MPI
563  // if we have PETSc, we will initialize it and let it handle MPI.
564  // Otherwise, we will do it.
565  int MPI_has_been_started = 0;
566  ierr = MPI_Initialized(&MPI_has_been_started);
567  AssertThrowMPI(ierr);
568  AssertThrow(MPI_has_been_started == 0,
569  ExcMessage("MPI error. You can only start MPI once!"));
570 
571  int provided;
572  // this works like ierr = MPI_Init (&argc, &argv); but tells MPI that
573  // we might use several threads but never call two MPI functions at the
 574  same time. For an explanation of why we do this, see
575  // http://www.open-mpi.org/community/lists/users/2010/03/12244.php
576  int wanted = MPI_THREAD_SERIALIZED;
577  ierr = MPI_Init_thread(&argc, &argv, wanted, &provided);
578  AssertThrowMPI(ierr);
579 
580  // disable for now because at least some implementations always return
581  // MPI_THREAD_SINGLE.
582  // Assert(max_num_threads==1 || provided != MPI_THREAD_SINGLE,
583  // ExcMessage("MPI reports that we are not allowed to use multiple
584  // threads."));
585 #else
586  // make sure the compiler doesn't warn about these variables
587  (void)argc;
588  (void)argv;
589  (void)ierr;
590 #endif
591 
592  // we are allowed to call MPI_Init ourselves and PETScInitialize will
593  // detect this. This allows us to use MPI_Init_thread instead.
594 #ifdef DEAL_II_WITH_PETSC
595 # ifdef DEAL_II_WITH_SLEPC
596  // Initialize SLEPc (with PETSc):
 597  ierr = SlepcInitialize(&argc, &argv, nullptr, nullptr);
 598  AssertThrow(ierr == 0, ExcSLEPcError(ierr));
 599 # else
600  // or just initialize PETSc alone:
601  ierr = PetscInitialize(&argc, &argv, nullptr, nullptr);
602  AssertThrow(ierr == 0, ExcPETScError(ierr));
603 # endif
604 
605  // Disable PETSc exception handling. This just prints a large wall
606  // of text that is not particularly helpful for what we do:
607  PetscPopSignalHandler();
608 #endif
609 
610  // Initialize zoltan
611 #ifdef DEAL_II_TRILINOS_WITH_ZOLTAN
612  float version;
613  Zoltan_Initialize(argc, argv, &version);
614 #endif
615 
616 #ifdef DEAL_II_WITH_P4EST
617  // Initialize p4est and libsc components
618 # if DEAL_II_P4EST_VERSION_GTE(2, 0, 0, 0)
619 # else
620  // This feature is broken in version 2.0.0 for calls to
621  // MPI_Comm_create_group (see cburstedde/p4est#30).
622  // Disabling it leads to more verbose p4est error messages
623  // which should be fine.
624  sc_init(MPI_COMM_WORLD, 0, 0, nullptr, SC_LP_SILENT);
625 # endif
626  p4est_init(nullptr, SC_LP_SILENT);
627 #endif
628 
629  constructor_has_already_run = true;
630 
631 
632  // Now also see how many threads we'd like to run
633  if (max_num_threads != numbers::invalid_unsigned_int)
634  {
635  // set maximum number of threads (also respecting the environment
636  // variable that the called function evaluates) based on what the
637  // user asked
638  MultithreadInfo::set_thread_limit(max_num_threads);
639  }
640  else
641  // user wants automatic choice
642  {
643 #ifdef DEAL_II_WITH_MPI
644  // we need to figure out how many MPI processes there are on the
645  // current node, as well as how many CPU cores we have. for the
646  // first task, check what get_hostname() returns and then do an
647  // allgather so each processor gets the answer
648  //
649  // in calculating the length of the string, don't forget the
650  // terminating \0 on C-style strings
651  const std::string hostname = Utilities::System::get_hostname();
652  const unsigned int max_hostname_size =
653  Utilities::MPI::max(hostname.size() + 1, MPI_COMM_WORLD);
654  std::vector<char> hostname_array(max_hostname_size);
655  std::copy(hostname.c_str(),
656  hostname.c_str() + hostname.size() + 1,
657  hostname_array.begin());
658 
659  std::vector<char> all_hostnames(max_hostname_size *
660  MPI::n_mpi_processes(MPI_COMM_WORLD));
661  const int ierr = MPI_Allgather(hostname_array.data(),
662  max_hostname_size,
663  MPI_CHAR,
664  all_hostnames.data(),
665  max_hostname_size,
666  MPI_CHAR,
667  MPI_COMM_WORLD);
668  AssertThrowMPI(ierr);
669 
 670  // count how often our own hostname appears and determine which
 671  // instance of it the current process is
672  unsigned int n_local_processes = 0;
673  unsigned int nth_process_on_host = 0;
674  for (unsigned int i = 0; i < MPI::n_mpi_processes(MPI_COMM_WORLD);
675  ++i)
676  if (std::string(all_hostnames.data() + i * max_hostname_size) ==
677  hostname)
678  {
679  ++n_local_processes;
680  if (i <= MPI::this_mpi_process(MPI_COMM_WORLD))
681  ++nth_process_on_host;
682  }
683  Assert(nth_process_on_host > 0, ExcInternalError());
684 
685 
686  // compute how many cores each process gets. if the number does not
687  // divide evenly, then we get one more core if we are among the
688  // first few processes
689  //
690  // if the number would be zero, round up to one since every process
691  // needs to have at least one thread
692  const unsigned int n_threads =
693  std::max(MultithreadInfo::n_cores() / n_local_processes +
694  (nth_process_on_host <=
695  MultithreadInfo::n_cores() % n_local_processes ?
696  1 :
697  0),
698  1U);
699 #else
700  const unsigned int n_threads = MultithreadInfo::n_cores();
701 #endif
702 
 703  // finally set this number of threads
 704  MultithreadInfo::set_thread_limit(n_threads);
 705  }
706  }
707 
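// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// A typical deal.II main() relies on this constructor/destructor pair for
// all MPI (and PETSc/SLEPc/p4est/Zoltan) setup and teardown:

int main(int argc, char **argv)
{
  // limit each process to one thread here; passing
  // numbers::invalid_unsigned_int (the default) selects the automatic
  // choice implemented above
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  // ... parallel program ...

  return 0; // MPI_Finalize() etc. run in the destructor
}
// -------------------------------------------------------------------------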
708 
 709  MPI_InitFinalize::~MPI_InitFinalize()
 710  {
711  // make memory pool release all PETSc/Trilinos/MPI-based vectors that
 712  // are no longer used at this point. This is relevant because the static
 713  // destructors of these vectors would otherwise run at the end of the
 714  // program, after MPI_Finalize has been called, leading to errors
715 
716 #ifdef DEAL_II_WITH_MPI
717  // Start with the deal.II MPI vectors (need to do this before finalizing
718  // PETSc because it finalizes MPI). Delete vectors from the pools:
 719  GrowingVectorMemory<
 720  LinearAlgebra::distributed::Vector<double>>::release_unused_memory();
 721  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<double>>::
 722  release_unused_memory();
 723  GrowingVectorMemory<
 724  LinearAlgebra::distributed::Vector<float>>::release_unused_memory();
 725  GrowingVectorMemory<LinearAlgebra::distributed::BlockVector<float>>::
 726  release_unused_memory();
727 
728  // Next with Trilinos:
729 # if defined(DEAL_II_WITH_TRILINOS)
 730  GrowingVectorMemory<
 731  TrilinosWrappers::MPI::Vector>::release_unused_memory();
 732  GrowingVectorMemory<
 733  TrilinosWrappers::MPI::BlockVector>::release_unused_memory();
734 # endif
735 #endif
736 
737 
738  // Now deal with PETSc (with or without MPI). Only delete the vectors if
739  // finalize hasn't been called yet, otherwise this will lead to errors.
740 #ifdef DEAL_II_WITH_PETSC
741  if ((PetscInitializeCalled == PETSC_TRUE) &&
742  (PetscFinalizeCalled == PETSC_FALSE))
743  {
 744  GrowingVectorMemory<
 745  PETScWrappers::MPI::Vector>::release_unused_memory();
 746  GrowingVectorMemory<
 747  PETScWrappers::MPI::BlockVector>::release_unused_memory();
748 
749 # ifdef DEAL_II_WITH_SLEPC
750  // and now end SLEPc (with PETSc)
751  SlepcFinalize();
752 # else
753  // or just end PETSc.
754  PetscFinalize();
755 # endif
756  }
757 #endif
758 
759 // There is a similar issue with CUDA: The destructor of static objects might
760 // run after the CUDA driver is unloaded. Hence, also release all memory
761 // related to CUDA vectors.
762 #ifdef DEAL_II_WITH_CUDA
 763  GrowingVectorMemory<
 764  LinearAlgebra::distributed::Vector<float, MemorySpace::CUDA>>::
 765  release_unused_memory();
 766  GrowingVectorMemory<
 767  LinearAlgebra::distributed::Vector<double, MemorySpace::CUDA>>::
 768  release_unused_memory();
769 #endif
770 
771 #ifdef DEAL_II_WITH_P4EST
772  // now end p4est and libsc
773  // Note: p4est has no finalize function
774  sc_finalize();
775 #endif
776 
777 
778  // only MPI_Finalize if we are running with MPI. We also need to do this
779  // when running PETSc, because we initialize MPI ourselves before
780  // calling PetscInitialize
781 #ifdef DEAL_II_WITH_MPI
782  if (job_supports_mpi() == true)
783  {
784 # if __cpp_lib_uncaught_exceptions >= 201411
785  // std::uncaught_exception() is deprecated in c++17
786  if (std::uncaught_exceptions() > 0)
787 # else
788  if (std::uncaught_exception() == true)
789 # endif
790  {
791  std::cerr
792  << "ERROR: Uncaught exception in MPI_InitFinalize on proc "
793  << this_mpi_process(MPI_COMM_WORLD)
794  << ". Skipping MPI_Finalize() to avoid a deadlock."
795  << std::endl;
796  }
797  else
798  {
799  const int ierr = MPI_Finalize();
800  (void)ierr;
801  AssertNothrow(ierr == MPI_SUCCESS, ::ExcMPI(ierr));
802  }
803  }
804 #endif
805  }
806 
807 
808 
809  bool
 810  job_supports_mpi()
 811  {
812 #ifdef DEAL_II_WITH_MPI
813  int MPI_has_been_started = 0;
814  const int ierr = MPI_Initialized(&MPI_has_been_started);
815  AssertThrowMPI(ierr);
816 
817  return (MPI_has_been_started > 0);
818 #else
819  return false;
820 #endif
821  }
822 
823  template <typename T1, typename T2>
 824  void
 825  ConsensusAlgorithmProcess<T1, T2>::process_request(const unsigned int,
 826  const std::vector<T1> &,
827  std::vector<T2> &)
828  {
 829  // nothing to do
830  }
831 
832 
833 
834  template <typename T1, typename T2>
 835  void
 836  ConsensusAlgorithmProcess<T1, T2>::pack_recv_buffer(const int,
 837  std::vector<T1> &)
838  {
 839  // nothing to do
840  }
841 
842 
843 
844  template <typename T1, typename T2>
 845  void
 846  ConsensusAlgorithmProcess<T1, T2>::prepare_recv_buffer(const int,
 847  std::vector<T2> &)
848  {
 849  // nothing to do
850  }
851 
852 
853 
854  template <typename T1, typename T2>
 855  void
 856  ConsensusAlgorithmProcess<T1, T2>::unpack_recv_buffer(
 857  const int,
858  const std::vector<T2> &)
859  {
 860  // nothing to do
861  }
862 
863 
864 
 865  template <typename T1, typename T2>
 866  ConsensusAlgorithm<T1, T2>::ConsensusAlgorithm(
 867  ConsensusAlgorithmProcess<T1, T2> &process,
 868  const MPI_Comm & comm)
869  : process(process)
870  , comm(comm)
871  , my_rank(this_mpi_process(comm))
872  , n_procs(n_mpi_processes(comm))
873  {}
874 
875 
876 
 877  template <typename T1, typename T2>
 878  ConsensusAlgorithm_NBX<T1, T2>::ConsensusAlgorithm_NBX(
 879  ConsensusAlgorithmProcess<T1, T2> &process,
 880  const MPI_Comm & comm)
881  : ConsensusAlgorithm<T1, T2>(process, comm)
882  {}
883 
884 
885 
886  template <typename T1, typename T2>
 887  void
 888  ConsensusAlgorithm_NBX<T1, T2>::run()
 889  {
 890  // 1) send requests and start receiving the answers
 891  start_communication();
 892 
893  // 2) answer requests and check if all requests of this process have been
894  // answered
 895  while (!check_own_state())
 896  process_requests();
 897 
898  // 3) signal to all other processes that all requests of this process have
899  // been answered
900  signal_finish();
901 
902  // 4) nevertheless, this process has to keep on answering (potential)
903  // incoming requests until all processes have received the
904  // answer to all requests
 905  while (!check_global_state())
 906  process_requests();
 907 
 908  // 5) process the answer to all requests
 909  clean_up_and_end_communication();
 910  }
911 
912 
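// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// To use ConsensusAlgorithm_NBX one derives from ConsensusAlgorithmProcess
// and overrides the hooks called from run() above. The sketch below sends
// one integer to the right neighbor and answers with that integer doubled;
// all names other than the deal.II classes are made up for illustration:

class RightNeighborProcess
  : public dealii::Utilities::MPI::ConsensusAlgorithmProcess<int, int>
{
public:
  RightNeighborProcess(const MPI_Comm &comm)
    : comm(comm)
  {}

  virtual std::vector<unsigned int> compute_targets() override
  {
    const unsigned int rank    = dealii::Utilities::MPI::this_mpi_process(comm);
    const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);
    return n_ranks > 1 ? std::vector<unsigned int>{(rank + 1) % n_ranks} :
                         std::vector<unsigned int>{};
  }

  virtual void pack_recv_buffer(const int, std::vector<int> &send_buffer) override
  {
    // the "request": our own rank
    send_buffer = {static_cast<int>(dealii::Utilities::MPI::this_mpi_process(comm))};
  }

  virtual void process_request(const unsigned int,
                               const std::vector<int> &buffer_recv,
                               std::vector<int> &      request_buffer) override
  {
    request_buffer = {2 * buffer_recv[0]}; // the answer sent back
  }

  virtual void prepare_recv_buffer(const int, std::vector<int> &recv_buffer) override
  {
    recv_buffer.resize(1); // we expect exactly one integer back
  }

  virtual void unpack_recv_buffer(const int,
                                  const std::vector<int> &recv_buffer) override
  {
    answer = recv_buffer[0];
  }

  int answer = 0;

private:
  const MPI_Comm &comm;
};

// driven by:
//   RightNeighborProcess process(comm);
//   dealii::Utilities::MPI::ConsensusAlgorithm_NBX<int, int> algorithm(process, comm);
//   algorithm.run();
// -------------------------------------------------------------------------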
913 
914  template <typename T1, typename T2>
915  bool
 916  ConsensusAlgorithm_NBX<T1, T2>::check_own_state()
 917  {
918 #ifdef DEAL_II_WITH_MPI
919  int all_receive_requests_are_done;
920  const auto ierr = MPI_Testall(recv_requests.size(),
921  recv_requests.data(),
922  &all_receive_requests_are_done,
923  MPI_STATUSES_IGNORE);
924  AssertThrowMPI(ierr);
925 
926  return all_receive_requests_are_done;
927 #else
928  return true;
929 #endif
930  }
931 
932 
933 
934  template <typename T1, typename T2>
935  void
 936  ConsensusAlgorithm_NBX<T1, T2>::signal_finish()
 937  {
938 #ifdef DEAL_II_WITH_MPI
939 # if DEAL_II_MPI_VERSION_GTE(3, 0)
940  const auto ierr = MPI_Ibarrier(this->comm, &barrier_request);
941  AssertThrowMPI(ierr);
942 # else
943  AssertThrow(
944  false,
945  ExcMessage(
946  "ConsensusAlgorithm_NBX uses MPI 3.0 features. You should compile with at least MPI 3.0."));
947 # endif
948 #endif
949  }
950 
951 
952 
953  template <typename T1, typename T2>
954  bool
 955  ConsensusAlgorithm_NBX<T1, T2>::check_global_state()
 956  {
957 #ifdef DEAL_II_WITH_MPI
958  int all_ranks_reached_barrier;
959  const auto ierr = MPI_Test(&barrier_request,
960  &all_ranks_reached_barrier,
961  MPI_STATUSES_IGNORE);
962  AssertThrowMPI(ierr);
963  return all_ranks_reached_barrier;
964 #else
965  return true;
966 #endif
967  }
968 
969 
970 
971  template <typename T1, typename T2>
972  void
 973  ConsensusAlgorithm_NBX<T1, T2>::process_requests()
 974  {
975 #ifdef DEAL_II_WITH_MPI
976  // check if there is a request pending
977  MPI_Status status;
978  int request_is_pending;
979  const auto ierr = MPI_Iprobe(
980  MPI_ANY_SOURCE, tag_request, this->comm, &request_is_pending, &status);
981  AssertThrowMPI(ierr);
982 
983  if (request_is_pending) // request is pending
984  {
985  // get rank of requesting process
986  const auto other_rank = status.MPI_SOURCE;
987 
988 # ifdef DEBUG
989  Assert(requesting_processes.find(other_rank) ==
990  requesting_processes.end(),
991  ExcMessage("Process is requesting a second time!"));
992  requesting_processes.insert(other_rank);
993 # endif
994 
995  std::vector<T1> buffer_recv;
 996  // get size of incoming message
997  int number_amount;
998  auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
999  AssertThrowMPI(ierr);
1000 
1001  // allocate memory for incoming message
1002  Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
1003  buffer_recv.resize(number_amount / sizeof(T1));
1004  ierr = MPI_Recv(buffer_recv.data(),
1005  number_amount,
1006  MPI_BYTE,
1007  other_rank,
1008  tag_request,
1009  this->comm,
1010  &status);
1011  AssertThrowMPI(ierr);
1012 
1013  // allocate memory for answer message
1014  request_buffers.emplace_back();
1015  request_requests.emplace_back(new MPI_Request);
1016 
1017  // process request
1018  auto &request_buffer = request_buffers.back();
1019  this->process.process_request(other_rank,
1020  buffer_recv,
1021  request_buffer);
1022 
1023  // start to send answer back
1024  ierr = MPI_Isend(request_buffer.data(),
1025  request_buffer.size() * sizeof(T2),
1026  MPI_BYTE,
1027  other_rank,
1028  tag_delivery,
1029  this->comm,
1030  request_requests.back().get());
1031  AssertThrowMPI(ierr);
1032  }
1033 #endif
1034  }
1035 
1036 
1037 
1038  template <typename T1, typename T2>
1039  void
 1040  ConsensusAlgorithm_NBX<T1, T2>::start_communication()
 1041  {
1042 #ifdef DEAL_II_WITH_MPI
1043  // 1)
1044  targets = this->process.compute_targets();
1045  const auto n_targets = targets.size();
1046 
1047  // 2) allocate memory
1048  recv_buffers.resize(n_targets);
1049  recv_requests.resize(n_targets);
1050  send_requests.resize(n_targets);
1051  send_buffers.resize(n_targets);
1052 
1053  {
1054  // 4) send and receive
1055  for (unsigned int i = 0; i < n_targets; i++)
1056  {
1057  const unsigned int rank = targets[i];
1058  const unsigned int index = i;
1059 
1060  // translate index set to a list of pairs
1061  auto &send_buffer = send_buffers[index];
1062  this->process.pack_recv_buffer(rank, send_buffer);
1063 
1064  // start to send data
1065  auto ierr = MPI_Isend(send_buffer.data(),
1066  send_buffer.size() * sizeof(T1),
1067  MPI_BYTE,
1068  rank,
1069  tag_request,
1070  this->comm,
1071  &send_requests[index]);
1072  AssertThrowMPI(ierr);
1073 
1074  // start to receive data
1075  auto &recv_buffer = recv_buffers[index];
1076  this->process.prepare_recv_buffer(rank, recv_buffer);
1077  ierr = MPI_Irecv(recv_buffer.data(),
1078  recv_buffer.size() * sizeof(T2),
1079  MPI_BYTE,
1080  rank,
1081  tag_delivery,
1082  this->comm,
1083  &recv_requests[index]);
1084  AssertThrowMPI(ierr);
1085  }
1086  }
1087 #endif
1088  }
1089 
1090 
1091 
1092  template <typename T1, typename T2>
1093  void
 1094  ConsensusAlgorithm_NBX<T1, T2>::clean_up_and_end_communication()
 1095  {
1096 #ifdef DEAL_II_WITH_MPI
1097  // clean up
1098  {
1099  auto ierr = MPI_Waitall(send_requests.size(),
1100  send_requests.data(),
1101  MPI_STATUSES_IGNORE);
1102  AssertThrowMPI(ierr);
1103 
1104  ierr = MPI_Waitall(recv_requests.size(),
1105  recv_requests.data(),
1106  MPI_STATUSES_IGNORE);
1107  AssertThrowMPI(ierr);
1108 
1109  ierr = MPI_Wait(&barrier_request, MPI_STATUS_IGNORE);
1110  AssertThrowMPI(ierr);
1111 
1112  for (auto &i : request_requests)
1113  {
1114  const auto ierr = MPI_Wait(i.get(), MPI_STATUS_IGNORE);
1115  AssertThrowMPI(ierr);
1116  }
1117 
1118 # ifdef DEBUG
 1119  // note: MPI_Ibarrier seems to cause problems during testing; this
 1120  // additional barrier seems to help
1121  MPI_Barrier(this->comm);
1122 # endif
1123  }
1124 
1125  // unpack data
1126  {
1127  for (unsigned int i = 0; i < targets.size(); i++)
1128  this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
1129  }
1130 #endif
1131  }
1132 
1133 
1134 
 1139  class ConsensusAlgorithmProcessTargets
 1140  : public ConsensusAlgorithmProcess<int, int>
1141  {
1142  public:
1143  ConsensusAlgorithmProcessTargets(std::vector<unsigned int> &target)
1144  : target(target)
1145  {}
1146 
1147  using T1 = int;
1148  using T2 = int;
1149 
1150  virtual void
1151  process_request(const unsigned int other_rank,
1152  const std::vector<T1> &,
1153  std::vector<T2> &) override
1154  {
1155  this->sources.push_back(other_rank);
1156  }
1157 
1163  virtual std::vector<unsigned int>
1164  compute_targets() override
1165  {
1166  return target;
1167  }
1168 
1174  std::vector<unsigned int>
 1175  get_result()
 1176  {
1177  std::sort(sources.begin(), sources.end());
1178  return sources;
1179  }
1180 
1181  private:
1185  const std::vector<unsigned int> &target;
1186 
1190  std::vector<unsigned int> sources;
1191  };
1192 
1193 
1194 
 1195  template <typename T1, typename T2>
 1196  ConsensusAlgorithm_PEX<T1, T2>::ConsensusAlgorithm_PEX(
 1197  ConsensusAlgorithmProcess<T1, T2> &process,
 1198  const MPI_Comm & comm)
1199  : ConsensusAlgorithm<T1, T2>(process, comm)
1200  {}
1201 
1202 
1203 
1204  template <typename T1, typename T2>
1205  void
 1206  ConsensusAlgorithm_PEX<T1, T2>::run()
 1207  {
1208  // 1) send requests and start receiving the answers
1209  // especially determine how many requests are expected
1210  const unsigned int n_requests = start_communication();
1211 
1212  // 2) answer requests
1213  for (unsigned int request = 0; request < n_requests; request++)
1214  process_requests(request);
1215 
1216  // 3) process answers
 1217  clean_up_and_end_communication();
 1218  }
1219 
1220 
1221 
1222  template <typename T1, typename T2>
1223  void
 1224  ConsensusAlgorithm_PEX<T1, T2>::process_requests(int index)
 1225  {
1226 #ifdef DEAL_II_WITH_MPI
1227  MPI_Status status;
1228  MPI_Probe(MPI_ANY_SOURCE, tag_request, this->comm, &status);
1229 
1230  // get rank of incoming message
1231  const auto other_rank = status.MPI_SOURCE;
1232 
1233  std::vector<T1> buffer_recv;
1234 
1235  // get size of incoming message
1236  int number_amount;
1237  auto ierr = MPI_Get_count(&status, MPI_BYTE, &number_amount);
1238  AssertThrowMPI(ierr);
1239 
1240  // allocate memory for incoming message
1241  Assert(number_amount % sizeof(T1) == 0, ExcInternalError());
1242  buffer_recv.resize(number_amount / sizeof(T1));
1243  ierr = MPI_Recv(buffer_recv.data(),
1244  number_amount,
1245  MPI_BYTE,
1246  other_rank,
1247  tag_request,
1248  this->comm,
1249  &status);
1250  AssertThrowMPI(ierr);
1251 
1252  // process request
1253  auto &request_buffer = requests_buffers[index];
1254  this->process.process_request(other_rank, buffer_recv, request_buffer);
1255 
1256  // start to send answer back
1257  ierr = MPI_Isend(request_buffer.data(),
1258  request_buffer.size() * sizeof(T2),
1259  MPI_BYTE,
1260  other_rank,
1261  tag_delivery,
1262  this->comm,
1263  &requests_answers[index]);
1264  AssertThrowMPI(ierr);
1265 #else
1266  (void)index;
1267 #endif
1268  }
1269 
1270 
1271 
1272  template <typename T1, typename T2>
1273  unsigned int
 1274  ConsensusAlgorithm_PEX<T1, T2>::start_communication()
 1275  {
1276 #ifdef DEAL_II_WITH_MPI
1277  // 1) determine with which processes this process wants to communicate
1278  targets = this->process.compute_targets();
1279 
1280  // 2) determine who wants to communicate with this process
1281  const bool use_nbx = false;
1282  if (!use_nbx)
1283  {
1284  sources =
 1285  compute_point_to_point_communication_pattern(this->comm, targets);
 1286  }
1287  else
1288  {
 1289  ConsensusAlgorithmProcessTargets process(targets);
 1290  ConsensusAlgorithm_NBX<ConsensusAlgorithmProcessTargets::T1,
1291  ConsensusAlgorithmProcessTargets::T2>
1292  consensus_algorithm(process, this->comm);
1293  consensus_algorithm.run();
1294  sources = process.get_result();
1295  }
1296 
1297  const auto n_targets = targets.size();
1298  const auto n_sources = sources.size();
1299 
1300  // 2) allocate memory
1301  recv_buffers.resize(n_targets);
1302  send_buffers.resize(n_targets);
1303  send_and_recv_buffers.resize(2 * n_targets);
1304 
1305  requests_answers.resize(n_sources);
1306  requests_buffers.resize(n_sources);
1307 
1308  // 4) send and receive
1309  for (unsigned int i = 0; i < n_targets; i++)
1310  {
1311  const unsigned int rank = targets[i];
1312 
1313  // pack data which should be sent
1314  auto &send_buffer = send_buffers[i];
1315  this->process.pack_recv_buffer(rank, send_buffer);
1316 
1317  // start to send data
1318  auto ierr = MPI_Isend(send_buffer.data(),
1319  send_buffer.size() * sizeof(T1),
1320  MPI_BYTE,
1321  rank,
1322  tag_request,
1323  this->comm,
1324  &send_and_recv_buffers[n_targets + i]);
1325  AssertThrowMPI(ierr);
1326 
1327  // start to receive data
1328  auto &recv_buffer = recv_buffers[i];
1329  this->process.prepare_recv_buffer(rank, recv_buffer);
1330  ierr = MPI_Irecv(recv_buffer.data(),
1331  recv_buffer.size() * sizeof(T2),
1332  MPI_BYTE,
1333  rank,
1334  tag_delivery,
1335  this->comm,
1336  &send_and_recv_buffers[i]);
1337  AssertThrowMPI(ierr);
1338  }
1339 
1340  return sources.size();
1341 #else
1342  return 0;
1343 #endif
1344  }
1345 
1346 
1347 
1348  template <typename T1, typename T2>
1349  void
 1350  ConsensusAlgorithm_PEX<T1, T2>::clean_up_and_end_communication()
 1351  {
1352 #ifdef DEAL_II_WITH_MPI
1353  // finalize all MPI_Requests
1354  MPI_Waitall(send_and_recv_buffers.size(),
1355  send_and_recv_buffers.data(),
1356  MPI_STATUSES_IGNORE);
1357  MPI_Waitall(requests_answers.size(),
1358  requests_answers.data(),
1359  MPI_STATUSES_IGNORE);
1360 
1361  // unpack received data
1362  for (unsigned int i = 0; i < targets.size(); i++)
1363  this->process.unpack_recv_buffer(targets[i], recv_buffers[i]);
1364 #endif
1365  }
1366 
1367 
1368 
 1369  template <typename T1, typename T2>
 1370  ConsensusAlgorithmSelector<T1, T2>::ConsensusAlgorithmSelector(
 1371  ConsensusAlgorithmProcess<T1, T2> &process,
 1372  const MPI_Comm & comm)
1373  : ConsensusAlgorithm<T1, T2>(process, comm)
1374  {
1375  // Depending on the number of processes we switch between implementations.
1376  // We reduce the threshold for debug mode to be able to test also the
1377  // non-blocking implementation. This feature is tested by:
1378  // tests/multigrid/transfer_matrix_free_06.with_mpi=true.with_p4est=true.with_trilinos=true.mpirun=15.output
1379 #ifdef DEAL_II_WITH_MPI
1380 # if DEAL_II_MPI_VERSION_GTE(3, 0)
1381 # ifdef DEBUG
1382  if (Utilities::MPI::n_mpi_processes(comm) > 14)
1383 # else
1384  if (Utilities::MPI::n_mpi_processes(comm) > 99)
1385 # endif
1386  consensus_algo.reset(new ConsensusAlgorithm_NBX<T1, T2>(process, comm));
1387  else
1388 # endif
1389 #endif
1390  consensus_algo.reset(new ConsensusAlgorithm_PEX<T1, T2>(process, comm));
1391  }
1392 
1393 
1394 
1395  template <typename T1, typename T2>
1396  void
 1397  ConsensusAlgorithmSelector<T1, T2>::run()
 1398  {
1399  consensus_algo->run();
1400  }
1401 
1402 
1403 
1404  std::vector<unsigned int>
1405  compute_index_owner(const IndexSet &owned_indices,
1406  const IndexSet &indices_to_look_up,
1407  const MPI_Comm &comm)
1408  {
1409  Assert(owned_indices.size() == indices_to_look_up.size(),
1410  ExcMessage("IndexSets have to have the same sizes."));
1411 
1412  Assert(
1413  owned_indices.size() == Utilities::MPI::max(owned_indices.size(), comm),
1414  ExcMessage("IndexSets have to have the same size on all processes."));
1415 
1416  std::vector<unsigned int> owning_ranks(indices_to_look_up.n_elements());
1417 
1418  // Step 1: setup dictionary
1419  // The input owned_indices can be partitioned arbitrarily. In the
1420  // dictionary, the index set is statically repartitioned among the
 1421  // processes again and extended with information about the actual owner
 1422  // of that index.
 1423  internal::ComputeIndexOwner::ConsensusAlgorithmPayload process(
 1424  owned_indices, indices_to_look_up, comm, owning_ranks);
1425 
1426  // Step 2: read dictionary
1427  // Communicate with the process who owns the index in the static
1428  // partition (i.e. in the dictionary). This process returns the actual
1429  // owner of the index.
 1430  ConsensusAlgorithmSelector<
 1431  std::pair<types::global_dof_index, types::global_dof_index>,
1432  unsigned int>
1433  consensus_algorithm(process, comm);
1434  consensus_algorithm.run();
1435 
1436  return owning_ranks;
1437  }
1438 
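// --- Editorial usage sketch (not part of mpi.cc) -------------------------
// compute_index_owner() answers, for every index a process wants to look
// up, which rank owns it under an arbitrary (possibly non-contiguous)
// partition. Sketch with made-up sets; `example_owner_lookup` is a made-up
// name:

std::vector<unsigned int> example_owner_lookup(const MPI_Comm &comm)
{
  const unsigned int rank    = dealii::Utilities::MPI::this_mpi_process(comm);
  const unsigned int n_ranks = dealii::Utilities::MPI::n_mpi_processes(comm);
  const unsigned int size    = 100; // global number of indices, same on all ranks

  // this rank owns a contiguous slice ...
  dealii::IndexSet owned(size);
  owned.add_range(rank * size / n_ranks, (rank + 1) * size / n_ranks);
  owned.compress();

  // ... and wants to know who owns index 0 and the last index
  dealii::IndexSet to_look_up(size);
  to_look_up.add_index(0);
  to_look_up.add_index(size - 1);
  to_look_up.compress();

  // one owning rank per element of to_look_up, in index order
  return dealii::Utilities::MPI::compute_index_owner(owned, to_look_up, comm);
}
// -------------------------------------------------------------------------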
1439  template class ConsensusAlgorithmSelector<
1440  std::pair<types::global_dof_index, types::global_dof_index>,
1441  unsigned int>;
1442 
1443 #include "mpi.inst"
1444  } // end of namespace MPI
1445 } // end of namespace Utilities
1446 
1447 DEAL_II_NAMESPACE_CLOSE