Reference documentation for deal.II version 9.0.0
mpi.h
// ---------------------------------------------------------------------
//
// Copyright (C) 2011 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------

#ifndef dealii_mpi_h
#define dealii_mpi_h

#include <deal.II/base/config.h>
#include <deal.II/base/array_view.h>

#include <vector>
#include <map>

#if !defined(DEAL_II_WITH_MPI) && !defined(DEAL_II_WITH_PETSC)
// Without MPI, we would still like to use some constructs with MPI data
// types. Therefore, create some dummies.
typedef int MPI_Comm;
typedef int MPI_Datatype;
typedef int MPI_Op;
#  ifndef MPI_COMM_WORLD
#    define MPI_COMM_WORLD 0
#  endif
#  ifndef MPI_COMM_SELF
#    define MPI_COMM_SELF 0
#  endif
#  ifndef MPI_MIN
#    define MPI_MIN 0
#  endif
#  ifndef MPI_MAX
#    define MPI_MAX 0
#  endif
#  ifndef MPI_SUM
#    define MPI_SUM 0
#  endif
#endif

DEAL_II_NAMESPACE_OPEN


// Forward type declarations to allow MPI sums over tensorial types
template <int rank, int dim, typename Number> class Tensor;
template <int rank, int dim, typename Number> class SymmetricTensor;
template <typename Number> class SparseMatrix;

namespace Utilities
{
  namespace MPI
  {
    // Return the number of MPI processes in the given communicator.
    unsigned int n_mpi_processes (const MPI_Comm &mpi_communicator);

    // Return the rank of the present MPI process in the given communicator.
    unsigned int this_mpi_process (const MPI_Comm &mpi_communicator);

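    // Example usage (a minimal sketch, assuming 'using namespace dealii' as
    // in the tutorial programs, that MPI has been initialized, and that
    // <iostream> is included):
    //
    //   const unsigned int n_ranks = Utilities::MPI::n_mpi_processes (MPI_COMM_WORLD);
    //   const unsigned int my_rank = Utilities::MPI::this_mpi_process (MPI_COMM_WORLD);
    //   if (my_rank == 0)
    //     std::cout << "Running with " << n_ranks << " MPI processes" << std::endl;
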
    // Given the list of ranks this process wants to send data to, return
    // the list of ranks it will receive data from.
    std::vector<unsigned int>
    compute_point_to_point_communication_pattern (const MPI_Comm &mpi_comm,
                                                  const std::vector<unsigned int> &destinations);

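    // Example usage (a minimal sketch, for some communicator 'comm'): every
    // process announces the ranks it intends to send to; the function
    // returns the ranks it should expect to receive from. The pattern used
    // here (each rank sends to its right neighbor) is purely illustrative.
    //
    //   const unsigned int my_rank = Utilities::MPI::this_mpi_process (comm);
    //   const unsigned int n_procs = Utilities::MPI::n_mpi_processes (comm);
    //   const std::vector<unsigned int> destinations (1, (my_rank + 1) % n_procs);
    //   const std::vector<unsigned int> sources =
    //     Utilities::MPI::compute_point_to_point_communication_pattern (comm, destinations);
    //   // here, 'sources' ends up listing the left neighbor of this rank.
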
    // Create a duplicate of the given communicator (a thin wrapper around
    // MPI_Comm_dup).
    MPI_Comm duplicate_communicator (const MPI_Comm &mpi_communicator);

#ifdef DEAL_II_WITH_MPI
    // Create a new communicator from a group of processes, in the spirit of
    // MPI_Comm_create_group.
    int create_group(const MPI_Comm &comm,
                     const MPI_Group &group,
                     const int tag,
                     MPI_Comm *new_comm);
#endif

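    // Example usage (a minimal sketch, only meaningful when deal.II is
    // configured with MPI): build a new communicator over the same group of
    // processes as MPI_COMM_WORLD, using a user-chosen tag. The tag value
    // 55 is arbitrary.
    //
    //   MPI_Group world_group;
    //   MPI_Comm_group (MPI_COMM_WORLD, &world_group);
    //   MPI_Comm new_comm;
    //   const int ierr = Utilities::MPI::create_group (MPI_COMM_WORLD, world_group,
    //                                                  /*tag=*/55, &new_comm);
    //   AssertThrowMPI(ierr);
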
    // Compute the sum of the value t over all processors and return the
    // result to all of them.
    template <typename T>
    T sum (const T &t,
           const MPI_Comm &mpi_communicator);

    // Like the previous function, but perform the element-wise sum of the
    // entries of a container and write the result into a second container.
    template <typename T, typename U>
    void sum (const T &values,
              const MPI_Comm &mpi_communicator,
              U &sums);

    template <typename T>
    void sum (const ArrayView<const T> &values,
              const MPI_Comm &mpi_communicator,
              const ArrayView<T> &sums);

    // Element-wise sums over SymmetricTensor and Tensor objects.
    template <int rank, int dim, typename Number>
    SymmetricTensor<rank,dim,Number>
    sum (const SymmetricTensor<rank,dim,Number> &local,
         const MPI_Comm &mpi_communicator);

    template <int rank, int dim, typename Number>
    Tensor<rank,dim,Number>
    sum (const Tensor<rank,dim,Number> &local,
         const MPI_Comm &mpi_communicator);

    // Perform an MPI sum of the entries of a sparse matrix; the local and
    // global matrices are assumed to share the same sparsity pattern.
    template <typename Number>
    void
    sum (const SparseMatrix<Number> &local,
         const MPI_Comm &mpi_communicator,
         SparseMatrix<Number> &global);

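    // Example usage (a minimal sketch, for some communicator
    // mpi_communicator): reduce a scalar and, element-wise, a std::vector
    // over all processes. estimate_local_error() is a hypothetical helper.
    //
    //   const double local_error  = estimate_local_error ();   // hypothetical helper
    //   const double global_error = Utilities::MPI::sum (local_error, mpi_communicator);
    //
    //   std::vector<double> local_histogram (10, 1.0);
    //   std::vector<double> global_histogram (10);
    //   Utilities::MPI::sum (local_histogram, mpi_communicator, global_histogram);
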
    // Return the maximum over all processors of the value t. The result is
    // available on all processors.
    template <typename T>
    T max (const T &t,
           const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void max (const T &values,
              const MPI_Comm &mpi_communicator,
              U &maxima);

    template <typename T>
    void max (const ArrayView<const T> &values,
              const MPI_Comm &mpi_communicator,
              const ArrayView<T> &maxima);

    // Return the minimum over all processors of the value t. The result is
    // available on all processors.
    template <typename T>
    T min (const T &t,
           const MPI_Comm &mpi_communicator);

    template <typename T, typename U>
    void min (const T &values,
              const MPI_Comm &mpi_communicator,
              U &minima);

    template <typename T>
    void min (const ArrayView<const T> &values,
              const MPI_Comm &mpi_communicator,
              const ArrayView<T> &minima);

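    // Example usage (a minimal sketch): the admissible global time step is
    // the smallest of the locally computed ones, and the largest local
    // residual is found analogously. compute_local_time_step() and
    // local_residual are hypothetical stand-ins.
    //
    //   const double local_dt  = compute_local_time_step ();   // hypothetical helper
    //   const double global_dt = Utilities::MPI::min (local_dt, mpi_communicator);
    //
    //   const double global_residual = Utilities::MPI::max (local_residual, mpi_communicator);
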
    // A data structure to store the result of min_max_avg(): the sum,
    // minimum, maximum, and average of a value over all processors, plus
    // the ranks of the processors that hold the minimum and maximum.
    struct MinMaxAvg
    {
      double sum;

      double min;

      double max;

      unsigned int min_index;

      unsigned int max_index;

      double avg;
    };

    // Return minimum, maximum, sum, and average of the given value over all
    // processors, as well as the ranks of the processors holding the
    // minimum and maximum. The result is available on all processors.
    MinMaxAvg
    min_max_avg (const double my_value,
                 const MPI_Comm &mpi_communicator);

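    // Example usage (a minimal sketch): report load-balance statistics for a
    // locally measured wall time; 'local_time' is a hypothetical stand-in
    // for whatever each process measured.
    //
    //   const Utilities::MPI::MinMaxAvg stats =
    //     Utilities::MPI::min_max_avg (local_time, mpi_communicator);
    //   if (Utilities::MPI::this_mpi_process (mpi_communicator) == 0)
    //     std::cout << "wall time: min " << stats.min << " on rank " << stats.min_index
    //               << ", max " << stats.max << " on rank " << stats.max_index
    //               << ", avg " << stats.avg << std::endl;
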
    // A class whose constructor initializes MPI (as well as dependent
    // libraries such as PETSc, if deal.II is configured with them) and
    // whose destructor finalizes it again. The last constructor argument
    // limits the number of threads each MPI process may use.
    class MPI_InitFinalize
    {
    public:
      MPI_InitFinalize (int &argc,
                        char ** &argv,
                        const unsigned int max_num_threads = numbers::invalid_unsigned_int);

      ~MPI_InitFinalize ();
    };

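    // Typical usage (a minimal sketch): create exactly one object of this
    // class at the top of main(); MPI is then initialized in the constructor
    // and finalized when the object goes out of scope. The third argument
    // here limits every MPI process to a single thread.
    //
    //   int main (int argc, char **argv)
    //   {
    //     Utilities::MPI::MPI_InitFinalize mpi_initialization (argc, argv, 1);
    //
    //     // ... set up and solve the parallel problem ...
    //
    //     return 0;
    //   }
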
    // Return whether the current job runs under MPI, i.e. whether deal.II
    // was configured with MPI and MPI_Init() has been called.
    bool job_supports_mpi ();

    // Send each (rank, object) pair in objects_to_send to the corresponding
    // rank, and return the objects received from other ranks, keyed by the
    // sending rank.
    template <typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &comm,
                 const std::map <unsigned int, T> &objects_to_send);

    // Gather one object from every process and return the collection of all
    // objects, ordered by rank, on every process.
    template <typename T>
    std::vector<T>
    all_gather(const MPI_Comm &comm,
               const T &object_to_send);

    // Like all_gather(), but the result is only available on root_process;
    // all other processes receive an empty vector.
    template <typename T>
    std::vector<T>
    gather(const MPI_Comm &comm,
           const T &object_to_send,
           const unsigned int root_process=0);

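    // Example usage (a minimal sketch, requires <string>): gather one string
    // per process on rank 0 via gather(), and exchange a point-to-point
    // message with the right neighbor via some_to_some(). The message
    // contents are purely illustrative.
    //
    //   const unsigned int my_rank = Utilities::MPI::this_mpi_process (comm);
    //   const unsigned int n_procs = Utilities::MPI::n_mpi_processes (comm);
    //
    //   const std::vector<std::string> all_names =
    //     Utilities::MPI::gather (comm, "process " + std::to_string (my_rank));
    //
    //   std::map<unsigned int, std::string> to_send;
    //   to_send[(my_rank + 1) % n_procs] = "hello from " + std::to_string (my_rank);
    //   const std::map<unsigned int, std::string> received =
    //     Utilities::MPI::some_to_some (comm, to_send);
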
#ifndef DOXYGEN
    // declaration for an internal function that lives in mpi.templates.h
    namespace internal
    {
      template <typename T>
      void all_reduce (const MPI_Op &mpi_op,
                       const ArrayView<const T> &values,
                       const MPI_Comm &mpi_communicator,
                       const ArrayView<T> &output);
    }

    // Since these depend on N they must live in the header file
    template <typename T, unsigned int N>
    void sum (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&sums)[N])
    {
      internal::all_reduce(MPI_SUM, ArrayView<const T>(values, N),
                           mpi_communicator, ArrayView<T>(sums, N));
    }

    template <typename T, unsigned int N>
    void max (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&maxima)[N])
    {
      internal::all_reduce(MPI_MAX, ArrayView<const T>(values, N),
                           mpi_communicator, ArrayView<T>(maxima, N));
    }

    template <typename T, unsigned int N>
    void min (const T (&values)[N],
              const MPI_Comm &mpi_communicator,
              T (&minima)[N])
    {
      internal::all_reduce(MPI_MIN, ArrayView<const T>(values, N),
                           mpi_communicator, ArrayView<T>(minima, N));
    }

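    // Example usage of the fixed-size array overloads above (a minimal
    // sketch; cell_measure and boundary_measure are hypothetical locally
    // computed quantities):
    //
    //   double local_measures[2] = { cell_measure, boundary_measure };   // hypothetical values
    //   double global_measures[2];
    //   Utilities::MPI::sum (local_measures, mpi_communicator, global_measures);
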
    template<typename T>
    std::map<unsigned int, T>
    some_to_some(const MPI_Comm &comm,
                 const std::map<unsigned int, T> &objects_to_send)
    {
#ifndef DEAL_II_WITH_MPI
      (void)comm;
      Assert(objects_to_send.size() == 0,
             ExcMessage("Cannot send to more than one processor."));
      Assert(objects_to_send.find(0) != objects_to_send.end() ||
             objects_to_send.size() == 0,
             ExcMessage("Can only send to myself or to nobody."));
      return objects_to_send;
#else

      // Extract the list of destination ranks from the map.
      std::vector<unsigned int> send_to(objects_to_send.size());
      {
        unsigned int i=0;
        for (const auto &m: objects_to_send)
          send_to[i++] = m.first;
      }
      AssertDimension(send_to.size(), objects_to_send.size());

      // Determine which ranks will send to us.
      const auto receive_from =
        Utilities::MPI::compute_point_to_point_communication_pattern(comm, send_to);

      // Sending buffers
      std::vector<std::vector<char> > buffers_to_send(send_to.size());
      std::vector<MPI_Request> buffer_send_requests(send_to.size());
      {
        unsigned int i = 0;
        for (const auto &rank_obj : objects_to_send)
          {
            const auto &rank = rank_obj.first;
            buffers_to_send[i] = Utilities::pack(rank_obj.second);
            const int ierr = MPI_Isend(buffers_to_send[i].data(),
                                       buffers_to_send[i].size(), MPI_CHAR,
                                       rank, 21, comm, &buffer_send_requests[i]);
            AssertThrowMPI(ierr);
            ++i;
          }
      }

      // Receiving buffers
      std::map<unsigned int, T> received_objects;
      {
        std::vector<char> buffer;
        // We do this on a first come/first served basis
        for (unsigned int i = 0; i<receive_from.size(); ++i)
          {
            // Probe what's going on. Take data from the first available sender
            MPI_Status status;
            int ierr = MPI_Probe(MPI_ANY_SOURCE, 21, comm, &status);
            AssertThrowMPI(ierr);

            // Length of the message
            int len;
            ierr = MPI_Get_count(&status, MPI_CHAR, &len);
            AssertThrowMPI(ierr);
            buffer.resize(len);

            // Source rank
            const unsigned int rank = status.MPI_SOURCE;

            // Actually receive the message
            ierr = MPI_Recv(buffer.data(), len, MPI_CHAR,
                            rank, 21, comm, MPI_STATUS_IGNORE);
            AssertThrowMPI(ierr);
            Assert(received_objects.find(rank) == received_objects.end(),
                   ExcInternalError("I should not receive again from this rank"));
            received_objects[rank] = Utilities::unpack<T>(buffer);
          }
      }

      // Wait to have sent all objects.
      MPI_Waitall(send_to.size(), buffer_send_requests.data(), MPI_STATUSES_IGNORE);

      return received_objects;
#endif // deal.II with MPI
    }

    template<typename T>
    std::vector<T> all_gather(const MPI_Comm &comm,
                              const T &object)
    {
#ifndef DEAL_II_WITH_MPI
      (void)comm;
      std::vector<T> v(1, object);
      return v;
#else
      const auto n_procs = dealii::Utilities::MPI::n_mpi_processes(comm);

      std::vector<char> buffer = Utilities::pack(object);

      int n_local_data = buffer.size();

      // Vector to store the size of loc_data_array for every process
      std::vector<int> size_all_data(n_procs,0);

      // Exchanging the size of each buffer
      MPI_Allgather(&n_local_data, 1, MPI_INT,
                    &(size_all_data[0]), 1, MPI_INT,
                    comm);

      // Now computing the displacement, relative to recvbuf,
      // at which to store the incoming buffer
      std::vector<int> rdispls(n_procs);
      rdispls[0] = 0;
      for (unsigned int i=1; i < n_procs; ++i)
        rdispls[i] = rdispls[i-1] + size_all_data[i-1];

      // Step 3: exchange the buffer:
      std::vector<char> received_unrolled_buffer(rdispls.back() + size_all_data.back());

      MPI_Allgatherv(buffer.data(), n_local_data, MPI_CHAR,
                     received_unrolled_buffer.data(), size_all_data.data(),
                     rdispls.data(), MPI_CHAR, comm);

      std::vector<T> received_objects(n_procs);
      for (unsigned int i=0; i < n_procs; ++i)
        {
          std::vector<char> local_buffer(received_unrolled_buffer.begin()+rdispls[i],
                                         received_unrolled_buffer.begin()+rdispls[i]+size_all_data[i]);
          received_objects[i] = Utilities::unpack<T>(local_buffer);
        }

      return received_objects;
#endif
    }

773 
774  template <typename T>
775  std::vector<T>
776  gather(const MPI_Comm &comm,
777  const T &object_to_send,
778  const unsigned int root_process)
779  {
780 #ifndef DEAL_II_WITH_MPI
781  (void)comm;
782  (void)root_process;
783  std::vector<T> v(1, object_to_send);
784  return v;
785 #else
786  const auto n_procs = ::Utilities::MPI::n_mpi_processes(comm);
787  const auto my_rank = ::Utilities::MPI::this_mpi_process(comm);
788 
789  Assert(root_process < n_procs, ExcIndexRange(root_process,0,n_procs));
790 
791  std::vector<char> buffer = Utilities::pack(object_to_send);
792  int n_local_data = buffer.size();
793 
794  // Vector to store the size of loc_data_array for every process
795  // only the root process needs to allocate memory for that purpose
796  std::vector<int> size_all_data;
797  if (my_rank==root_process)
798  size_all_data.resize(n_procs,0);
799 
800  // Exchanging the size of each buffer
801  int ierr = MPI_Gather(&n_local_data, 1, MPI_INT,
802  size_all_data.data(), 1, MPI_INT,
803  root_process, comm);
804  AssertThrowMPI(ierr);
805 
806  // Now computing the displacement, relative to recvbuf,
807  // at which to store the incoming buffer; only for root
808  std::vector<int> rdispls;
809  if (my_rank==root_process)
810  {
811  rdispls.resize(n_procs,0);
812  for (unsigned int i=1; i<n_procs; ++i)
813  rdispls[i] = rdispls[i-1] + size_all_data[i-1];
814  }
815  // exchange the buffer:
816  std::vector<char> received_unrolled_buffer;
817  if (my_rank==root_process)
818  received_unrolled_buffer.resize(rdispls.back() + size_all_data.back());
819 
820  ierr = MPI_Gatherv(buffer.data(), n_local_data, MPI_CHAR,
821  received_unrolled_buffer.data(), size_all_data.data(),
822  rdispls.data(), MPI_CHAR,
823  root_process, comm);
824  AssertThrowMPI(ierr);
825 
826  std::vector<T> received_objects;
827 
828  if (my_rank==root_process)
829  {
830  received_objects.resize(n_procs);
831 
832  for (unsigned int i=0; i<n_procs; ++i)
833  {
834  const std::vector<char> local_buffer(received_unrolled_buffer.begin()+rdispls[i],
835  received_unrolled_buffer.begin()+rdispls[i]+size_all_data[i]);
836  received_objects[i] = Utilities::unpack<T>(local_buffer);
837  }
838  }
839  return received_objects;
840 #endif
841  }
842 
843 #endif
  } // end of namespace MPI
} // end of namespace Utilities


DEAL_II_NAMESPACE_CLOSE

#endif