Reference documentation for deal.II version 9.0.0
process_grid.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2017 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE at
// the top level of the deal.II distribution.
//
// ---------------------------------------------------------------------

#include <deal.II/base/process_grid.h>

#ifdef DEAL_II_WITH_SCALAPACK

#include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace
{
  inline
  std::pair<int,int> compute_processor_grid_sizes(MPI_Comm mpi_comm,
                                                  const unsigned int m, const unsigned int n,
                                                  const unsigned int block_size_m, const unsigned int block_size_n)
  {
    // A few notes from the ScaLAPACK user guide:
    // It is possible to predict the best grid shape given the number of processes available:
    //   Pr x Pc <= P
    // This, however, depends on the task to be done.
    // LU, QR and QL factorizations perform better on "flat" process grids (Pr < Pc).
    // For large N, Pc = 2*Pr is a good choice, whereas for small N one should choose a small Pr.
    // Square or nearly square grids are better for Cholesky factorization.
    // LQ and RQ factorizations take advantage of "tall" grids (Pr > Pc).

    // Below we always try to create 2D processor grids:
    int n_processes;
    MPI_Comm_size(mpi_comm, &n_processes);

    // Get the total number of cores we can occupy in a rectangular dense matrix
    // with rectangular blocks when every core owns only a single block:
    const int n_processes_heuristic = int(std::ceil((1.*m)/block_size_m))*
                                      int(std::ceil((1.*n)/block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);

    // Now we need to split Np into Pr x Pc. Assume we know the desired shape/ratio
    //   Pc =: ratio * Pr,
    // so that
    //   Np = Pr * Pc = Pc * Pc / ratio
    // and hence Pc = sqrt(ratio * Np). For square matrices the ratio equals 1.
    const double ratio = double(n)/m;
    int Pc = std::floor(std::sqrt(ratio * Np));

    // One could round up Pc to the nearest number that divides Np without remainder:
    //   while ( Np % Pc != 0 )
    //     ++Pc;
    // but this can change the grid shape dramatically, e.g. for 10 cores a 3x3 grid becomes a 2x5 grid.

    // Limit our estimate to be in [2, Np]:
    int n_process_columns = std::min (Np, std::max(2, Pc));
    // Finally, get the rows:
    int n_process_rows = Np / n_process_columns;

    Assert (n_process_columns >=1 && n_process_rows >=1 && n_processes >= n_process_rows*n_process_columns,
            ExcMessage("error in process grid: "+
                       std::to_string(n_process_rows)+"x"+
                       std::to_string(n_process_columns)+
                       "="+
                       std::to_string(n_process_rows*n_process_columns)+
                       " out of " +
                       std::to_string(n_processes)));

    return std::make_pair(n_process_rows,n_process_columns);

    // For example,
    // 320x320 with 32x32 blocks and 16 cores:
    // Pc = 1.0 * Pr => 4x4 grid
    // Pc = 0.5 * Pr => 8x2 grid
    // Pc = 2.0 * Pr => 3x5 grid
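    //
    // Tracing the 16-core case through the code above (added note, not part
    // of the ScaLAPACK guide excerpt): n_processes_heuristic =
    // ceil(320/32)*ceil(320/32) = 100, so Np = min(100,16) = 16; with
    // ratio = 320/320 = 1 we get Pc = floor(sqrt(1*16)) = 4 and
    // n_process_rows = 16/4 = 4, i.e. the 4x4 grid listed above.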
  }
}

namespace Utilities
{
  namespace MPI
  {

    ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
                             const std::pair<unsigned int,unsigned int> &grid_dimensions)
      :
      mpi_communicator(mpi_comm),
      this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator)),
      n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator)),
      n_process_rows(grid_dimensions.first),
      n_process_columns(grid_dimensions.second)
    {
      Assert (grid_dimensions.first > 0,
              ExcMessage("Number of process grid rows has to be positive."));
      Assert (grid_dimensions.second > 0,
              ExcMessage("Number of process grid columns has to be positive."));

      Assert (grid_dimensions.first*grid_dimensions.second <= n_mpi_processes,
              ExcMessage("Size of process grid is larger than number of available MPI processes."));

      // Processor grid order.
      const bool column_major = false;

      // Initialize the BLACS context from the provided communicator.
      blacs_context = Csys2blacs_handle(mpi_communicator);
      const char *order = ( column_major ? "Col" : "Row" );
      // Note that blacs_context can be modified below. Thus Cblacs2sys_handle
      // may not return the same MPI communicator.
      Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);

      // BLACS may modify the grid size on processes which are not used
      // in the grid, so pass copies below:
      int procrows_ = n_process_rows;
      int proccols_ = n_process_columns;
      Cblacs_gridinfo( blacs_context, &procrows_, &proccols_, &this_process_row, &this_process_column );

      // If this MPI core is not on the grid, flag it as inactive and
      // skip all jobs.
      // Note that a different condition is used in the FORTRAN code here:
      // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used
      if (this_process_row < 0 || this_process_column < 0)
        mpi_process_is_active = false;
      else
        mpi_process_is_active = true;

      // Create an auxiliary communicator which has root and all inactive cores.
      // Assume that inactive cores start with id=n_process_rows*n_process_columns
      const unsigned int n_active_mpi_processes = n_process_rows*n_process_columns;
      Assert (mpi_process_is_active ||
              this_mpi_process >= n_active_mpi_processes,
              ExcInternalError());

      std::vector<int> inactive_with_root_ranks;
      inactive_with_root_ranks.push_back(0);
      for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
        inactive_with_root_ranks.push_back(i);

      // Get the group of processes in mpi_communicator
      int ierr = 0;
      MPI_Group all_group;
      ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // Construct the group containing all ranks we need:
      MPI_Group inactive_with_root_group;
      const int n = inactive_with_root_ranks.size();
      ierr = MPI_Group_incl(all_group,
                            n, inactive_with_root_ranks.data(),
                            &inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Create the communicator based on the group.
      // Note that on most cores the communicator will be MPI_COMM_NULL.
      // FIXME: switch to MPI_Comm_create_group for MPI-3 so that only processes within the to-be subgroup call this
      ierr = MPI_Comm_create(mpi_communicator, inactive_with_root_group,
                             &mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);

      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Double check that the process with rank 0 in the subgroup is active:
#ifdef DEBUG
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
          Utilities::MPI::this_mpi_process(mpi_communicator_inactive_with_root) == 0)
        Assert (mpi_process_is_active, ExcInternalError());
#endif
    }


    ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
                             const unsigned int n_rows_matrix,
                             const unsigned int n_columns_matrix,
                             const unsigned int row_block_size,
                             const unsigned int column_block_size)
      :
      ProcessGrid(mpi_comm,
                  compute_processor_grid_sizes(mpi_comm, n_rows_matrix, n_columns_matrix,
                                               row_block_size, column_block_size) )
    {}



    ProcessGrid::ProcessGrid(MPI_Comm mpi_comm,
                             const unsigned int n_rows,
                             const unsigned int n_columns)
      :
      ProcessGrid(mpi_comm,
                  std::make_pair(n_rows,n_columns))
    {}


    ProcessGrid::~ProcessGrid()
    {
      if (mpi_process_is_active)
        Cblacs_gridexit(blacs_context);

      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        MPI_Comm_free(&mpi_communicator_inactive_with_root);
    }


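    // Note on send_to_inactive() below: only rank 0 and the ranks that are
    // not part of the BLACS grid share mpi_communicator_inactive_with_root;
    // on every other rank that communicator is MPI_COMM_NULL and the call is
    // a no-op, so it is safe to invoke the function on all processes.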
    template <typename NumberType>
    void ProcessGrid::send_to_inactive(NumberType *value, const int count) const
    {
      Assert (count>0, ExcInternalError());
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        {
          const int ierr =
            MPI_Bcast(value,count,
                      Utilities::MPI::internal::mpi_type_id (value),
                      0/*from root*/,
                      mpi_communicator_inactive_with_root);
          AssertThrowMPI(ierr);
        }
    }

  }
}

// instantiations

template void Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *, const int) const;
template void Utilities::MPI::ProcessGrid::send_to_inactive<float>(float *, const int) const;
template void Utilities::MPI::ProcessGrid::send_to_inactive<int>(int *, const int) const;

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK
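
For context, a minimal usage sketch follows. It is not part of process_grid.cc: the MPI_InitFinalize, ScaLAPACKMatrix, and l1_norm() calls follow the deal.II 9.0 interfaces documented elsewhere, so treat the exact signatures as assumptions rather than as this file's API.

// Usage sketch (assumes the deal.II 9.0 ScaLAPACKMatrix interface).
#include <deal.II/base/mpi.h>
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <memory>

int main(int argc, char **argv)
{
  using namespace dealii;

  // Initialize MPI (limit each process to one thread).
  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

  const unsigned int size       = 320; // the matrix is size x size
  const unsigned int block_size = 32;  // ScaLAPACK block size

  // Build a 2D process grid sized by compute_processor_grid_sizes():
  // at most ceil(320/32)*ceil(320/32) = 100 processes can own a block,
  // so with 16 MPI ranks a 4x4 grid is created (see the example above).
  const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
                      MPI_COMM_WORLD, size, size, block_size, block_size);

  // Distribute a square matrix over the grid; ranks that did not make it
  // onto the grid are flagged inactive and own no part of the matrix.
  ScaLAPACKMatrix<double> matrix(size, grid, block_size);

  // ... fill the matrix, then call e.g. matrix.l1_norm(); results computed
  // on the grid are forwarded to inactive ranks via send_to_inactive().

  return 0;
}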