Reference documentation for deal.II version 9.2.0
process_grid.cc
// ---------------------------------------------------------------------
//
// Copyright (C) 2017 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#include <deal.II/base/process_grid.h>

#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace
{
  /**
   * Internal function to determine the dimensions of a 2D process grid based
   * on the total number of cores, the matrix dimensions and the matrix block
   * sizes.
   */
  inline std::pair<int, int>
  compute_processor_grid_sizes(MPI_Comm           mpi_comm,
                               const unsigned int m,
                               const unsigned int n,
                               const unsigned int block_size_m,
                               const unsigned int block_size_n)
  {
    // A few notes from the ScaLAPACK user guide:
    // It is possible to predict the best grid shape given the number of
    // processes available: Pr x Pc <= P. This, however, depends on the task
    // to be done. LU, QR and QL factorizations perform better for “flat”
    // process grids (Pr < Pc). For large N, Pc = 2*Pr is a good choice,
    // whereas for small N one should choose a small Pr. Square or nearly
    // square grids are preferable for Cholesky factorization. LQ and RQ
    // factorizations take advantage of “tall” grids (Pr > Pc).

    // Below we always try to create 2D processor grids:

    const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);

    // Get the total number of cores we can occupy in a rectangular dense
    // matrix with rectangular blocks when every core owns only a single block:
    const int n_processes_heuristic = int(std::ceil((1. * m) / block_size_m)) *
                                      int(std::ceil((1. * n) / block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);

    // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio
    //   Pc =: ratio * Pr
    // and therefore
    //   Np = Pc * Pc / ratio.
    // For square matrices the ratio equals 1.
    const double ratio = double(n) / m;
    int          Pc    = static_cast<int>(std::sqrt(ratio * Np));

    // One could increase Pc until it divides Np without remainder,
    //   while (Np % Pc != 0)
    //     ++Pc;
    // but this affects the grid shape dramatically, i.e. for 10 cores a 3x3
    // grid becomes 2x5.

    // Limit our estimate to be in [2, Np]:
    int n_process_columns = std::min(Np, std::max(2, Pc));
    // finally, get the rows:
    int n_process_rows = Np / n_process_columns;

    Assert(n_process_columns >= 1 && n_process_rows >= 1 &&
             n_processes >= n_process_rows * n_process_columns,
           ExcMessage(
             "error in process grid: " + std::to_string(n_process_rows) + "x" +
             std::to_string(n_process_columns) + "=" +
             std::to_string(n_process_rows * n_process_columns) + " out of " +
             std::to_string(n_processes)));

    return std::make_pair(n_process_rows, n_process_columns);

    // For example, for a 320x320 matrix with 32x32 blocks and 16 cores:
    //   Pc = 1.0 * Pr => 4x4 grid
    //   Pc = 0.5 * Pr => 8x2 grid
    //   Pc = 2.0 * Pr => 3x5 grid
  }
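
  // An illustrative trace of the heuristic above (not part of the original
  // source; the numbers are an arbitrary example): for a 1000x1000 matrix
  // with 32x32 blocks on 11 MPI processes we get
  //   n_processes_heuristic = ceil(1000/32) * ceil(1000/32) = 32 * 32 = 1024,
  //   Np    = min(1024, 11) = 11,
  //   ratio = 1000/1000 = 1,  Pc = int(sqrt(1 * 11)) = 3,
  //   n_process_columns = min(11, max(2, 3)) = 3,
  //   n_process_rows    = 11 / 3 = 3,
  // i.e. a 3x3 grid that leaves 2 of the 11 processes outside the grid.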
} // namespace

namespace Utilities
{
  namespace MPI
  {
    ProcessGrid::ProcessGrid(
      MPI_Comm                                     mpi_comm,
      const std::pair<unsigned int, unsigned int> &grid_dimensions)
      : mpi_communicator(mpi_comm)
      , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
      , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
      , n_process_rows(grid_dimensions.first)
      , n_process_columns(grid_dimensions.second)
    {
      Assert(grid_dimensions.first > 0,
             ExcMessage("Number of process grid rows has to be positive."));
      Assert(grid_dimensions.second > 0,
             ExcMessage("Number of process grid columns has to be positive."));

      Assert(
        grid_dimensions.first * grid_dimensions.second <= n_mpi_processes,
        ExcMessage(
          "Size of process grid is larger than number of available MPI processes."));

      // Processor grid order.
      const bool column_major = false;

      // Initialize the BLACS context from the provided communicator.
      blacs_context     = Csys2blacs_handle(mpi_communicator);
      const char *order = (column_major ? "Col" : "Row");
      // Note that blacs_context can be modified below. Thus Cblacs2sys_handle
      // may not return the same MPI communicator.
      Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);

      // BLACS may modify the grid size on processes which are not used
      // in the grid. So provide copies below:
      int procrows_ = n_process_rows;
      int proccols_ = n_process_columns;
      Cblacs_gridinfo(blacs_context,
                      &procrows_,
                      &proccols_,
                      &this_process_row,
                      &this_process_column);

      // If this MPI core is not on the grid, flag it as inactive and
      // skip all jobs.
      // Note that a different condition is used in the Fortran code here:
      // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used
      if (this_process_row < 0 || this_process_column < 0)
        mpi_process_is_active = false;
      else
        mpi_process_is_active = true;

      // Create an auxiliary communicator which has root and all inactive
      // cores. Assume that inactive cores start with
      // id=n_process_rows*n_process_columns.
      const unsigned int n_active_mpi_processes =
        n_process_rows * n_process_columns;
      Assert(mpi_process_is_active ||
               this_mpi_process >= n_active_mpi_processes,
             ExcInternalError());

      std::vector<int> inactive_with_root_ranks;
      inactive_with_root_ranks.push_back(0);
      for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
        inactive_with_root_ranks.push_back(i);

      // Get the group of processes in mpi_communicator:
      int       ierr = 0;
      MPI_Group all_group;
      ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // Construct the group containing all ranks we need:
      MPI_Group inactive_with_root_group;
      const int n = inactive_with_root_ranks.size();
      ierr        = MPI_Group_incl(all_group,
                                   n,
                                   inactive_with_root_ranks.data(),
                                   &inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Create the communicator based on inactive_with_root_group.
      // Note that on all the active MPI processes (except for the one with
      // rank 0) the resulting MPI_Comm mpi_communicator_inactive_with_root
      // will be MPI_COMM_NULL.
      const int mpi_tag =
        Utilities::MPI::internal::Tags::process_grid_constructor;

      ierr = Utilities::MPI::create_group(mpi_communicator,
                                          inactive_with_root_group,
                                          mpi_tag,
                                          &mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);

      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Double check that the process with rank 0 in the subgroup is active:
#  ifdef DEBUG
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
          Utilities::MPI::this_mpi_process(
            mpi_communicator_inactive_with_root) == 0)
        Assert(mpi_process_is_active, ExcInternalError());
#  endif
    }
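
    // Concrete illustration of the auxiliary communicator (not part of the
    // original source; the rank count is an arbitrary example): with 5 MPI
    // ranks and a 2x2 grid, ranks 0-3 become active grid members while rank 4
    // ends up off the grid. mpi_communicator_inactive_with_root then contains
    // ranks {0, 4}; on ranks 1-3 it is MPI_COMM_NULL, so broadcasts in
    // send_to_inactive() only involve the root and the idle processes.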


    ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
                             const unsigned int n_rows_matrix,
                             const unsigned int n_columns_matrix,
                             const unsigned int row_block_size,
                             const unsigned int column_block_size)
      : ProcessGrid(mpi_comm,
                    compute_processor_grid_sizes(mpi_comm,
                                                 n_rows_matrix,
                                                 n_columns_matrix,
                                                 row_block_size,
                                                 column_block_size))
    {}


    ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
                             const unsigned int n_rows,
                             const unsigned int n_columns)
      : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
    {}
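
    // A minimal usage sketch (illustrative, not part of the library source;
    // MPI_COMM_WORLD, the sizes, and the shared_ptr wrapping are assumptions
    // of this example):
    //
    //   const unsigned int size = 1000, block_size = 32;
    //   // let compute_processor_grid_sizes() pick the grid shape:
    //   auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    //     MPI_COMM_WORLD, size, size, block_size, block_size);
    //   // or prescribe the grid shape explicitly:
    //   auto grid_2x2 =
    //     std::make_shared<Utilities::MPI::ProcessGrid>(MPI_COMM_WORLD, 2, 2);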


    ProcessGrid::~ProcessGrid()
    {
      if (mpi_process_is_active)
        Cblacs_gridexit(blacs_context);

      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        MPI_Comm_free(&mpi_communicator_inactive_with_root);
    }


    template <typename NumberType>
    void
    ProcessGrid::send_to_inactive(NumberType *value, const int count) const
    {
      Assert(count > 0, ExcInternalError());
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        {
          const int ierr =
            MPI_Bcast(value,
                      count,
                      Utilities::MPI::internal::mpi_type_id(value),
                      0 /*from root*/,
                      mpi_communicator_inactive_with_root);
          AssertThrowMPI(ierr);
        }
    }
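
    // Illustrative pattern for send_to_inactive() (not part of the library
    // source; is_process_active() is the accessor declared in process_grid.h,
    // everything else is an assumption of this example):
    //
    //   double result = 0.;
    //   if (grid->is_process_active())
    //     result = ...; // computed on the grid, e.g. via ScaLAPACK
    //   // Forward the root's value to the processes that are not on the
    //   // grid; this is a no-op on the remaining active ranks:
    //   grid->send_to_inactive(&result);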

  } // namespace MPI
} // namespace Utilities

// instantiations

template void
Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *,
                                                      const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<float>(float *, const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<int>(int *, const int) const;

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK