Reference documentation for deal.II version 9.6.0
process_grid.cc
// ------------------------------------------------------------------------
//
// SPDX-License-Identifier: LGPL-2.1-or-later
// Copyright (C) 2017 - 2023 by the deal.II authors
//
// This file is part of the deal.II library.
//
// Part of the source code is dual licensed under Apache-2.0 WITH
// LLVM-exception OR LGPL-2.1-or-later. Detailed license information
// governing the source code and code contributions can be found in
// LICENSE.md and CONTRIBUTING.md at the top level directory of deal.II.
//
// ------------------------------------------------------------------------

#include <deal.II/base/process_grid.h>

#ifdef DEAL_II_WITH_SCALAPACK

# include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace
{
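  // Compute the dimensions of a 2d process grid for an m x n matrix that is
  // distributed in blocks of size block_size_m x block_size_n over the MPI
  // processes in mpi_comm. Returns the pair
  // (n_process_rows, n_process_columns).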
  inline std::pair<int, int>
  compute_processor_grid_sizes(const MPI_Comm     mpi_comm,
                               const unsigned int m,
                               const unsigned int n,
                               const unsigned int block_size_m,
                               const unsigned int block_size_n)
  {
    // A few notes from the ScaLAPACK user guide:
    // It is possible to predict the best grid shape given the number of
    // processes available: Pr x Pc <= P. This, however, depends on the task
    // to be done. LU, QR and QL factorizations perform better for "flat"
    // process grids (Pr < Pc). For large N, Pc = 2*Pr is a good choice,
    // whereas for small N one should choose a small Pr. Square or nearly
    // square grids are better for Cholesky factorization. LQ and RQ
    // factorizations take advantage of "tall" grids (Pr > Pc).

    // Below we always try to create 2d processor grids:

    const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);

    // Get the total number of cores we can occupy in a rectangular dense
    // matrix with rectangular blocks when every core owns only a single
    // block:
    const int n_processes_heuristic = int(std::ceil((1. * m) / block_size_m)) *
                                      int(std::ceil((1. * n) / block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);

    // Now we need to split Np into Pr x Pc. Assume we know the shape/ratio
    //   Pc =: ratio * Pr
    // and therefore
    //   Np = Pc * Pc / ratio;
    // for square matrices the ratio equals 1.
    const double ratio = double(n) / m;
    int          Pc    = static_cast<int>(std::sqrt(ratio * Np));

    // One could round Pc up to the next divisor of Np,
    //   while (Np % Pc != 0)
    //     ++Pc;
    // but this affects the grid shape dramatically, i.e. with 10 cores a
    // 3x3 grid becomes 2x5.

    // limit our estimate to be in [2, Np]
    int n_process_columns = std::min(Np, std::max(2, Pc));
    // finally, get the rows:
    int n_process_rows = Np / n_process_columns;

    Assert(n_process_columns >= 1 && n_process_rows >= 1 &&
             n_processes >= n_process_rows * n_process_columns,
           ExcMessage(
             "error in process grid: " + std::to_string(n_process_rows) +
             "x" + std::to_string(n_process_columns) + "=" +
             std::to_string(n_process_rows * n_process_columns) +
             " out of " + std::to_string(n_processes)));

    return std::make_pair(n_process_rows, n_process_columns);

    // For example,
    // 320x320 with 32x32 blocks and 16 cores:
    // Pc = 1.0 * Pr => 4x4 grid
    // Pc = 0.5 * Pr => 8x2 grid
    // Pc = 2.0 * Pr => 3x5 grid
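    //
    // Checking the first of these with the formulas above: m = n = 320 with
    // 32x32 blocks gives n_processes_heuristic = 10 * 10 = 100, hence
    // Np = min(100, 16) = 16; ratio = 1 and Pc = int(sqrt(16)) = 4, so
    // n_process_columns = min(16, max(2, 4)) = 4 and
    // n_process_rows = 16 / 4 = 4, i.e. the 4x4 grid.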
  }
} // namespace

namespace Utilities
{
  namespace MPI
  {
    ProcessGrid::ProcessGrid(
      const MPI_Comm                               mpi_comm,
      const std::pair<unsigned int, unsigned int> &grid_dimensions)
      : mpi_communicator(mpi_comm)
      , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
      , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
      , n_process_rows(grid_dimensions.first)
      , n_process_columns(grid_dimensions.second)
    {
      Assert(grid_dimensions.first > 0,
             ExcMessage("Number of process grid rows has to be positive."));
      Assert(grid_dimensions.second > 0,
             ExcMessage("Number of process grid columns has to be positive."));

      Assert(
        grid_dimensions.first * grid_dimensions.second <= n_mpi_processes,
        ExcMessage(
          "Size of process grid is larger than number of available MPI processes."));

      // processor grid order.
      const bool column_major = false;

      // Initialize Cblacs context from the provided communicator
      blacs_context     = Csys2blacs_handle(mpi_communicator);
      const char *order = (column_major ? "Col" : "Row");
      // Note that blacs_context can be modified below. Thus Cblacs2sys_handle
      // may not return the same MPI communicator.
      Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);

      // BLACS may modify the grid size on processes which are not used
      // in the grid. So provide copies below:
      int procrows_ = n_process_rows;
      int proccols_ = n_process_columns;
      Cblacs_gridinfo(blacs_context,
                      &procrows_,
                      &proccols_,
                      &this_process_row,
                      &this_process_column);

      // If this MPI core is not on the grid, flag it as inactive and
      // skip all jobs.
      // Note that a different condition is used in the Fortran code here:
      // https://stackoverflow.com/questions/18516915/calling-blacs-with-more-processes-than-used
      if (this_process_row < 0 || this_process_column < 0)
        mpi_process_is_active = false;
      else
        mpi_process_is_active = true;
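      // (Cblacs_gridinfo() reports negative process coordinates on ranks
      // that are not part of the grid, which is what the check above
      // relies on.)
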
      // Create an auxiliary communicator which has root and all inactive
      // cores. Assume that inactive cores start with
      // id = n_process_rows * n_process_columns.
      const unsigned int n_active_mpi_processes =
        n_process_rows * n_process_columns;
      Assert(mpi_process_is_active ||
               this_mpi_process >= n_active_mpi_processes,
             ExcInternalError());

      std::vector<int> inactive_with_root_ranks;
      inactive_with_root_ranks.push_back(0);
      for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
        inactive_with_root_ranks.push_back(i);

      // Get the group of processes in mpi_communicator
      int       ierr = 0;
      MPI_Group all_group;
      ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // Construct the group containing all ranks we need:
      MPI_Group inactive_with_root_group;
      const int n = inactive_with_root_ranks.size();
      ierr        = MPI_Group_incl(all_group,
                                   n,
                                   inactive_with_root_ranks.data(),
                                   &inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Create the communicator based on inactive_with_root_group.
      // Note that on all the active MPI processes (except for the one with
      // rank 0) the resulting MPI_Comm mpi_communicator_inactive_with_root
      // will be MPI_COMM_NULL.
      const int mpi_tag =
        Utilities::MPI::internal::Tags::process_grid_constructor;

      ierr = MPI_Comm_create_group(mpi_communicator,
                                   inactive_with_root_group,
                                   mpi_tag,
                                   &mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);

      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&inactive_with_root_group);
      AssertThrowMPI(ierr);
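      // (For example, with 16 MPI processes and a 3x5 grid there are 15
      // active processes, so inactive_with_root_ranks = {0, 15} and the
      // resulting mpi_communicator_inactive_with_root spans exactly ranks
      // 0 and 15.)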

      // Double check that the process with rank 0 in subgroup is active:
# ifdef DEBUG
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL &&
          Utilities::MPI::this_mpi_process(
            mpi_communicator_inactive_with_root) == 0)
        Assert(mpi_process_is_active, ExcInternalError());
# endif
    }



    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows_matrix,
                             const unsigned int n_columns_matrix,
                             const unsigned int row_block_size,
                             const unsigned int column_block_size)
      : ProcessGrid(mpi_comm,
                    compute_processor_grid_sizes(mpi_comm,
                                                 n_rows_matrix,
                                                 n_columns_matrix,
                                                 row_block_size,
                                                 column_block_size))
    {}



    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows,
                             const unsigned int n_columns)
      : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
    {}



    ProcessGrid::~ProcessGrid()
    {
      if (mpi_process_is_active)
        Cblacs_gridexit(blacs_context);

      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        Utilities::MPI::free_communicator(mpi_communicator_inactive_with_root);
    }


    template <typename NumberType>
    void
    ProcessGrid::send_to_inactive(NumberType *value, const int count) const
    {
      Assert(count > 0, ExcInternalError());
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        {
          const int ierr =
            MPI_Bcast(value,
                      count,
                      Utilities::MPI::mpi_type_id_for_type<decltype(*value)>,
                      0 /*from root*/,
                      mpi_communicator_inactive_with_root);
          AssertThrowMPI(ierr);
        }
    }
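
    // (send_to_inactive() broadcasts count values of type NumberType from
    // rank 0 to the processes that are not part of the BLACS grid, e.g. so
    // that scalar results computed on the grid become available on all
    // ranks of the original communicator.)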

  } // namespace MPI
} // namespace Utilities

// instantiations

template void
Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *,
                                                      const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<float>(float *, const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<int>(int *, const int) const;

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK
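
A minimal usage sketch (not part of this file) of how the pieces above are typically put together. It requires a deal.II build with DEAL_II_WITH_SCALAPACK and assumes the ScaLAPACKMatrix constructor taking (rows, columns, grid, row_block_size, column_block_size) from deal.II/lac/scalapack.h as well as the ProcessGrid accessors get_process_grid_rows() / get_process_grid_columns() declared in deal.II/base/process_grid.h:

#include <deal.II/base/mpi.h>
#include <deal.II/base/process_grid.h>
#include <deal.II/lac/scalapack.h>

#include <iostream>
#include <memory>

int main(int argc, char **argv)
{
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);

  const unsigned int n_rows = 320, n_columns = 320, block_size = 32;

  // Let compute_processor_grid_sizes() pick a 2d grid for this matrix;
  // with 16 MPI processes this yields the 4x4 grid discussed above.
  const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    MPI_COMM_WORLD, n_rows, n_columns, block_size, block_size);

  if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
    std::cout << "process grid: " << grid->get_process_grid_rows() << " x "
              << grid->get_process_grid_columns() << std::endl;

  // Distribute a dense matrix over that grid in block_size x block_size
  // blocks; ranks outside the grid simply hold no data.
  ScaLAPACKMatrix<double> matrix(
    n_rows, n_columns, grid, block_size, block_size);
}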