#include <deal.II/base/process_grid.h>

#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN
namespace
{
  /**
   * Internal function to determine the dimensions of a 2D process grid
   * from the total number of cores, the matrix dimensions, and the
   * matrix block sizes.
   */
  inline std::pair<int, int>
  compute_processor_grid_sizes(MPI_Comm           mpi_comm,
                               const unsigned int m,
                               const unsigned int n,
                               const unsigned int block_size_m,
                               const unsigned int block_size_n)
  {
    // Per the ScaLAPACK user's guide, the optimal grid shape depends on
    // the task (LU/QR/QL favor "flat" grids with Pr < Pc, Cholesky
    // near-square grids, LQ/RQ "tall" grids). Below we always try to set
    // up a 2D grid close to the matrix aspect ratio.
    const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);
    // The number of cores we can occupy in a rectangular dense matrix with
    // rectangular blocks when every core owns exactly one block:
    const int n_processes_heuristic = int(std::ceil((1. * m) / block_size_m)) *
                                      int(std::ceil((1. * n) / block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);
    // Split Np into Pr x Pc, assuming the grid has the same aspect ratio
    // as the matrix: with Pc = ratio * Pr we get Np = ratio * Pr^2. For a
    // square matrix the ratio equals one.
    const double ratio = double(n) / m;
    int          Pc    = static_cast<int>(std::sqrt(ratio * Np));
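    // One could round Pc up until it divides Np evenly
    // (while (Np % Pc != 0) ++Pc;), but that can change the grid shape
    // dramatically: 10 cores can be arranged in a 3x3 or a 2x5 grid, but
    // not in a 2x3 one.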
    // Limit the estimate to [2, Np]...
    int n_process_columns = std::min(Np, std::max(2, Pc));
    // ...and deduce the number of rows:
    int n_process_rows = Np / n_process_columns;
    Assert(n_process_columns >= 1 && n_process_rows >= 1 &&
             n_processes >= n_process_rows * n_process_columns,
           ExcMessage(
             "error in process grid: " + std::to_string(n_process_rows) +
             "x" + std::to_string(n_process_columns) + "=" +
             std::to_string(n_process_rows * n_process_columns) +
             " out of " + std::to_string(n_processes)));
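    // For example, a 320x320 matrix with 32x32 blocks on 16 cores gives
    // Np = min(10*10, 16) = 16 and, with ratio = 1, a 4x4 grid.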
    return std::make_pair(n_process_rows, n_process_columns);
  }
} // namespace
namespace Utilities
{
  namespace MPI
  {
    ProcessGrid::ProcessGrid(
      MPI_Comm                                     mpi_comm,
      const std::pair<unsigned int, unsigned int> &grid_dimensions)
      : mpi_communicator(mpi_comm)
      , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
      , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
      , n_process_rows(grid_dimensions.first)
      , n_process_columns(grid_dimensions.second)
    {
      Assert(grid_dimensions.first > 0,
             ExcMessage("Number of process grid rows has to be positive."));
      Assert(grid_dimensions.second > 0,
             ExcMessage("Number of process grid columns has to be positive."));
      Assert(
        grid_dimensions.first * grid_dimensions.second <= n_mpi_processes,
        ExcMessage(
          "Size of process grid is larger than number of available MPI processes."));
      // Processor grid order.
      const bool column_major = false;

      // Initialize the BLACS context from the provided MPI communicator:
      blacs_context     = Csys2blacs_handle(mpi_communicator);
      const char *order = (column_major ? "Col" : "Row");
      Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);

      // BLACS may modify the grid size on processes which are not used in
      // the grid, so pass copies:
      int procrows_ = n_process_rows;
      int proccols_ = n_process_columns;
      Cblacs_gridinfo(blacs_context,
                      &procrows_,
                      &proccols_,
                      &this_process_row,
                      &this_process_column);

      // If this MPI core is not on the grid, flag it as inactive:
      mpi_process_is_active =
        (this_process_row >= 0 && this_process_column >= 0);
      // Create an auxiliary communicator which contains the root and all
      // inactive cores. Inactive cores have ranks starting at
      // n_process_rows * n_process_columns:
      const unsigned int n_active_mpi_processes =
        n_process_rows * n_process_columns;
      Assert(mpi_process_is_active ||
               this_mpi_process >= n_active_mpi_processes,
             ExcInternalError());
      std::vector<int> inactive_with_root_ranks;
      inactive_with_root_ranks.push_back(0);
      for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
        inactive_with_root_ranks.push_back(i);
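      // For instance, with 6 MPI processes and a 2x2 grid, the ranks
      // collected here are {0, 4, 5}: the root plus the two cores that did
      // not make it onto the BLACS grid.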
      // Get the group of processes in mpi_communicator:
      int       ierr = 0;
      MPI_Group all_group;
      ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // Construct the group containing all ranks we need:
      MPI_Group inactive_with_root_group;
      const int n = inactive_with_root_ranks.size();
      ierr          = MPI_Group_incl(all_group,
                            n,
                            inactive_with_root_ranks.data(),
                            &inactive_with_root_group);
      AssertThrowMPI(ierr);
      // Create the communicator based on inactive_with_root_group.
      // Note that on all active MPI processes (except the one with rank 0)
      // the resulting communicator is MPI_COMM_NULL.
      ierr = Utilities::MPI::create_group(mpi_communicator,
                                          inactive_with_root_group,
                                          55,
                                          &mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&inactive_with_root_group);
      AssertThrowMPI(ierr);
    }
    ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
                             const unsigned int n_rows_matrix,
                             const unsigned int n_columns_matrix,
                             const unsigned int row_block_size,
                             const unsigned int column_block_size)
      : ProcessGrid(mpi_comm,
                    compute_processor_grid_sizes(mpi_comm,
                                                 n_rows_matrix,
                                                 n_columns_matrix,
                                                 row_block_size,
                                                 column_block_size))
    {}
    ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
                             const unsigned int n_rows,
                             const unsigned int n_columns)
      : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
    {}



    ProcessGrid::~ProcessGrid()
    {
      if (mpi_process_is_active)
        Cblacs_gridexit(blacs_context);

      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        MPI_Comm_free(&mpi_communicator_inactive_with_root);
    }
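    // Usage sketch (comment only, illustrative; assumes deal.II's
    // ScaLAPACKMatrix interface): a grid sized for a 320x320 matrix with
    // 32x32 blocks would be set up as
    //
    //   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    //     MPI_COMM_WORLD, 320, 320, 32, 32);
    //   ScaLAPACKMatrix<double> matrix(320, 320, grid, 32, 32);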
    template <typename NumberType>
    void
    ProcessGrid::send_to_inactive(NumberType *value, const int count) const
    {
      Assert(count > 0, ExcInternalError());
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        {
          // Broadcast from the root to all inactive processes:
          const int ierr =
            MPI_Bcast(value,
                      count,
                      Utilities::MPI::internal::mpi_type_id(value),
                      0 /*from root*/,
                      mpi_communicator_inactive_with_root);
          AssertThrowMPI(ierr);
        }
    }
  } // namespace MPI
} // namespace Utilities
// instantiations

template void
Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *,
                                                      const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<float>(float *,
                                                     const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<int>(int *, const int) const;
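// Usage sketch (comment only, illustrative): after an operation that only
// ran on the active grid processes, the root can forward a scalar result
// to the inactive cores, e.g.
//
//   double result = 0.;
//   if (grid->is_process_active())
//     result = ...; // computed via ScaLAPACK on the grid
//   grid->send_to_inactive(&result);
//
// where `grid` is a ProcessGrid as constructed above.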
DEAL_II_NAMESPACE_CLOSE
#endif // DEAL_II_WITH_SCALAPACK