#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace
{
  // Internal helper: choose the dimensions of the process grid from the total
  // number of available MPI processes, the matrix dimensions, and the matrix
  // block sizes.
  inline std::pair<int, int>
  compute_processor_grid_sizes(const MPI_Comm     mpi_comm,
                               const unsigned int m,
                               const unsigned int n,
                               const unsigned int block_size_m,
                               const unsigned int block_size_n)
  {
    const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);

    // Heuristic upper bound: there is no point in using more processes than
    // there are blocks in the matrix.
    const int n_processes_heuristic = int(std::ceil((1. * m) / block_size_m)) *
                                      int(std::ceil((1. * n) / block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);
    // Split Np into a Pr x Pc grid whose aspect ratio follows that of the
    // matrix: Pc ~ sqrt((n/m) * Np), limited to the range [2, Np].
    const double ratio = double(n) / m;
    int          Pc    = static_cast<int>(std::sqrt(ratio * Np));

    int n_process_columns = std::min(Np, std::max(2, Pc));
    int n_process_rows    = Np / n_process_columns;
    Assert(n_process_columns >= 1 && n_process_rows >= 1 &&
             n_processes >= n_process_rows * n_process_columns,
           ExcMessage(
             "error in process grid: " + std::to_string(n_process_rows) + "x" +
             std::to_string(n_process_columns) + "=" +
             std::to_string(n_process_rows * n_process_columns) + " out of " +
             std::to_string(n_processes)));

    return std::make_pair(n_process_rows, n_process_columns);
  }
} // unnamed namespace
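// Worked example (illustration only, not part of the library source): for a
// 1000 x 1000 matrix with 32 x 32 blocks on 10 MPI processes, the heuristic
// above gives
//   n_processes_heuristic = ceil(1000/32) * ceil(1000/32) = 32 * 32 = 1024,
//   Np                    = min(1024, 10)                 = 10,
//   Pc                    = floor(sqrt(1.0 * 10))         = 3,
//   n_process_columns     = min(10, max(2, 3))            = 3,
//   n_process_rows        = 10 / 3                        = 3,
// i.e. a 3 x 3 grid, leaving one of the 10 ranks outside the process grid.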
namespace Utilities
{
  namespace MPI
  {
    ProcessGrid::ProcessGrid(
      const MPI_Comm                               mpi_comm,
      const std::pair<unsigned int, unsigned int> &grid_dimensions)
      : mpi_communicator(mpi_comm)
      , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
      , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
      , n_process_rows(grid_dimensions.first)
      , n_process_columns(grid_dimensions.second)
    {
      Assert(grid_dimensions.first > 0,
             ExcMessage("Number of process grid rows has to be positive."));
      Assert(grid_dimensions.second > 0,
             ExcMessage("Number of process grid columns has to be positive."));
      Assert(
        grid_dimensions.first * grid_dimensions.second <= n_mpi_processes,
        ExcMessage(
          "Size of process grid is larger than number of available MPI processes."));

      // Use a row-major ordering of the process grid:
      const bool  column_major = false;
      const char *order        = (column_major ? "Col" : "Row");
      // Create an auxiliary communicator containing the root rank together
      // with all inactive ranks; the inactive ranks are those with
      // id >= n_process_rows * n_process_columns.
      const unsigned int n_active_mpi_processes =
        n_process_rows * n_process_columns;
      Assert(mpi_process_is_active ||
               this_mpi_process >= n_active_mpi_processes,
             ExcInternalError());

      std::vector<int> inactive_with_root_ranks;
      inactive_with_root_ranks.push_back(0);
      for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
        inactive_with_root_ranks.push_back(i);
      // Get the group of processes behind mpi_communicator:
      MPI_Group all_group;
      int       ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // Construct the group containing the root and all inactive ranks:
      MPI_Group inactive_with_root_group;
      const int n = inactive_with_root_ranks.size();
      ierr        = MPI_Group_incl(all_group,
                            n,
                            inactive_with_root_ranks.data(),
                            &inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Create the communicator over that group; on the active ranks other
      // than the root the result is MPI_COMM_NULL.
      const int mpi_tag = 55; // an arbitrary tag for MPI_Comm_create_group
      ierr              = MPI_Comm_create_group(mpi_communicator,
                                   inactive_with_root_group,
                                   mpi_tag,
                                   &mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);

      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&inactive_with_root_group);
      AssertThrowMPI(ierr);
    }
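    // Illustration (not part of the library source): continuing the example
    // above with 10 MPI ranks and a 3 x 3 grid, ranks 0..8 are active on the
    // BLACS grid and rank 9 is inactive; mpi_communicator_inactive_with_root
    // then contains ranks {0, 9}, while on ranks 1..8 it is MPI_COMM_NULL.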
    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows_matrix,
                             const unsigned int n_columns_matrix,
                             const unsigned int row_block_size,
                             const unsigned int column_block_size)
      : ProcessGrid(mpi_comm,
                    compute_processor_grid_sizes(mpi_comm,
                                                 n_rows_matrix,
                                                 n_columns_matrix,
                                                 row_block_size,
                                                 column_block_size))
    {}



    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows,
                             const unsigned int n_columns)
      : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
    {}
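    // Usage sketch (illustration only): a ProcessGrid sized for a square
    // matrix, shared by the ScaLAPACK matrices that live on it. The concrete
    // sizes are assumptions made for the example.
    //
    //   const unsigned int size = 1000, block_size = 32;
    //   const auto grid = std::make_shared<Utilities::MPI::ProcessGrid>(
    //     mpi_communicator, size, size, block_size, block_size);
    //   ScaLAPACKMatrix<double> matrix(size, grid, block_size);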
    template <typename NumberType>
    void
    ProcessGrid::send_to_inactive(NumberType *value, const int count) const
    {
      Assert(count > 0, ExcInternalError());
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        {
          // Broadcast from the root of the auxiliary communicator to all
          // inactive ranks:
          const int ierr =
            MPI_Bcast(value,
                      count,
                      Utilities::MPI::mpi_type_id_for_type<NumberType>,
                      0 /*from root*/,
                      mpi_communicator_inactive_with_root);
          AssertThrowMPI(ierr);
        }
    }
  } // namespace MPI
} // namespace Utilities

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK