#include <deal.II/base/process_grid.h>

#ifdef DEAL_II_WITH_SCALAPACK

#  include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace
{
  // Internal helper: choose the dimensions of the 2d process grid from the
  // total number of MPI processes, the matrix dimensions and the block sizes.
  inline std::pair<int, int>
  compute_processor_grid_sizes(const MPI_Comm     mpi_comm,
                               const unsigned int m,
                               const unsigned int n,
                               const unsigned int block_size_m,
                               const unsigned int block_size_n)
  {
    const int n_processes = Utilities::MPI::n_mpi_processes(mpi_comm);

    // Heuristic: do not use more processes than there are blocks in the
    // matrix.
    const int n_processes_heuristic = int(std::ceil((1. * m) / block_size_m)) *
                                      int(std::ceil((1. * n) / block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);

    // Choose the grid to have roughly the same aspect ratio as the matrix,
    // i.e. Pc/Pr ~ n/m with Pr*Pc <= Np, and clamp the estimate to [1, Np]:
    const double ratio = double(n) / m;
    const int    Pc    = static_cast<int>(std::sqrt(ratio * Np));

    const int n_process_columns = std::min(Np, std::max(1, Pc));
    const int n_process_rows    = Np / n_process_columns;

    Assert(n_process_columns >= 1 && n_process_rows >= 1 &&
             n_processes >= n_process_rows * n_process_columns,
           ExcMessage(
             "error in process grid: " + std::to_string(n_process_rows) +
             "x" + std::to_string(n_process_columns) + "=" +
             std::to_string(n_process_rows * n_process_columns) +
             " out of " + std::to_string(n_processes)));

    return std::make_pair(n_process_rows, n_process_columns);
  }
} // anonymous namespace
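
// A worked example of the heuristic above (numbers chosen for illustration):
// for a 1000x1000 matrix with 32x32 blocks on 16 MPI processes,
//   n_processes_heuristic = ceil(1000/32) * ceil(1000/32) = 32 * 32 = 1024,
//   Np                    = min(1024, 16) = 16,
//   ratio                 = 1000./1000. = 1,
//   Pc                    = int(sqrt(1 * 16)) = 4,
//   n_process_columns     = min(16, max(1, 4)) = 4,
//   n_process_rows        = 16 / 4 = 4,
// i.e. the 16 processes are arranged as a 4x4 grid.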

namespace Utilities
{
  namespace MPI
  {
    ProcessGrid::ProcessGrid(
      const MPI_Comm                               mpi_comm,
      const std::pair<unsigned int, unsigned int> &grid_dimensions)
      : mpi_communicator(mpi_comm)
      , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
      , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
      , n_process_rows(grid_dimensions.first)
      , n_process_columns(grid_dimensions.second)
    {
      Assert(grid_dimensions.first > 0,
             ExcMessage("Number of process grid rows has to be positive."));
      Assert(grid_dimensions.second > 0,
             ExcMessage("Number of process grid columns has to be positive."));

      Assert(
        grid_dimensions.first * grid_dimensions.second <= n_mpi_processes,
        ExcMessage(
          "Size of process grid is larger than number of available MPI processes."));

      // Processor grid order: the BLACS grid is laid out row-major.
      const bool  column_major = false;
      const char *order        = (column_major ? "Col" : "Row");

      // Create a BLACS context from the communicator and initialize the
      // 2d process grid on it.
      blacs_context = Csys2blacs_handle(mpi_communicator);
      Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);

      // Create an auxiliary communicator that contains the root and all
      // inactive processes; ranks that did not fit on the grid start at
      // n_process_rows * n_process_columns.
      const unsigned int n_active_mpi_processes =
        n_process_rows * n_process_columns;

      std::vector<int> inactive_with_root_ranks;
      inactive_with_root_ranks.push_back(0);
      for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
        inactive_with_root_ranks.push_back(i);

      // Get the group underlying mpi_communicator ...
      int       ierr = 0;
      MPI_Group all_group;
      ierr = MPI_Comm_group(mpi_communicator, &all_group);
      AssertThrowMPI(ierr);

      // ... and build the subgroup of the root plus all inactive ranks:
      MPI_Group inactive_with_root_group;
      const int n = inactive_with_root_ranks.size();
      ierr        = MPI_Group_incl(all_group,
                            n,
                            inactive_with_root_ranks.data(),
                            &inactive_with_root_group);
      AssertThrowMPI(ierr);

      // Create the communicator for that subgroup; on active processes other
      // than the root the result is MPI_COMM_NULL.
      const int mpi_tag =
        Utilities::MPI::internal::Tags::process_grid_constructor;
      ierr = MPI_Comm_create_group(mpi_communicator,
                                   inactive_with_root_group,
                                   mpi_tag,
                                   &mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);

      ierr = MPI_Group_free(&all_group);
      AssertThrowMPI(ierr);
      ierr = MPI_Group_free(&inactive_with_root_group);
      AssertThrowMPI(ierr);
    }


    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows_matrix,
                             const unsigned int n_columns_matrix,
                             const unsigned int row_block_size,
                             const unsigned int column_block_size)
      : ProcessGrid(mpi_comm,
                    compute_processor_grid_sizes(mpi_comm,
                                                 n_rows_matrix,
                                                 n_columns_matrix,
                                                 row_block_size,
                                                 column_block_size))
    {}



    ProcessGrid::ProcessGrid(const MPI_Comm     mpi_comm,
                             const unsigned int n_rows,
                             const unsigned int n_columns)
      : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
    {}
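
    // Typical construction from user code (a sketch; argc/argv come from
    // main(), and the explicit 2x2 grid assumes a run with at least four MPI
    // processes). Wrapping the grid in a shared_ptr is the usual way to hand
    // it to ScaLAPACKMatrix, which stores a pointer to the grid:
    //
    //   dealii::Utilities::MPI::MPI_InitFinalize mpi_init(argc, argv, 1);
    //
    //   // Let the heuristic above choose the grid for a 1000x1000 matrix
    //   // distributed in 32x32 blocks:
    //   const auto grid =
    //     std::make_shared<dealii::Utilities::MPI::ProcessGrid>(
    //       MPI_COMM_WORLD, 1000, 1000, 32, 32);
    //
    //   // Or request a fixed 2x2 grid explicitly:
    //   dealii::Utilities::MPI::ProcessGrid grid_2x2(MPI_COMM_WORLD, 2, 2);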


    template <typename NumberType>
    void
    ProcessGrid::send_to_inactive(NumberType *value, const int count) const
    {
      Assert(count > 0, ExcInternalError());
      if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
        {
          // Broadcast from the root of the auxiliary communicator (the
          // globally first process) to all inactive processes.
          const int ierr =
            MPI_Bcast(value,
                      count,
                      Utilities::MPI::mpi_type_id_for_type<NumberType>,
                      0 /*from root*/,
                      mpi_communicator_inactive_with_root);
          AssertThrowMPI(ierr);
        }
    }

  } // namespace MPI
} // namespace Utilities

// explicit instantiations
template void
Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *,
                                                      const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<float>(float *,
                                                     const int) const;
template void
Utilities::MPI::ProcessGrid::send_to_inactive<int>(int *, const int) const;

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK
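
// How the broadcast is typically used (a sketch; in deal.II it is
// ScaLAPACKMatrix, a friend of ProcessGrid, that calls send_to_inactive() to
// make results computed on the grid available to the remaining ranks):
//
//   double value = 0.;
//   if (grid->is_process_active())
//     value = 42.;                   // stands in for a ScaLAPACK result
//   grid->send_to_inactive(&value);  // default count = 1
//   // now every rank of the original communicator holds the same value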