#include <deal.II/base/process_grid.h>

#ifdef DEAL_II_WITH_SCALAPACK

#include <deal.II/lac/scalapack.templates.h>

DEAL_II_NAMESPACE_OPEN

namespace
{
  // Compute the dimensions of the 2d process grid from the number of MPI
  // processes, the matrix dimensions, and the matrix block sizes.
  std::pair<int, int>
  compute_processor_grid_sizes(MPI_Comm           mpi_comm,
                               const unsigned int m,
                               const unsigned int n,
                               const unsigned int block_size_m,
                               const unsigned int block_size_n)
  {
    int n_processes;
    MPI_Comm_size(mpi_comm, &n_processes);

    // Heuristic: use at most one process per block of the matrix.
    const int n_processes_heuristic = int(std::ceil((1. * m) / block_size_m)) *
                                      int(std::ceil((1. * n) / block_size_n));
    const int Np = std::min(n_processes_heuristic, n_processes);

    // Split Np into a rows x columns grid whose aspect ratio follows that of
    // the matrix, i.e. the number of columns Pc is roughly sqrt(ratio * Np).
    const double ratio = double(n) / m;
    int          Pc    = std::floor(std::sqrt(ratio * Np));

    // Limit the number of columns to [2, Np] and derive the number of rows.
    int n_process_columns = std::min(Np, std::max(2, Pc));
    int n_process_rows    = Np / n_process_columns;

    Assert(n_process_columns >= 1 && n_process_rows >= 1 &&
             n_processes >= n_process_rows * n_process_columns,
           ExcMessage("Error in process grid: " +
                      std::to_string(n_process_rows) + "x" +
                      std::to_string(n_process_columns) + "=" +
                      std::to_string(n_process_rows * n_process_columns) +
                      " out of " + std::to_string(n_processes)));

    return std::make_pair(n_process_rows, n_process_columns);
  }
} // anonymous namespace
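
// Worked example of the heuristic above (numbers are illustrative only, they
// do not come from the library): for a 1600 x 400 matrix with 32 x 32 blocks
// on 64 MPI processes, n_processes_heuristic = ceil(1600/32) * ceil(400/32)
// = 50 * 13 = 650, so Np = min(650, 64) = 64; ratio = 400/1600 = 0.25 and
// Pc = floor(sqrt(0.25 * 64)) = 4, giving a 16 x 4 (rows x columns) grid.
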
Utilities::MPI::ProcessGrid::ProcessGrid(
  MPI_Comm                                      mpi_comm,
  const std::pair<unsigned int, unsigned int> &grid_dimensions)
  : mpi_communicator(mpi_comm)
  , this_mpi_process(Utilities::MPI::this_mpi_process(mpi_communicator))
  , n_mpi_processes(Utilities::MPI::n_mpi_processes(mpi_communicator))
  , n_process_rows(grid_dimensions.first)
  , n_process_columns(grid_dimensions.second)
{
  Assert(grid_dimensions.first > 0,
         ExcMessage("Number of process grid rows has to be positive."));
  Assert(grid_dimensions.second > 0,
         ExcMessage("Number of process grid columns has to be positive."));
  Assert(grid_dimensions.first * grid_dimensions.second <= n_mpi_processes,
         ExcMessage("Size of process grid is larger than number of "
                    "available MPI processes."));

  // Initialize the BLACS context on top of the provided communicator, using
  // a row-major ordering of the process grid.
  const bool column_major = false;
  blacs_context           = Csys2blacs_handle(mpi_communicator);
  const char *order       = (column_major ? "Col" : "Row");
  Cblacs_gridinit(&blacs_context, order, n_process_rows, n_process_columns);

  // BLACS may modify the grid size on processes that are not part of the
  // grid, so query copies and flag such processes as inactive.
  int procrows = n_process_rows;
  int proccols = n_process_columns;
  Cblacs_gridinfo(
    blacs_context, &procrows, &proccols, &this_process_row, &this_process_column);
  mpi_process_is_active = (this_process_row >= 0 && this_process_column >= 0);

  // Create an auxiliary communicator containing the root and all processes
  // outside the grid (inactive ranks start after the active ones).
  const unsigned int n_active_mpi_processes = n_process_rows * n_process_columns;
  std::vector<int> inactive_with_root_ranks;
  inactive_with_root_ranks.push_back(0);
  for (unsigned int i = n_active_mpi_processes; i < n_mpi_processes; ++i)
    inactive_with_root_ranks.push_back(i);

  MPI_Group all_group;
  int       ierr = MPI_Comm_group(mpi_communicator, &all_group);
  AssertThrowMPI(ierr);

  MPI_Group inactive_with_root_group;
  const int n = inactive_with_root_ranks.size();
  ierr = MPI_Group_incl(all_group, n, inactive_with_root_ranks.data(),
                        &inactive_with_root_group);
  AssertThrowMPI(ierr);

  ierr = MPI_Comm_create(mpi_communicator, inactive_with_root_group,
                         &mpi_communicator_inactive_with_root);
  AssertThrowMPI(ierr);
  ierr = MPI_Group_free(&all_group);
  AssertThrowMPI(ierr);
  ierr = MPI_Group_free(&inactive_with_root_group);
  AssertThrowMPI(ierr);
}


Utilities::MPI::ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
                                         const unsigned int n_rows_matrix,
                                         const unsigned int n_columns_matrix,
                                         const unsigned int row_block_size,
                                         const unsigned int column_block_size)
  : ProcessGrid(mpi_comm,
                compute_processor_grid_sizes(mpi_comm, n_rows_matrix, n_columns_matrix,
                                             row_block_size, column_block_size))
{}


Utilities::MPI::ProcessGrid::ProcessGrid(MPI_Comm           mpi_comm,
                                         const unsigned int n_rows,
                                         const unsigned int n_columns)
  : ProcessGrid(mpi_comm, std::make_pair(n_rows, n_columns))
{}
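
// Usage sketch (illustrative, not part of this translation unit): with MPI
// initialized, a process grid can be set up either with explicit dimensions
// or through the heuristic above, e.g.
//
//   Utilities::MPI::ProcessGrid grid_2x2(MPI_COMM_WORLD, 2, 2);
//   Utilities::MPI::ProcessGrid grid_auto(MPI_COMM_WORLD, 100, 100, 32, 32);
//
// Processes that do not fit onto the grid stay in the communicator but are
// flagged as inactive.
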
template <typename NumberType>
void Utilities::MPI::ProcessGrid::send_to_inactive(NumberType *value,
                                                   const int   count) const
{
  if (mpi_communicator_inactive_with_root != MPI_COMM_NULL)
    {
      const int ierr =
        MPI_Bcast(value, count, Utilities::MPI::internal::mpi_type_id(value),
                  0 /*from root*/, mpi_communicator_inactive_with_root);
      AssertThrowMPI(ierr);
    }
}
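
// Usage sketch (illustrative): a value computed by the active processes can
// be replicated to the inactive ones through the auxiliary communicator, e.g.
//
//   double result = 0.;             // filled on the active processes
//   grid.send_to_inactive(&result); // count defaults to 1
//
// The call is collective over mpi_communicator_inactive_with_root and a
// no-op on processes where that communicator is MPI_COMM_NULL.
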
// instantiations
template void Utilities::MPI::ProcessGrid::send_to_inactive<double>(double *, const int) const;
template void Utilities::MPI::ProcessGrid::send_to_inactive<float>(float *, const int) const;
template void Utilities::MPI::ProcessGrid::send_to_inactive<int>(int *, const int) const;

DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_SCALAPACK